/*
 * Copyright (C) 2014 Michael Brown <mbrown@fensystems.co.uk>.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 * You can also choose to distribute this program under the terms of
 * the Unmodified Binary Distribution Licence (as given in the file
 * COPYING.UBDL), provided that you have satisfied its requirements.
 */
24 FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL
);
33 #include <ipxe/malloc.h>
34 #include <ipxe/umalloc.h>
37 #include <ipxe/init.h>
38 #include <ipxe/profile.h>
43 * USB eXtensible Host Controller Interface (xHCI) driver
47 /** Message transfer profiler */
48 static struct profiler xhci_message_profiler __profiler
=
49 { .name
= "xhci.message" };
51 /** Stream transfer profiler */
52 static struct profiler xhci_stream_profiler __profiler
=
53 { .name
= "xhci.stream" };
55 /** Event ring profiler */
56 static struct profiler xhci_event_profiler __profiler
=
57 { .name
= "xhci.event" };
59 /** Transfer event profiler */
60 static struct profiler xhci_transfer_profiler __profiler
=
61 { .name
= "xhci.transfer" };
/* Disambiguate the various error causes
 *
 * Each xHCI TRB completion code (xHCI spec section 6.4.2) is mapped to
 * a uniquified errno so that failures can be reported distinctly.
 * Codes 0-31 map onto EIO, codes 32-63 onto EPROTO.
 *
 * NOTE(review): the extraction dropped several "#define EIO_xxx" lines
 * and string arguments; they are reconstructed here — verify against
 * upstream iPXE drivers/usb/xhci.c.
 */
#define EIO_DATA \
	__einfo_error ( EINFO_EIO_DATA )
#define EINFO_EIO_DATA \
	__einfo_uniqify ( EINFO_EIO, ( 2 - 0 ), \
			  "Data buffer error" )
#define EIO_BABBLE \
	__einfo_error ( EINFO_EIO_BABBLE )
#define EINFO_EIO_BABBLE \
	__einfo_uniqify ( EINFO_EIO, ( 3 - 0 ), \
			  "Babble detected" )
#define EIO_USB \
	__einfo_error ( EINFO_EIO_USB )
#define EINFO_EIO_USB \
	__einfo_uniqify ( EINFO_EIO, ( 4 - 0 ), \
			  "USB transaction error" )
#define EIO_TRB \
	__einfo_error ( EINFO_EIO_TRB )
#define EINFO_EIO_TRB \
	__einfo_uniqify ( EINFO_EIO, ( 5 - 0 ), \
			  "TRB error" )
#define EIO_STALL \
	__einfo_error ( EINFO_EIO_STALL )
#define EINFO_EIO_STALL \
	__einfo_uniqify ( EINFO_EIO, ( 6 - 0 ), \
			  "Stall error" )
#define EIO_RESOURCE \
	__einfo_error ( EINFO_EIO_RESOURCE )
#define EINFO_EIO_RESOURCE \
	__einfo_uniqify ( EINFO_EIO, ( 7 - 0 ), \
			  "Resource error" )
#define EIO_BANDWIDTH \
	__einfo_error ( EINFO_EIO_BANDWIDTH )
#define EINFO_EIO_BANDWIDTH \
	__einfo_uniqify ( EINFO_EIO, ( 8 - 0 ), \
			  "Bandwidth error" )
#define EIO_NO_SLOTS \
	__einfo_error ( EINFO_EIO_NO_SLOTS )
#define EINFO_EIO_NO_SLOTS \
	__einfo_uniqify ( EINFO_EIO, ( 9 - 0 ), \
			  "No slots available" )
#define EIO_STREAM_TYPE \
	__einfo_error ( EINFO_EIO_STREAM_TYPE )
#define EINFO_EIO_STREAM_TYPE \
	__einfo_uniqify ( EINFO_EIO, ( 10 - 0 ), \
			  "Invalid stream type" )
#define EIO_SLOT \
	__einfo_error ( EINFO_EIO_SLOT )
#define EINFO_EIO_SLOT \
	__einfo_uniqify ( EINFO_EIO, ( 11 - 0 ), \
			  "Slot not enabled" )
#define EIO_ENDPOINT \
	__einfo_error ( EINFO_EIO_ENDPOINT )
#define EINFO_EIO_ENDPOINT \
	__einfo_uniqify ( EINFO_EIO, ( 12 - 0 ), \
			  "Endpoint not enabled" )
#define EIO_SHORT \
	__einfo_error ( EINFO_EIO_SHORT )
#define EINFO_EIO_SHORT \
	__einfo_uniqify ( EINFO_EIO, ( 13 - 0 ), \
			  "Short packet" )
#define EIO_UNDERRUN \
	__einfo_error ( EINFO_EIO_UNDERRUN )
#define EINFO_EIO_UNDERRUN \
	__einfo_uniqify ( EINFO_EIO, ( 14 - 0 ), \
			  "Ring underrun" )
#define EIO_OVERRUN \
	__einfo_error ( EINFO_EIO_OVERRUN )
#define EINFO_EIO_OVERRUN \
	__einfo_uniqify ( EINFO_EIO, ( 15 - 0 ), \
			  "Ring overrun" )
#define EIO_VF_RING_FULL \
	__einfo_error ( EINFO_EIO_VF_RING_FULL )
#define EINFO_EIO_VF_RING_FULL \
	__einfo_uniqify ( EINFO_EIO, ( 16 - 0 ), \
			  "Virtual function event ring full" )
#define EIO_PARAMETER \
	__einfo_error ( EINFO_EIO_PARAMETER )
#define EINFO_EIO_PARAMETER \
	__einfo_uniqify ( EINFO_EIO, ( 17 - 0 ), \
			  "Parameter error" )
#define EIO_BANDWIDTH_OVERRUN \
	__einfo_error ( EINFO_EIO_BANDWIDTH_OVERRUN )
#define EINFO_EIO_BANDWIDTH_OVERRUN \
	__einfo_uniqify ( EINFO_EIO, ( 18 - 0 ), \
			  "Bandwidth overrun" )
#define EIO_CONTEXT \
	__einfo_error ( EINFO_EIO_CONTEXT )
#define EINFO_EIO_CONTEXT \
	__einfo_uniqify ( EINFO_EIO, ( 19 - 0 ), \
			  "Context state error" )
#define EIO_NO_PING \
	__einfo_error ( EINFO_EIO_NO_PING )
#define EINFO_EIO_NO_PING \
	__einfo_uniqify ( EINFO_EIO, ( 20 - 0 ), \
			  "No ping response" )
#define EIO_RING_FULL \
	__einfo_error ( EINFO_EIO_RING_FULL )
#define EINFO_EIO_RING_FULL \
	__einfo_uniqify ( EINFO_EIO, ( 21 - 0 ), \
			  "Event ring full" )
#define EIO_INCOMPATIBLE \
	__einfo_error ( EINFO_EIO_INCOMPATIBLE )
#define EINFO_EIO_INCOMPATIBLE \
	__einfo_uniqify ( EINFO_EIO, ( 22 - 0 ), \
			  "Incompatible device" )
#define EIO_MISSED \
	__einfo_error ( EINFO_EIO_MISSED )
#define EINFO_EIO_MISSED \
	__einfo_uniqify ( EINFO_EIO, ( 23 - 0 ), \
			  "Missed service error" )
#define EIO_CMD_STOPPED \
	__einfo_error ( EINFO_EIO_CMD_STOPPED )
#define EINFO_EIO_CMD_STOPPED \
	__einfo_uniqify ( EINFO_EIO, ( 24 - 0 ), \
			  "Command ring stopped" )
#define EIO_CMD_ABORTED \
	__einfo_error ( EINFO_EIO_CMD_ABORTED )
#define EINFO_EIO_CMD_ABORTED \
	__einfo_uniqify ( EINFO_EIO, ( 25 - 0 ), \
			  "Command aborted" )
#define EIO_STOP \
	__einfo_error ( EINFO_EIO_STOP )
#define EINFO_EIO_STOP \
	__einfo_uniqify ( EINFO_EIO, ( 26 - 0 ), \
			  "Stopped" )
#define EIO_STOP_LEN \
	__einfo_error ( EINFO_EIO_STOP_LEN )
#define EINFO_EIO_STOP_LEN \
	__einfo_uniqify ( EINFO_EIO, ( 27 - 0 ), \
			  "Stopped - length invalid" )
#define EIO_STOP_SHORT \
	__einfo_error ( EINFO_EIO_STOP_SHORT )
#define EINFO_EIO_STOP_SHORT \
	__einfo_uniqify ( EINFO_EIO, ( 28 - 0 ), \
			  "Stopped - short packet" )
#define EIO_LATENCY \
	__einfo_error ( EINFO_EIO_LATENCY )
#define EINFO_EIO_LATENCY \
	__einfo_uniqify ( EINFO_EIO, ( 29 - 0 ), \
			  "Maximum exit latency too large" )
#define EIO_ISOCH \
	__einfo_error ( EINFO_EIO_ISOCH )
#define EINFO_EIO_ISOCH \
	__einfo_uniqify ( EINFO_EIO, ( 31 - 0 ), \
			  "Isochronous buffer overrun" )
#define EPROTO_LOST \
	__einfo_error ( EINFO_EPROTO_LOST )
#define EINFO_EPROTO_LOST \
	__einfo_uniqify ( EINFO_EPROTO, ( 32 - 32 ), \
			  "Event lost" )
#define EPROTO_UNDEFINED \
	__einfo_error ( EINFO_EPROTO_UNDEFINED )
#define EINFO_EPROTO_UNDEFINED \
	__einfo_uniqify ( EINFO_EPROTO, ( 33 - 32 ), \
			  "Undefined error" )
#define EPROTO_STREAM_ID \
	__einfo_error ( EINFO_EPROTO_STREAM_ID )
#define EINFO_EPROTO_STREAM_ID \
	__einfo_uniqify ( EINFO_EPROTO, ( 34 - 32 ), \
			  "Invalid stream ID" )
#define EPROTO_SECONDARY \
	__einfo_error ( EINFO_EPROTO_SECONDARY )
#define EINFO_EPROTO_SECONDARY \
	__einfo_uniqify ( EINFO_EPROTO, ( 35 - 32 ), \
			  "Secondary bandwidth error" )
#define EPROTO_SPLIT \
	__einfo_error ( EINFO_EPROTO_SPLIT )
#define EINFO_EPROTO_SPLIT \
	__einfo_uniqify ( EINFO_EPROTO, ( 36 - 32 ), \
			  "Split transaction error" )
/* Map a raw completion code to the corresponding uniquified errno;
 * codes outside the ranges above collapse to EFAULT.
 */
#define ECODE(code)						\
	( ( (code) < 32 ) ?					\
	  EUNIQ ( EINFO_EIO, ( (code) & 31 ), EIO_DATA, EIO_BABBLE,	\
		  EIO_USB, EIO_TRB, EIO_STALL, EIO_RESOURCE,		\
		  EIO_BANDWIDTH, EIO_NO_SLOTS, EIO_STREAM_TYPE,		\
		  EIO_SLOT, EIO_ENDPOINT, EIO_SHORT, EIO_UNDERRUN,	\
		  EIO_OVERRUN, EIO_VF_RING_FULL, EIO_PARAMETER,		\
		  EIO_BANDWIDTH_OVERRUN, EIO_CONTEXT, EIO_NO_PING,	\
		  EIO_RING_FULL, EIO_INCOMPATIBLE, EIO_MISSED,		\
		  EIO_CMD_STOPPED, EIO_CMD_ABORTED, EIO_STOP,		\
		  EIO_STOP_LEN, EIO_STOP_SHORT, EIO_LATENCY,		\
		  EIO_ISOCH ) :						\
	  ( ( (code) < 64 ) ?						\
	    EUNIQ ( EINFO_EPROTO, ( (code) & 31 ), EPROTO_LOST,		\
		    EPROTO_UNDEFINED, EPROTO_STREAM_ID,			\
		    EPROTO_SECONDARY, EPROTO_SPLIT ) :			\
	    EFAULT ) )
252 /******************************************************************************
256 ******************************************************************************
262 * @v xhci xHCI device
263 * @v regs MMIO registers
265 static void xhci_init ( struct xhci_device
*xhci
, void *regs
) {
274 /* Locate capability, operational, runtime, and doorbell registers */
276 caplength
= readb ( xhci
->cap
+ XHCI_CAP_CAPLENGTH
);
277 rtsoff
= readl ( xhci
->cap
+ XHCI_CAP_RTSOFF
);
278 dboff
= readl ( xhci
->cap
+ XHCI_CAP_DBOFF
);
279 xhci
->op
= ( xhci
->cap
+ caplength
);
280 xhci
->run
= ( xhci
->cap
+ rtsoff
);
281 xhci
->db
= ( xhci
->cap
+ dboff
);
282 DBGC2 ( xhci
, "XHCI %p cap %08lx op %08lx run %08lx db %08lx\n",
283 xhci
, virt_to_phys ( xhci
->cap
), virt_to_phys ( xhci
->op
),
284 virt_to_phys ( xhci
->run
), virt_to_phys ( xhci
->db
) );
286 /* Read structural parameters 1 */
287 hcsparams1
= readl ( xhci
->cap
+ XHCI_CAP_HCSPARAMS1
);
288 xhci
->slots
= XHCI_HCSPARAMS1_SLOTS ( hcsparams1
);
289 xhci
->intrs
= XHCI_HCSPARAMS1_INTRS ( hcsparams1
);
290 xhci
->ports
= XHCI_HCSPARAMS1_PORTS ( hcsparams1
);
291 DBGC ( xhci
, "XHCI %p has %d slots %d intrs %d ports\n",
292 xhci
, xhci
->slots
, xhci
->intrs
, xhci
->ports
);
294 /* Read structural parameters 2 */
295 hcsparams2
= readl ( xhci
->cap
+ XHCI_CAP_HCSPARAMS2
);
296 xhci
->scratchpads
= XHCI_HCSPARAMS2_SCRATCHPADS ( hcsparams2
);
297 DBGC2 ( xhci
, "XHCI %p needs %d scratchpads\n",
298 xhci
, xhci
->scratchpads
);
300 /* Read capability parameters 1 */
301 hccparams1
= readl ( xhci
->cap
+ XHCI_CAP_HCCPARAMS1
);
302 xhci
->addr64
= XHCI_HCCPARAMS1_ADDR64 ( hccparams1
);
303 xhci
->csz_shift
= XHCI_HCCPARAMS1_CSZ_SHIFT ( hccparams1
);
304 xhci
->xecp
= XHCI_HCCPARAMS1_XECP ( hccparams1
);
307 pagesize
= readl ( xhci
->op
+ XHCI_OP_PAGESIZE
);
308 xhci
->pagesize
= XHCI_PAGESIZE ( pagesize
);
309 assert ( xhci
->pagesize
!= 0 );
310 assert ( ( ( xhci
->pagesize
) & ( xhci
->pagesize
- 1 ) ) == 0 );
311 DBGC2 ( xhci
, "XHCI %p page size %zd bytes\n",
312 xhci
, xhci
->pagesize
);
316 * Find extended capability
318 * @v xhci xHCI device
319 * @v id Capability ID
320 * @v offset Offset to previous extended capability instance, or zero
321 * @ret offset Offset to extended capability, or zero if not found
323 static unsigned int xhci_extended_capability ( struct xhci_device
*xhci
,
325 unsigned int offset
) {
329 /* Locate the extended capability */
332 /* Locate first or next capability as applicable */
334 xecp
= readl ( xhci
->cap
+ offset
);
335 next
= XHCI_XECP_NEXT ( xecp
);
343 /* Check if this is the requested capability */
344 xecp
= readl ( xhci
->cap
+ offset
);
345 if ( XHCI_XECP_ID ( xecp
) == id
)
351 * Write potentially 64-bit register
353 * @v xhci xHCI device
355 * @v reg Register address
356 * @ret rc Return status code
358 static inline __attribute__ (( always_inline
)) int
359 xhci_writeq ( struct xhci_device
*xhci
, physaddr_t value
, void *reg
) {
361 /* If this is a 32-bit build, then this can never fail
362 * (allowing the compiler to optimise out the error path).
364 if ( sizeof ( value
) <= sizeof ( uint32_t ) ) {
365 writel ( value
, reg
);
366 writel ( 0, ( reg
+ sizeof ( uint32_t ) ) );
370 /* If the device does not support 64-bit addresses and this
371 * address is outside the 32-bit address space, then fail.
373 if ( ( value
& ~0xffffffffULL
) && ! xhci
->addr64
) {
374 DBGC ( xhci
, "XHCI %p cannot access address %lx\n",
379 /* If this is a 64-bit build, then writeq() is available */
380 writeq ( value
, reg
);
385 * Calculate buffer alignment
388 * @ret align Buffer alignment
390 * Determine alignment required for a buffer which must be aligned to
391 * at least XHCI_MIN_ALIGN and which must not cross a page boundary.
393 static inline size_t xhci_align ( size_t len
) {
396 /* Align to own length (rounded up to a power of two) */
397 align
= ( 1 << fls ( len
- 1 ) );
399 /* Round up to XHCI_MIN_ALIGN if needed */
400 if ( align
< XHCI_MIN_ALIGN
)
401 align
= XHCI_MIN_ALIGN
;
407 * Calculate device context offset
409 * @v xhci xHCI device
410 * @v ctx Context index
412 static inline size_t xhci_device_context_offset ( struct xhci_device
*xhci
,
415 return ( XHCI_DCI ( ctx
) << xhci
->csz_shift
);
419 * Calculate input context offset
421 * @v xhci xHCI device
422 * @v ctx Context index
424 static inline size_t xhci_input_context_offset ( struct xhci_device
*xhci
,
427 return ( XHCI_ICI ( ctx
) << xhci
->csz_shift
);
430 /******************************************************************************
434 ******************************************************************************
438 * Dump host controller registers
440 * @v xhci xHCI device
442 static inline void xhci_dump ( struct xhci_device
*xhci
) {
449 /* Do nothing unless debugging is enabled */
454 usbcmd
= readl ( xhci
->op
+ XHCI_OP_USBCMD
);
455 DBGC ( xhci
, "XHCI %p USBCMD %08x%s%s\n", xhci
, usbcmd
,
456 ( ( usbcmd
& XHCI_USBCMD_RUN
) ?
" run" : "" ),
457 ( ( usbcmd
& XHCI_USBCMD_HCRST
) ?
" hcrst" : "" ) );
460 usbsts
= readl ( xhci
->op
+ XHCI_OP_USBSTS
);
461 DBGC ( xhci
, "XHCI %p USBSTS %08x%s\n", xhci
, usbsts
,
462 ( ( usbsts
& XHCI_USBSTS_HCH
) ?
" hch" : "" ) );
465 pagesize
= readl ( xhci
->op
+ XHCI_OP_PAGESIZE
);
466 DBGC ( xhci
, "XHCI %p PAGESIZE %08x\n", xhci
, pagesize
);
469 dnctrl
= readl ( xhci
->op
+ XHCI_OP_DNCTRL
);
470 DBGC ( xhci
, "XHCI %p DNCTRL %08x\n", xhci
, dnctrl
);
473 config
= readl ( xhci
->op
+ XHCI_OP_CONFIG
);
474 DBGC ( xhci
, "XHCI %p CONFIG %08x\n", xhci
, config
);
478 * Dump port registers
480 * @v xhci xHCI device
481 * @v port Port number
483 static inline void xhci_dump_port ( struct xhci_device
*xhci
,
484 unsigned int port
) {
490 /* Do nothing unless debugging is enabled */
495 portsc
= readl ( xhci
->op
+ XHCI_OP_PORTSC ( port
) );
496 DBGC ( xhci
, "XHCI %p port %d PORTSC %08x%s%s%s%s psiv=%d\n",
498 ( ( portsc
& XHCI_PORTSC_CCS
) ?
" ccs" : "" ),
499 ( ( portsc
& XHCI_PORTSC_PED
) ?
" ped" : "" ),
500 ( ( portsc
& XHCI_PORTSC_PR
) ?
" pr" : "" ),
501 ( ( portsc
& XHCI_PORTSC_PP
) ?
" pp" : "" ),
502 XHCI_PORTSC_PSIV ( portsc
) );
505 portpmsc
= readl ( xhci
->op
+ XHCI_OP_PORTPMSC ( port
) );
506 DBGC ( xhci
, "XHCI %p port %d PORTPMSC %08x\n", xhci
, port
, portpmsc
);
509 portli
= readl ( xhci
->op
+ XHCI_OP_PORTLI ( port
) );
510 DBGC ( xhci
, "XHCI %p port %d PORTLI %08x\n", xhci
, port
, portli
);
513 porthlpmc
= readl ( xhci
->op
+ XHCI_OP_PORTHLPMC ( port
) );
514 DBGC ( xhci
, "XHCI %p port %d PORTHLPMC %08x\n",
515 xhci
, port
, porthlpmc
);
/******************************************************************************
 *
 * USB legacy support
 *
 ******************************************************************************
 */

/** Prevent the release of ownership back to BIOS */
static int xhci_legacy_prevent_release;
529 * Initialise USB legacy support
531 * @v xhci xHCI device
533 static void xhci_legacy_init ( struct xhci_device
*xhci
) {
537 /* Locate USB legacy support capability (if present) */
538 legacy
= xhci_extended_capability ( xhci
, XHCI_XECP_ID_LEGACY
, 0 );
540 /* Not an error; capability may not be present */
541 DBGC ( xhci
, "XHCI %p has no USB legacy support capability\n",
546 /* Check if legacy USB support is enabled */
547 bios
= readb ( xhci
->cap
+ legacy
+ XHCI_USBLEGSUP_BIOS
);
548 if ( ! ( bios
& XHCI_USBLEGSUP_BIOS_OWNED
) ) {
549 /* Not an error; already owned by OS */
550 DBGC ( xhci
, "XHCI %p USB legacy support already disabled\n",
555 /* Record presence of USB legacy support capability */
556 xhci
->legacy
= legacy
;
560 * Claim ownership from BIOS
562 * @v xhci xHCI device
564 static void xhci_legacy_claim ( struct xhci_device
*xhci
) {
569 /* Do nothing unless legacy support capability is present */
570 if ( ! xhci
->legacy
)
573 /* Claim ownership */
574 writeb ( XHCI_USBLEGSUP_OS_OWNED
,
575 xhci
->cap
+ xhci
->legacy
+ XHCI_USBLEGSUP_OS
);
577 /* Wait for BIOS to release ownership */
578 for ( i
= 0 ; i
< XHCI_USBLEGSUP_MAX_WAIT_MS
; i
++ ) {
580 /* Check if BIOS has released ownership */
581 bios
= readb ( xhci
->cap
+ xhci
->legacy
+ XHCI_USBLEGSUP_BIOS
);
582 if ( ! ( bios
& XHCI_USBLEGSUP_BIOS_OWNED
) ) {
583 DBGC ( xhci
, "XHCI %p claimed ownership from BIOS\n",
585 ctlsts
= readl ( xhci
->cap
+ xhci
->legacy
+
586 XHCI_USBLEGSUP_CTLSTS
);
588 DBGC ( xhci
, "XHCI %p warning: BIOS retained "
589 "SMIs: %08x\n", xhci
, ctlsts
);
598 /* BIOS did not release ownership. Claim it forcibly by
599 * disabling all SMIs.
601 DBGC ( xhci
, "XHCI %p could not claim ownership from BIOS: forcibly "
602 "disabling SMIs\n", xhci
);
603 writel ( 0, xhci
->cap
+ xhci
->legacy
+ XHCI_USBLEGSUP_CTLSTS
);
607 * Release ownership back to BIOS
609 * @v xhci xHCI device
611 static void xhci_legacy_release ( struct xhci_device
*xhci
) {
613 /* Do nothing unless legacy support capability is present */
614 if ( ! xhci
->legacy
)
617 /* Do nothing if releasing ownership is prevented */
618 if ( xhci_legacy_prevent_release
) {
619 DBGC ( xhci
, "XHCI %p not releasing ownership to BIOS\n", xhci
);
623 /* Release ownership */
624 writeb ( 0, xhci
->cap
+ xhci
->legacy
+ XHCI_USBLEGSUP_OS
);
625 DBGC ( xhci
, "XHCI %p released ownership to BIOS\n", xhci
);
628 /******************************************************************************
630 * Supported protocols
632 ******************************************************************************
636 * Transcribe port speed (for debugging)
638 * @v psi Protocol speed ID
639 * @ret speed Transcribed speed
641 static inline const char * xhci_speed_name ( uint32_t psi
) {
642 static const char *exponents
[4] = { "", "k", "M", "G" };
643 static char buf
[ 10 /* "xxxxxXbps" + NUL */ ];
644 unsigned int mantissa
;
645 unsigned int exponent
;
647 /* Extract mantissa and exponent */
648 mantissa
= XHCI_SUPPORTED_PSI_MANTISSA ( psi
);
649 exponent
= XHCI_SUPPORTED_PSI_EXPONENT ( psi
);
651 /* Transcribe speed */
652 snprintf ( buf
, sizeof ( buf
), "%d%sbps",
653 mantissa
, exponents
[exponent
] );
658 * Find supported protocol extended capability for a port
660 * @v xhci xHCI device
661 * @v port Port number
662 * @ret supported Offset to extended capability, or zero if not found
664 static unsigned int xhci_supported_protocol ( struct xhci_device
*xhci
,
665 unsigned int port
) {
666 unsigned int supported
= 0;
671 /* Iterate over all supported protocol structures */
672 while ( ( supported
= xhci_extended_capability ( xhci
,
673 XHCI_XECP_ID_SUPPORTED
,
676 /* Determine port range */
677 ports
= readl ( xhci
->cap
+ supported
+ XHCI_SUPPORTED_PORTS
);
678 offset
= XHCI_SUPPORTED_PORTS_OFFSET ( ports
);
679 count
= XHCI_SUPPORTED_PORTS_COUNT ( ports
);
681 /* Check if port lies within this range */
682 if ( ( port
- offset
) < count
)
686 DBGC ( xhci
, "XHCI %p port %d has no supported protocol\n",
694 * @v xhci xHCI device
695 * @v port Port number
696 * @ret protocol USB protocol, or zero if not found
698 static unsigned int xhci_port_protocol ( struct xhci_device
*xhci
,
699 unsigned int port
) {
700 unsigned int supported
= xhci_supported_protocol ( xhci
, port
);
705 unsigned int protocol
;
715 /* Fail if there is no supported protocol */
719 /* Determine protocol version */
720 revision
= readl ( xhci
->cap
+ supported
+ XHCI_SUPPORTED_REVISION
);
721 protocol
= XHCI_SUPPORTED_REVISION_VER ( revision
);
723 /* Describe port protocol */
725 name
.raw
= cpu_to_le32 ( readl ( xhci
->cap
+ supported
+
726 XHCI_SUPPORTED_NAME
) );
728 slot
= readl ( xhci
->cap
+ supported
+ XHCI_SUPPORTED_SLOT
);
729 type
= XHCI_SUPPORTED_SLOT_TYPE ( slot
);
730 DBGC2 ( xhci
, "XHCI %p port %d %sv%04x type %d",
731 xhci
, port
, name
.text
, protocol
, type
);
732 ports
= readl ( xhci
->cap
+ supported
+ XHCI_SUPPORTED_PORTS
);
733 psic
= XHCI_SUPPORTED_PORTS_PSIC ( ports
);
735 DBGC2 ( xhci
, " speeds" );
736 for ( i
= 0 ; i
< psic
; i
++ ) {
737 psi
= readl ( xhci
->cap
+ supported
+
738 XHCI_SUPPORTED_PSI ( i
) );
739 psiv
= XHCI_SUPPORTED_PSI_VALUE ( psi
);
740 DBGC2 ( xhci
, " %d:%s", psiv
,
741 xhci_speed_name ( psi
) );
744 DBGC2 ( xhci
, "\n" );
751 * Find port slot type
753 * @v xhci xHCI device
754 * @v port Port number
755 * @ret type Slot type, or negative error
757 static int xhci_port_slot_type ( struct xhci_device
*xhci
, unsigned int port
) {
758 unsigned int supported
= xhci_supported_protocol ( xhci
, port
);
762 /* Fail if there is no supported protocol */
767 slot
= readl ( xhci
->cap
+ supported
+ XHCI_SUPPORTED_SLOT
);
768 type
= XHCI_SUPPORTED_SLOT_TYPE ( slot
);
776 * @v xhci xHCI device
777 * @v port Port number
778 * @v psiv Protocol speed ID value
779 * @ret speed Port speed, or negative error
781 static int xhci_port_speed ( struct xhci_device
*xhci
, unsigned int port
,
782 unsigned int psiv
) {
783 unsigned int supported
= xhci_supported_protocol ( xhci
, port
);
785 unsigned int mantissa
;
786 unsigned int exponent
;
792 /* Fail if there is no supported protocol */
796 /* Get protocol speed ID count */
797 ports
= readl ( xhci
->cap
+ supported
+ XHCI_SUPPORTED_PORTS
);
798 psic
= XHCI_SUPPORTED_PORTS_PSIC ( ports
);
800 /* Use the default mappings if applicable */
803 case XHCI_SPEED_LOW
: return USB_SPEED_LOW
;
804 case XHCI_SPEED_FULL
: return USB_SPEED_FULL
;
805 case XHCI_SPEED_HIGH
: return USB_SPEED_HIGH
;
806 case XHCI_SPEED_SUPER
: return USB_SPEED_SUPER
;
808 DBGC ( xhci
, "XHCI %p port %d non-standard PSI value "
809 "%d\n", xhci
, port
, psiv
);
814 /* Iterate over PSI dwords looking for a match */
815 for ( i
= 0 ; i
< psic
; i
++ ) {
816 psi
= readl ( xhci
->cap
+ supported
+ XHCI_SUPPORTED_PSI ( i
));
817 if ( psiv
== XHCI_SUPPORTED_PSI_VALUE ( psi
) ) {
818 mantissa
= XHCI_SUPPORTED_PSI_MANTISSA ( psi
);
819 exponent
= XHCI_SUPPORTED_PSI_EXPONENT ( psi
);
820 speed
= USB_SPEED ( mantissa
, exponent
);
825 DBGC ( xhci
, "XHCI %p port %d spurious PSI value %d\n",
831 * Find protocol speed ID value
833 * @v xhci xHCI device
834 * @v port Port number
836 * @ret psiv Protocol speed ID value, or negative error
838 static int xhci_port_psiv ( struct xhci_device
*xhci
, unsigned int port
,
839 unsigned int speed
) {
840 unsigned int supported
= xhci_supported_protocol ( xhci
, port
);
842 unsigned int mantissa
;
843 unsigned int exponent
;
849 /* Fail if there is no supported protocol */
853 /* Get protocol speed ID count */
854 ports
= readl ( xhci
->cap
+ supported
+ XHCI_SUPPORTED_PORTS
);
855 psic
= XHCI_SUPPORTED_PORTS_PSIC ( ports
);
857 /* Use the default mappings if applicable */
860 case USB_SPEED_LOW
: return XHCI_SPEED_LOW
;
861 case USB_SPEED_FULL
: return XHCI_SPEED_FULL
;
862 case USB_SPEED_HIGH
: return XHCI_SPEED_HIGH
;
863 case USB_SPEED_SUPER
: return XHCI_SPEED_SUPER
;
865 DBGC ( xhci
, "XHCI %p port %d non-standad speed %d\n",
871 /* Iterate over PSI dwords looking for a match */
872 for ( i
= 0 ; i
< psic
; i
++ ) {
873 psi
= readl ( xhci
->cap
+ supported
+ XHCI_SUPPORTED_PSI ( i
));
874 mantissa
= XHCI_SUPPORTED_PSI_MANTISSA ( psi
);
875 exponent
= XHCI_SUPPORTED_PSI_EXPONENT ( psi
);
876 if ( speed
== USB_SPEED ( mantissa
, exponent
) ) {
877 psiv
= XHCI_SUPPORTED_PSI_VALUE ( psi
);
882 DBGC ( xhci
, "XHCI %p port %d unrepresentable speed %#x\n",
887 /******************************************************************************
889 * Device context base address array
891 ******************************************************************************
895 * Allocate device context base address array
897 * @v xhci xHCI device
898 * @ret rc Return status code
900 static int xhci_dcbaa_alloc ( struct xhci_device
*xhci
) {
905 /* Allocate and initialise structure. Must be at least
906 * 64-byte aligned and must not cross a page boundary, so
907 * align on its own size (rounded up to a power of two and
908 * with a minimum of 64 bytes).
910 len
= ( ( xhci
->slots
+ 1 ) * sizeof ( xhci
->dcbaa
[0] ) );
911 xhci
->dcbaa
= malloc_dma ( len
, xhci_align ( len
) );
912 if ( ! xhci
->dcbaa
) {
913 DBGC ( xhci
, "XHCI %p could not allocate DCBAA\n", xhci
);
917 memset ( xhci
->dcbaa
, 0, len
);
919 /* Program DCBAA pointer */
920 dcbaap
= virt_to_phys ( xhci
->dcbaa
);
921 if ( ( rc
= xhci_writeq ( xhci
, dcbaap
,
922 xhci
->op
+ XHCI_OP_DCBAAP
) ) != 0 )
925 DBGC2 ( xhci
, "XHCI %p DCBAA at [%08lx,%08lx)\n",
926 xhci
, dcbaap
, ( dcbaap
+ len
) );
930 free_dma ( xhci
->dcbaa
, len
);
936 * Free device context base address array
938 * @v xhci xHCI device
940 static void xhci_dcbaa_free ( struct xhci_device
*xhci
) {
945 for ( i
= 0 ; i
<= xhci
->slots
; i
++ )
946 assert ( xhci
->dcbaa
[i
] == 0 );
948 /* Clear DCBAA pointer */
949 xhci_writeq ( xhci
, 0, xhci
->op
+ XHCI_OP_DCBAAP
);
952 len
= ( ( xhci
->slots
+ 1 ) * sizeof ( xhci
->dcbaa
[0] ) );
953 free_dma ( xhci
->dcbaa
, len
);
956 /******************************************************************************
960 ******************************************************************************
964 * Allocate scratchpad buffers
966 * @v xhci xHCI device
967 * @ret rc Return status code
969 static int xhci_scratchpad_alloc ( struct xhci_device
*xhci
) {
976 /* Do nothing if no scratchpad buffers are used */
977 if ( ! xhci
->scratchpads
)
980 /* Allocate scratchpads */
981 len
= ( xhci
->scratchpads
* xhci
->pagesize
);
982 xhci
->scratchpad
= umalloc ( len
);
983 if ( ! xhci
->scratchpad
) {
984 DBGC ( xhci
, "XHCI %p could not allocate scratchpad buffers\n",
989 memset_user ( xhci
->scratchpad
, 0, 0, len
);
991 /* Allocate scratchpad array */
992 array_len
= ( xhci
->scratchpads
* sizeof ( xhci
->scratchpad_array
[0] ));
993 xhci
->scratchpad_array
=
994 malloc_dma ( array_len
, xhci_align ( array_len
) );
995 if ( ! xhci
->scratchpad_array
) {
996 DBGC ( xhci
, "XHCI %p could not allocate scratchpad buffer "
999 goto err_alloc_array
;
1002 /* Populate scratchpad array */
1003 for ( i
= 0 ; i
< xhci
->scratchpads
; i
++ ) {
1004 phys
= user_to_phys ( xhci
->scratchpad
, ( i
* xhci
->pagesize
));
1005 xhci
->scratchpad_array
[i
] = phys
;
1008 /* Set scratchpad array pointer */
1009 assert ( xhci
->dcbaa
!= NULL
);
1010 xhci
->dcbaa
[0] = cpu_to_le64 ( virt_to_phys ( xhci
->scratchpad_array
));
1012 DBGC2 ( xhci
, "XHCI %p scratchpad [%08lx,%08lx) array [%08lx,%08lx)\n",
1013 xhci
, user_to_phys ( xhci
->scratchpad
, 0 ),
1014 user_to_phys ( xhci
->scratchpad
, len
),
1015 virt_to_phys ( xhci
->scratchpad_array
),
1016 ( virt_to_phys ( xhci
->scratchpad_array
) + array_len
) );
1019 free_dma ( xhci
->scratchpad_array
, array_len
);
1021 ufree ( xhci
->scratchpad
);
1027 * Free scratchpad buffers
1029 * @v xhci xHCI device
1031 static void xhci_scratchpad_free ( struct xhci_device
*xhci
) {
1034 /* Do nothing if no scratchpad buffers are used */
1035 if ( ! xhci
->scratchpads
)
1038 /* Clear scratchpad array pointer */
1039 assert ( xhci
->dcbaa
!= NULL
);
1042 /* Free scratchpad array */
1043 array_len
= ( xhci
->scratchpads
* sizeof ( xhci
->scratchpad_array
[0] ));
1044 free_dma ( xhci
->scratchpad_array
, array_len
);
1046 /* Free scratchpads */
1047 ufree ( xhci
->scratchpad
);
1050 /******************************************************************************
1052 * Run / stop / reset
1054 ******************************************************************************
1060 * @v xhci xHCI device
1062 static void xhci_run ( struct xhci_device
*xhci
) {
1066 /* Configure number of device slots */
1067 config
= readl ( xhci
->op
+ XHCI_OP_CONFIG
);
1068 config
&= ~XHCI_CONFIG_MAX_SLOTS_EN_MASK
;
1069 config
|= XHCI_CONFIG_MAX_SLOTS_EN ( xhci
->slots
);
1070 writel ( config
, xhci
->op
+ XHCI_OP_CONFIG
);
1072 /* Set run/stop bit */
1073 usbcmd
= readl ( xhci
->op
+ XHCI_OP_USBCMD
);
1074 usbcmd
|= XHCI_USBCMD_RUN
;
1075 writel ( usbcmd
, xhci
->op
+ XHCI_OP_USBCMD
);
1081 * @v xhci xHCI device
1082 * @ret rc Return status code
1084 static int xhci_stop ( struct xhci_device
*xhci
) {
1089 /* Clear run/stop bit */
1090 usbcmd
= readl ( xhci
->op
+ XHCI_OP_USBCMD
);
1091 usbcmd
&= ~XHCI_USBCMD_RUN
;
1092 writel ( usbcmd
, xhci
->op
+ XHCI_OP_USBCMD
);
1094 /* Wait for device to stop */
1095 for ( i
= 0 ; i
< XHCI_STOP_MAX_WAIT_MS
; i
++ ) {
1097 /* Check if device is stopped */
1098 usbsts
= readl ( xhci
->op
+ XHCI_OP_USBSTS
);
1099 if ( usbsts
& XHCI_USBSTS_HCH
)
1106 DBGC ( xhci
, "XHCI %p timed out waiting for stop\n", xhci
);
1113 * @v xhci xHCI device
1114 * @ret rc Return status code
1116 static int xhci_reset ( struct xhci_device
*xhci
) {
1121 /* The xHCI specification states that resetting a running
1122 * device may result in undefined behaviour, so try stopping
1125 if ( ( rc
= xhci_stop ( xhci
) ) != 0 ) {
1126 /* Ignore errors and attempt to reset the device anyway */
1130 writel ( XHCI_USBCMD_HCRST
, xhci
->op
+ XHCI_OP_USBCMD
);
1132 /* Wait for reset to complete */
1133 for ( i
= 0 ; i
< XHCI_RESET_MAX_WAIT_MS
; i
++ ) {
1135 /* Check if reset is complete */
1136 usbcmd
= readl ( xhci
->op
+ XHCI_OP_USBCMD
);
1137 if ( ! ( usbcmd
& XHCI_USBCMD_HCRST
) )
1144 DBGC ( xhci
, "XHCI %p timed out waiting for reset\n", xhci
);
1148 /******************************************************************************
1150 * Transfer request blocks
1152 ******************************************************************************
1156 * Allocate transfer request block ring
1158 * @v xhci xHCI device
1160 * @v shift Ring size (log2)
1161 * @v slot Device slot
1162 * @v target Doorbell target
1163 * @v stream Doorbell stream ID
1164 * @ret rc Return status code
1166 static int xhci_ring_alloc ( struct xhci_device
*xhci
,
1167 struct xhci_trb_ring
*ring
,
1168 unsigned int shift
, unsigned int slot
,
1169 unsigned int target
, unsigned int stream
) {
1170 struct xhci_trb_link
*link
;
1175 assert ( shift
> 0 );
1177 /* Initialise structure */
1178 memset ( ring
, 0, sizeof ( *ring
) );
1179 ring
->shift
= shift
;
1180 count
= ( 1U << shift
);
1181 ring
->mask
= ( count
- 1 );
1182 ring
->len
= ( ( count
+ 1 /* Link TRB */ ) * sizeof ( ring
->trb
[0] ) );
1183 ring
->db
= ( xhci
->db
+ ( slot
* sizeof ( ring
->dbval
) ) );
1184 ring
->dbval
= XHCI_DBVAL ( target
, stream
);
1186 /* Allocate I/O buffers */
1187 ring
->iobuf
= zalloc ( count
* sizeof ( ring
->iobuf
[0] ) );
1188 if ( ! ring
->iobuf
) {
1190 goto err_alloc_iobuf
;
1194 ring
->trb
= malloc_dma ( ring
->len
, xhci_align ( ring
->len
) );
1195 if ( ! ring
->trb
) {
1199 memset ( ring
->trb
, 0, ring
->len
);
1201 /* Initialise Link TRB */
1202 link
= &ring
->trb
[count
].link
;
1203 link
->next
= cpu_to_le64 ( virt_to_phys ( ring
->trb
) );
1204 link
->flags
= XHCI_TRB_TC
;
1205 link
->type
= XHCI_TRB_LINK
;
1210 free_dma ( ring
->trb
, ring
->len
);
1212 free ( ring
->iobuf
);
1218 * Reset transfer request block ring
1222 static void xhci_ring_reset ( struct xhci_trb_ring
*ring
) {
1223 unsigned int count
= ( 1U << ring
->shift
);
1225 /* Reset producer and consumer counters */
1229 /* Reset TRBs (except Link TRB) */
1230 memset ( ring
->trb
, 0, ( count
* sizeof ( ring
->trb
[0] ) ) );
1234 * Free transfer request block ring
1238 static void xhci_ring_free ( struct xhci_trb_ring
*ring
) {
1239 unsigned int count
= ( 1U << ring
->shift
);
1243 assert ( ring
->cons
== ring
->prod
);
1244 for ( i
= 0 ; i
< count
; i
++ )
1245 assert ( ring
->iobuf
[i
] == NULL
);
1248 free_dma ( ring
->trb
, ring
->len
);
1250 /* Free I/O buffers */
1251 free ( ring
->iobuf
);
1255 * Enqueue a transfer request block
1258 * @v iobuf I/O buffer (if any)
1259 * @v trb Transfer request block (with empty Cycle flag)
1260 * @ret rc Return status code
1262 * This operation does not implicitly ring the doorbell register.
/* Enqueue a single TRB onto a ring, recording the associated I/O
 * buffer (if any) against its slot.  Does not ring the doorbell.
 *
 * NOTE(review): line-sampled extraction; the declarations of
 * prod/mask/index/cycle, the failure return when the ring is full,
 * original line 1294 (between the status and control writes -
 * presumably a write barrier; confirm) and the final return are not
 * visible here.
 */
1264 static int xhci_enqueue ( struct xhci_trb_ring
*ring
, struct io_buffer
*iobuf
,
1265 const union xhci_trb
*trb
) {
1266 union xhci_trb
*dest
;
/* Caller must leave the Cycle flag clear; it is filled in here */
1273 assert ( ! ( trb
->common
.flags
& XHCI_TRB_C
) );
1275 /* Fail if ring is full */
1276 if ( ! xhci_ring_remaining ( ring
) )
1279 /* Update producer counter (and link TRB, if applicable) */
1280 prod
= ring
->prod
++;
/* Cycle bit inverts on each full pass around the ring */
1282 cycle
= ( ( ~( prod
>> ring
->shift
) ) & XHCI_TRB_C
);
1283 index
= ( prod
& mask
);
/* Keep the Link TRB's cycle state consistent with the producer */
1285 ring
->link
->flags
= ( XHCI_TRB_TC
| ( cycle
^ XHCI_TRB_C
) );
1287 /* Record I/O buffer */
1288 ring
->iobuf
[index
] = iobuf
;
/* Copy the TRB into place: parameter and status words first, then
 * the control word (carrying the Cycle bit) last, so ownership is
 * only handed to the controller once the TRB is fully written */
1291 dest
= &ring
->trb
[index
];
1292 dest
->template.parameter
= trb
->template.parameter
;
1293 dest
->template.status
= trb
->template.status
;
1295 dest
->template.control
= ( trb
->template.control
|
1296 cpu_to_le32 ( cycle
) );
1302 * Dequeue a transfer request block
1305 * @ret iobuf I/O buffer
/* Dequeue a single transfer request block.
 *
 * Advances the consumer counter and returns (and clears) the I/O
 * buffer recorded against the consumed slot; the slot may legitimately
 * hold NULL for intermediate TRBs of a multi-TRB transfer.
 *
 * NOTE(review): line-sampled extraction; the declarations of
 * cons/mask/index and the final "return iobuf;" are not visible here.
 */
1307 static struct io_buffer
* xhci_dequeue ( struct xhci_trb_ring
*ring
) {
1308 struct io_buffer
*iobuf
;
/* Ring must not be empty */
1314 assert ( xhci_ring_fill ( ring
) != 0 );
1316 /* Update consumer counter */
1317 cons
= ring
->cons
++;
1319 index
= ( cons
& mask
);
1321 /* Retrieve I/O buffer */
1322 iobuf
= ring
->iobuf
[index
];
/* Clear the slot so xhci_ring_free()'s emptiness assertions hold */
1323 ring
->iobuf
[index
] = NULL
;
1329 * Enqueue multiple transfer request blocks
1332 * @v iobuf I/O buffer
1333 * @v trbs Transfer request blocks (with empty Cycle flag)
1334 * @v count Number of transfer request blocks
1335 * @ret rc Return status code
1337 * This operation does not implicitly ring the doorbell register.
/* Enqueue multiple TRBs as one logical transfer, recording the I/O
 * buffer only against the final TRB (the completion event for that
 * TRB is what yields the buffer back via xhci_dequeue_multi()).
 * Does not ring the doorbell.
 *
 * Capacity is checked up front for the whole run, so the individual
 * xhci_enqueue() calls cannot fail.
 *
 * NOTE(review): line-sampled extraction; the loop construct around
 * the per-TRB enqueue (which decrements count) and the returns are
 * not visible here - confirm against the full source.
 */
1339 static int xhci_enqueue_multi ( struct xhci_trb_ring
*ring
,
1340 struct io_buffer
*iobuf
,
1341 const union xhci_trb
*trbs
,
1342 unsigned int count
) {
1343 const union xhci_trb
*trb
= trbs
;
/* A multi-TRB enqueue must carry an I/O buffer */
1347 assert ( iobuf
!= NULL
);
1349 /* Fail if ring does not have sufficient space */
1350 if ( xhci_ring_remaining ( ring
) < count
)
1353 /* Enqueue each TRB, recording the I/O buffer with the final TRB */
/* (buffer attached only when count has reached zero, i.e. last TRB) */
1355 rc
= xhci_enqueue ( ring
, ( count ? NULL
: iobuf
), trb
++ );
1356 assert ( rc
== 0 ); /* Should never be able to fail */
1363 * Dequeue multiple transfer request blocks
1366 * @ret iobuf I/O buffer
/* Dequeue TRBs until one carrying an I/O buffer is found.
 *
 * Counterpart to xhci_enqueue_multi(): intermediate TRBs of a
 * multi-TRB transfer have no buffer recorded, so keep dequeuing
 * until a non-NULL buffer appears.
 *
 * NOTE(review): line-sampled extraction; the opening "do {" of the
 * loop and the trailing "return iobuf;" are not visible here.
 */
1368 static struct io_buffer
* xhci_dequeue_multi ( struct xhci_trb_ring
*ring
) {
1369 struct io_buffer
*iobuf
;
1371 /* Dequeue TRBs until we reach the final TRB for an I/O buffer */
1373 iobuf
= xhci_dequeue ( ring
);
1374 } while ( iobuf
== NULL
);
1380 * Ring doorbell register
/* Ring a doorbell register: write the ring's pre-computed doorbell
 * value (dbval) to its doorbell register address (db).
 *
 * NOTE(review): line-sampled extraction; original lines 1386-1387
 * are missing (presumably a write barrier before the doorbell write
 * so preceding TRB writes are visible to the controller) - confirm.
 */
1384 static inline __attribute__ (( always_inline
)) void
1385 xhci_doorbell ( struct xhci_trb_ring
*ring
) {
1388 writel ( ring
->dbval
, ring
->db
);
1391 /******************************************************************************
1393 * Command and event rings
1395 ******************************************************************************
1399 * Allocate command ring
1401 * @v xhci xHCI device
1402 * @ret rc Return status code
/* Allocate the command ring and program its physical base address
 * into the Command Ring Control Register (CRCR) with the Ring Cycle
 * State (RCS) bit set.
 *
 * NOTE(review): line-sampled extraction; declarations of rc/crp, the
 * remaining xhci_ring_alloc() arguments, the success return and the
 * error-path labels are not visible here.
 */
1404 static int xhci_command_alloc ( struct xhci_device
*xhci
) {
1408 /* Allocate TRB ring */
1409 if ( ( rc
= xhci_ring_alloc ( xhci
, &xhci
->command
, XHCI_CMD_TRBS_LOG2
,
1411 goto err_ring_alloc
;
1413 /* Program command ring control register */
1414 crp
= virt_to_phys ( xhci
->command
.trb
);
1415 if ( ( rc
= xhci_writeq ( xhci
, ( crp
| XHCI_CRCR_RCS
),
1416 xhci
->op
+ XHCI_OP_CRCR
) ) != 0 )
1419 DBGC2 ( xhci
, "XHCI %p CRCR at [%08lx,%08lx)\n",
1420 xhci
, crp
, ( crp
+ xhci
->command
.len
) );
/* Error path: release the ring */
1424 xhci_ring_free ( &xhci
->command
);
1432 * @v xhci xHCI device
/* Free the command ring.
 *
 * Asserts that the controller has stopped processing commands (CRR
 * clear in CRCR), clears the command ring control register, then
 * frees the TRB ring itself.
 */
1434 static void xhci_command_free ( struct xhci_device
*xhci
) {
/* Command ring must no longer be running */
1437 assert ( ( readl ( xhci
->op
+ XHCI_OP_CRCR
) & XHCI_CRCR_CRR
) == 0 );
1439 /* Clear command ring control register */
1440 xhci_writeq ( xhci
, 0, xhci
->op
+ XHCI_OP_CRCR
);
/* Free TRB ring */
1443 xhci_ring_free ( &xhci
->command
);
1447 * Allocate event ring
1449 * @v xhci xHCI device
1450 * @ret rc Return status code
1452 static int xhci_event_alloc ( struct xhci_device
*xhci
) {
1453 struct xhci_event_ring
*event
= &xhci
->event
;
1458 /* Allocate event ring */
1459 count
= ( 1 << XHCI_EVENT_TRBS_LOG2
);
1460 len
= ( count
* sizeof ( event
->trb
[0] ) );
1461 event
->trb
= malloc_dma ( len
, xhci_align ( len
) );
1462 if ( ! event
->trb
) {
1466 memset ( event
->trb
, 0, len
);
1468 /* Allocate event ring segment table */
1469 event
->segment
= malloc_dma ( sizeof ( event
->segment
[0] ),
1470 xhci_align ( sizeof (event
->segment
[0])));
1471 if ( ! event
->segment
) {
1473 goto err_alloc_segment
;
1475 memset ( event
->segment
, 0, sizeof ( event
->segment
[0] ) );
1476 event
->segment
[0].base
= cpu_to_le64 ( virt_to_phys ( event
->trb
) );
1477 event
->segment
[0].count
= cpu_to_le32 ( count
);
1479 /* Program event ring registers */
1480 writel ( 1, xhci
->run
+ XHCI_RUN_ERSTSZ ( 0 ) );
1481 if ( ( rc
= xhci_writeq ( xhci
, virt_to_phys ( event
->trb
),
1482 xhci
->run
+ XHCI_RUN_ERDP ( 0 ) ) ) != 0 )
1483 goto err_writeq_erdp
;
1484 if ( ( rc
= xhci_writeq ( xhci
, virt_to_phys ( event
->segment
),
1485 xhci
->run
+ XHCI_RUN_ERSTBA ( 0 ) ) ) != 0 )
1486 goto err_writeq_erstba
;
1488 DBGC2 ( xhci
, "XHCI %p event ring [%08lx,%08lx) table [%08lx,%08lx)\n",
1489 xhci
, virt_to_phys ( event
->trb
),
1490 ( virt_to_phys ( event
->trb
) + len
),
1491 virt_to_phys ( event
->segment
),
1492 ( virt_to_phys ( event
->segment
) +
1493 sizeof (event
->segment
[0] ) ) );
1496 xhci_writeq ( xhci
, 0, xhci
->run
+ XHCI_RUN_ERSTBA ( 0 ) );
1498 xhci_writeq ( xhci
, 0, xhci
->run
+ XHCI_RUN_ERDP ( 0 ) );
1500 free_dma ( event
->trb
, len
);
1502 free_dma ( event
->segment
, sizeof ( event
->segment
[0] ) );
1510 * @v xhci xHCI device
1512 static void xhci_event_free ( struct xhci_device
*xhci
) {
1513 struct xhci_event_ring
*event
= &xhci
->event
;
1517 /* Clear event ring registers */
1518 writel ( 0, xhci
->run
+ XHCI_RUN_ERSTSZ ( 0 ) );
1519 xhci_writeq ( xhci
, 0, xhci
->run
+ XHCI_RUN_ERSTBA ( 0 ) );
1520 xhci_writeq ( xhci
, 0, xhci
->run
+ XHCI_RUN_ERDP ( 0 ) );
1522 /* Free event ring segment table */
1523 free_dma ( event
->segment
, sizeof ( event
->segment
[0] ) );
1525 /* Free event ring */
1526 count
= ( 1 << XHCI_EVENT_TRBS_LOG2
);
1527 len
= ( count
* sizeof ( event
->trb
[0] ) );
1528 free_dma ( event
->trb
, len
);
1532 * Handle transfer event
1534 * @v xhci xHCI device
1535 * @v transfer Transfer event TRB
1537 static void xhci_transfer ( struct xhci_device
*xhci
,
1538 struct xhci_trb_transfer
*transfer
) {
1539 struct xhci_slot
*slot
;
1540 struct xhci_endpoint
*endpoint
;
1541 struct io_buffer
*iobuf
;
1544 /* Profile transfer events */
1545 profile_start ( &xhci_transfer_profiler
);
1548 if ( ( transfer
->slot
> xhci
->slots
) ||
1549 ( ( slot
= xhci
->slot
[transfer
->slot
] ) == NULL
) ) {
1550 DBGC ( xhci
, "XHCI %p transfer event invalid slot %d:\n",
1551 xhci
, transfer
->slot
);
1552 DBGC_HDA ( xhci
, 0, transfer
, sizeof ( *transfer
) );
1556 /* Identify endpoint */
1557 if ( ( transfer
->endpoint
> XHCI_CTX_END
) ||
1558 ( ( endpoint
= slot
->endpoint
[transfer
->endpoint
] ) == NULL
) ) {
1559 DBGC ( xhci
, "XHCI %p slot %d transfer event invalid epid "
1560 "%d:\n", xhci
, slot
->id
, transfer
->endpoint
);
1561 DBGC_HDA ( xhci
, 0, transfer
, sizeof ( *transfer
) );
1565 /* Dequeue TRB(s) */
1566 iobuf
= xhci_dequeue_multi ( &endpoint
->ring
);
1567 assert ( iobuf
!= NULL
);
1569 /* Check for errors */
1570 if ( ! ( ( transfer
->code
== XHCI_CMPLT_SUCCESS
) ||
1571 ( transfer
->code
== XHCI_CMPLT_SHORT
) ) ) {
1573 /* Construct error */
1574 rc
= -ECODE ( transfer
->code
);
1575 DBGC ( xhci
, "XHCI %p slot %d ctx %d failed (code %d): %s\n",
1576 xhci
, slot
->id
, endpoint
->ctx
, transfer
->code
,
1578 DBGC_HDA ( xhci
, 0, transfer
, sizeof ( *transfer
) );
1581 assert ( ( endpoint
->context
->state
& XHCI_ENDPOINT_STATE_MASK
)
1582 != XHCI_ENDPOINT_RUNNING
);
1584 /* Report failure to USB core */
1585 usb_complete_err ( endpoint
->ep
, iobuf
, rc
);
1589 /* Record actual transfer size */
1590 iob_unput ( iobuf
, le16_to_cpu ( transfer
->residual
) );
1592 /* Sanity check (for successful completions only) */
1593 assert ( xhci_ring_consumed ( &endpoint
->ring
) ==
1594 le64_to_cpu ( transfer
->transfer
) );
1596 /* Report completion to USB core */
1597 usb_complete ( endpoint
->ep
, iobuf
);
1598 profile_stop ( &xhci_transfer_profiler
);
1602 * Handle command completion event
1604 * @v xhci xHCI device
1605 * @v complete Command completion event
/* Handle a command completion event.
 *
 * Ignores "command ring stopped" notifications and completions that
 * arrive with no command pending; otherwise dequeues the command TRB,
 * sanity-checks that the completion refers to the TRB just consumed,
 * copies the completion event over the pending command TRB and clears
 * the pending pointer - which is how xhci_command() observes that its
 * command has completed.
 *
 * NOTE(review): line-sampled extraction; declaration of rc and the
 * early returns after the two "ignore" branches are not visible here.
 */
1607 static void xhci_complete ( struct xhci_device
*xhci
,
1608 struct xhci_trb_complete
*complete
) {
1611 /* Ignore "command ring stopped" notifications */
1612 if ( complete
->code
== XHCI_CMPLT_CMD_STOPPED
) {
1613 DBGC2 ( xhci
, "XHCI %p command ring stopped\n", xhci
);
1617 /* Ignore unexpected completions */
1618 if ( ! xhci
->pending
) {
1619 rc
= -ECODE ( complete
->code
);
1620 DBGC ( xhci
, "XHCI %p unexpected completion (code %d): %s\n",
1621 xhci
, complete
->code
, strerror ( rc
) );
1622 DBGC_HDA ( xhci
, 0, complete
, sizeof ( *complete
) );
1626 /* Dequeue command TRB */
1627 xhci_dequeue ( &xhci
->command
);
/* Completion's command pointer should match the TRB just consumed */
1630 assert ( xhci_ring_consumed ( &xhci
->command
) ==
1631 le64_to_cpu ( complete
->command
) );
1633 /* Record completion */
/* Copy into the caller's TRB so xhci_command() can read the result */
1634 memcpy ( xhci
->pending
, complete
, sizeof ( *xhci
->pending
) );
1635 xhci
->pending
= NULL
;
1639 * Handle port status event
1641 * @v xhci xHCI device
1642 * @v port Port status event
/* Handle a port status change event.
 *
 * Acknowledges the change by writing back PORTSC with only the
 * preserved read/write bits and the write-1-to-clear change bits set
 * (masking out bits whose write would have side effects), then
 * notifies the USB core that the port has changed.
 */
1644 static void xhci_port_status ( struct xhci_device
*xhci
,
1645 struct xhci_trb_port_status
*port
) {
/* Port number from the event must be within this controller's range */
1649 assert ( ( port
->port
> 0 ) && ( port
->port
<= xhci
->ports
) );
1651 /* Clear port status change bits */
1652 portsc
= readl ( xhci
->op
+ XHCI_OP_PORTSC ( port
->port
) );
/* Keep only preserved bits plus the write-1-to-clear change bits */
1653 portsc
&= ( XHCI_PORTSC_PRESERVE
| XHCI_PORTSC_CHANGE
);
1654 writel ( portsc
, xhci
->op
+ XHCI_OP_PORTSC ( port
->port
) );
1656 /* Report port status change */
1657 usb_port_changed ( usb_port ( xhci
->bus
->hub
, port
->port
) );
1661 * Handle host controller event
1663 * @v xhci xHCI device
1664 * @v host Host controller event
/* Handle a host controller event by logging it as an error.
 *
 * NOTE(review): line-sampled extraction; the local declaration of rc
 * is not visible here.
 */
1666 static void xhci_host_controller ( struct xhci_device
*xhci
,
1667 struct xhci_trb_host_controller
*host
) {
1670 /* Construct error */
1671 rc
= -ECODE ( host
->code
);
1672 DBGC ( xhci
, "XHCI %p host controller event (code %d): %s\n",
1673 xhci
, host
->code
, strerror ( rc
) );
1679 * @v xhci xHCI device
1681 static void xhci_event_poll ( struct xhci_device
*xhci
) {
1682 struct xhci_event_ring
*event
= &xhci
->event
;
1683 union xhci_trb
*trb
;
1684 unsigned int shift
= XHCI_EVENT_TRBS_LOG2
;
1685 unsigned int count
= ( 1 << shift
);
1686 unsigned int mask
= ( count
- 1 );
1687 unsigned int consumed
;
1690 /* Poll for events */
1691 profile_start ( &xhci_event_profiler
);
1692 for ( consumed
= 0 ; ; consumed
++ ) {
1694 /* Stop if we reach an empty TRB */
1696 trb
= &event
->trb
[ event
->cons
& mask
];
1697 if ( ! ( ( trb
->common
.flags
^
1698 ( event
->cons
>> shift
) ) & XHCI_TRB_C
) )
1702 type
= ( trb
->common
.type
& XHCI_TRB_TYPE_MASK
);
1705 case XHCI_TRB_TRANSFER
:
1706 xhci_transfer ( xhci
, &trb
->transfer
);
1709 case XHCI_TRB_COMPLETE
:
1710 xhci_complete ( xhci
, &trb
->complete
);
1713 case XHCI_TRB_PORT_STATUS
:
1714 xhci_port_status ( xhci
, &trb
->port
);
1717 case XHCI_TRB_HOST_CONTROLLER
:
1718 xhci_host_controller ( xhci
, &trb
->host
);
1722 DBGC ( xhci
, "XHCI %p unrecognised event %#x\n:",
1723 xhci
, event
->cons
);
1724 DBGC_HDA ( xhci
, virt_to_phys ( trb
),
1725 trb
, sizeof ( *trb
) );
1729 /* Consume this TRB */
1733 /* Update dequeue pointer if applicable */
1735 xhci_writeq ( xhci
, virt_to_phys ( trb
),
1736 xhci
->run
+ XHCI_RUN_ERDP ( 0 ) );
1737 profile_stop ( &xhci_event_profiler
);
1744 * @v xhci xHCI device
/* Abort the currently-executing command.
 *
 * Sets the Command Abort (CA) bit in CRCR, waits a fixed delay for
 * the abort to take effect, drains (and discards) any final command
 * status event, then resets the command ring and reprograms CRCR
 * (with the Ring Cycle State bit) so the ring can be reused.
 *
 * NOTE(review): line-sampled extraction; the local declaration of
 * crp is not visible here.
 */
1746 static void xhci_abort ( struct xhci_device
*xhci
) {
1749 /* Abort the command */
1750 DBGC2 ( xhci
, "XHCI %p aborting command\n", xhci
);
1751 xhci_writeq ( xhci
, XHCI_CRCR_CA
, xhci
->op
+ XHCI_OP_CRCR
);
1753 /* Allow time for command to abort */
1754 mdelay ( XHCI_COMMAND_ABORT_DELAY_MS
);
/* Command ring should no longer be running */
1757 assert ( ( readl ( xhci
->op
+ XHCI_OP_CRCR
) & XHCI_CRCR_CRR
) == 0 );
1759 /* Consume (and ignore) any final command status */
1760 xhci_event_poll ( xhci
);
1762 /* Reset the command ring control register */
1763 xhci_ring_reset ( &xhci
->command
);
1764 crp
= virt_to_phys ( xhci
->command
.trb
);
1765 xhci_writeq ( xhci
, ( crp
| XHCI_CRCR_RCS
), xhci
->op
+ XHCI_OP_CRCR
);
1769 * Issue command and wait for completion
1771 * @v xhci xHCI device
1772 * @v trb Transfer request block (with empty Cycle flag)
1773 * @ret rc Return status code
1775 * On a successful completion, the TRB will be overwritten with the
/* Issue a command TRB and poll the event ring until it completes.
 * On successful completion the caller's TRB has been overwritten
 * (by xhci_complete()) with the completion event.
 *
 * NOTE(review): line-sampled extraction; declarations of rc/i, the
 * per-iteration delay inside the wait loop (the bound's name
 * XHCI_COMMAND_MAX_WAIT_MS suggests ~1ms per iteration - confirm),
 * the success/error returns and error-path labels are not visible
 * here.
 */
1778 static int xhci_command ( struct xhci_device
*xhci
, union xhci_trb
*trb
) {
1779 struct xhci_trb_complete
*complete
= &trb
->complete
;
1783 /* Record the pending command */
1784 xhci
->pending
= trb
;
1786 /* Enqueue the command */
1787 if ( ( rc
= xhci_enqueue ( &xhci
->command
, NULL
, trb
) ) != 0 )
1790 /* Ring the command doorbell */
1791 xhci_doorbell ( &xhci
->command
);
1793 /* Wait for the command to complete */
1794 for ( i
= 0 ; i
< XHCI_COMMAND_MAX_WAIT_MS
; i
++ ) {
1796 /* Poll event ring */
1797 xhci_event_poll ( xhci
);
1799 /* Check for completion */
/* xhci_complete() clears ->pending and rewrites *trb, so the
 * completion code is read back out of the caller's TRB */
1800 if ( ! xhci
->pending
) {
1801 if ( complete
->code
!= XHCI_CMPLT_SUCCESS
) {
1802 rc
= -ECODE ( complete
->code
);
1803 DBGC ( xhci
, "XHCI %p command failed (code "
1804 "%d): %s\n", xhci
, complete
->code
,
1806 DBGC_HDA ( xhci
, 0, trb
, sizeof ( *trb
) );
/* Timeout path: abort the command and clear the pending pointer */
1817 DBGC ( xhci
, "XHCI %p timed out waiting for completion\n", xhci
);
1821 xhci_abort ( xhci
);
1824 xhci
->pending
= NULL
;
1829 * Issue NOP and wait for completion
1831 * @v xhci xHCI device
1832 * @ret rc Return status code
/* Issue a No-Op command and wait for its completion - exercises the
 * command and event rings without side effects.
 *
 * NOTE(review): line-sampled extraction; declarations of trb/rc and
 * the returns are not visible here.
 */
1834 static inline int xhci_nop ( struct xhci_device
*xhci
) {
1836 struct xhci_trb_common
*nop
= &trb
.common
;
1839 /* Construct command */
1840 memset ( nop
, 0, sizeof ( *nop
) );
/* Interrupt On Completion, so a completion event is generated */
1841 nop
->flags
= XHCI_TRB_IOC
;
1842 nop
->type
= XHCI_TRB_NOP_CMD
;
1844 /* Issue command and wait for completion */
1845 if ( ( rc
= xhci_command ( xhci
, &trb
) ) != 0 )
1854 * @v xhci xHCI device
1856 * @ret slot Device slot ID, or negative error
1858 static inline int xhci_enable_slot ( struct xhci_device
*xhci
,
1859 unsigned int type
) {
1861 struct xhci_trb_enable_slot
*enable
= &trb
.enable
;
1862 struct xhci_trb_complete
*enabled
= &trb
.complete
;
1866 /* Construct command */
1867 memset ( enable
, 0, sizeof ( *enable
) );
1868 enable
->slot
= type
;
1869 enable
->type
= XHCI_TRB_ENABLE_SLOT
;
1871 /* Issue command and wait for completion */
1872 if ( ( rc
= xhci_command ( xhci
, &trb
) ) != 0 ) {
1873 DBGC ( xhci
, "XHCI %p could not enable new slot: %s\n",
1874 xhci
, strerror ( rc
) );
1878 /* Extract slot number */
1879 slot
= enabled
->slot
;
1881 DBGC2 ( xhci
, "XHCI %p slot %d enabled\n", xhci
, slot
);
1888 * @v xhci xHCI device
1889 * @v slot Device slot
1890 * @ret rc Return status code
/* Issue a Disable Slot command for the given slot ID and wait for
 * completion; logs a message on failure.
 *
 * NOTE(review): line-sampled extraction; declarations of trb/rc and
 * the returns are not visible here.
 */
1892 static inline int xhci_disable_slot ( struct xhci_device
*xhci
,
1893 unsigned int slot
) {
1895 struct xhci_trb_disable_slot
*disable
= &trb
.disable
;
1898 /* Construct command */
1899 memset ( disable
, 0, sizeof ( *disable
) );
1900 disable
->type
= XHCI_TRB_DISABLE_SLOT
;
1901 disable
->slot
= slot
;
1903 /* Issue command and wait for completion */
1904 if ( ( rc
= xhci_command ( xhci
, &trb
) ) != 0 ) {
1905 DBGC ( xhci
, "XHCI %p could not disable slot %d: %s\n",
1906 xhci
, slot
, strerror ( rc
) );
1910 DBGC2 ( xhci
, "XHCI %p slot %d disabled\n", xhci
, slot
);
1915 * Issue context-based command and wait for completion
1917 * @v xhci xHCI device
1918 * @v slot Device slot
1919 * @v endpoint Endpoint
1921 * @v populate Input context populater
1922 * @ret rc Return status code
1924 static int xhci_context ( struct xhci_device
*xhci
, struct xhci_slot
*slot
,
1925 struct xhci_endpoint
*endpoint
, unsigned int type
,
1926 void ( * populate
) ( struct xhci_device
*xhci
,
1927 struct xhci_slot
*slot
,
1928 struct xhci_endpoint
*endpoint
,
1931 struct xhci_trb_context
*context
= &trb
.context
;
1936 /* Allocate an input context */
1937 len
= xhci_input_context_offset ( xhci
, XHCI_CTX_END
);
1938 input
= malloc_dma ( len
, xhci_align ( len
) );
1943 memset ( input
, 0, len
);
1945 /* Populate input context */
1946 populate ( xhci
, slot
, endpoint
, input
);
1948 /* Construct command */
1949 memset ( context
, 0, sizeof ( *context
) );
1950 context
->type
= type
;
1951 context
->input
= cpu_to_le64 ( virt_to_phys ( input
) );
1952 context
->slot
= slot
->id
;
1954 /* Issue command and wait for completion */
1955 if ( ( rc
= xhci_command ( xhci
, &trb
) ) != 0 )
1959 free_dma ( input
, len
);
1965 * Populate address device input context
1967 * @v xhci xHCI device
1968 * @v slot Device slot
1969 * @v endpoint Endpoint
1970 * @v input Input context
1972 static void xhci_address_device_input ( struct xhci_device
*xhci
,
1973 struct xhci_slot
*slot
,
1974 struct xhci_endpoint
*endpoint
,
1976 struct xhci_control_context
*control_ctx
;
1977 struct xhci_slot_context
*slot_ctx
;
1978 struct xhci_endpoint_context
*ep_ctx
;
1981 assert ( endpoint
->ctx
== XHCI_CTX_EP0
);
1983 /* Populate control context */
1984 control_ctx
= input
;
1985 control_ctx
->add
= cpu_to_le32 ( ( 1 << XHCI_CTX_SLOT
) |
1986 ( 1 << XHCI_CTX_EP0
) );
1988 /* Populate slot context */
1989 slot_ctx
= ( input
+ xhci_input_context_offset ( xhci
, XHCI_CTX_SLOT
));
1990 slot_ctx
->info
= cpu_to_le32 ( XHCI_SLOT_INFO ( 1, 0, slot
->psiv
,
1992 slot_ctx
->port
= slot
->port
;
1993 slot_ctx
->tt_id
= slot
->tt_id
;
1994 slot_ctx
->tt_port
= slot
->tt_port
;
1996 /* Populate control endpoint context */
1997 ep_ctx
= ( input
+ xhci_input_context_offset ( xhci
, XHCI_CTX_EP0
) );
1998 ep_ctx
->type
= XHCI_EP_TYPE_CONTROL
;
1999 ep_ctx
->burst
= endpoint
->ep
->burst
;
2000 ep_ctx
->mtu
= cpu_to_le16 ( endpoint
->ep
->mtu
);
2001 ep_ctx
->dequeue
= cpu_to_le64 ( virt_to_phys ( endpoint
->ring
.trb
) |
2003 ep_ctx
->trb_len
= cpu_to_le16 ( XHCI_EP0_TRB_LEN
);
2009 * @v xhci xHCI device
2010 * @v slot Device slot
2011 * @ret rc Return status code
2013 static inline int xhci_address_device ( struct xhci_device
*xhci
,
2014 struct xhci_slot
*slot
) {
2015 struct usb_device
*usb
= slot
->usb
;
2016 struct xhci_slot_context
*slot_ctx
;
2019 /* Assign device address */
2020 if ( ( rc
= xhci_context ( xhci
, slot
, slot
->endpoint
[XHCI_CTX_EP0
],
2021 XHCI_TRB_ADDRESS_DEVICE
,
2022 xhci_address_device_input
) ) != 0 )
2025 /* Get assigned address */
2026 slot_ctx
= ( slot
->context
+
2027 xhci_device_context_offset ( xhci
, XHCI_CTX_SLOT
) );
2028 usb
->address
= slot_ctx
->address
;
2029 DBGC2 ( xhci
, "XHCI %p assigned address %d to %s\n",
2030 xhci
, usb
->address
, usb
->name
);
2036 * Populate configure endpoint input context
2038 * @v xhci xHCI device
2039 * @v slot Device slot
2040 * @v endpoint Endpoint
2041 * @v input Input context
2043 static void xhci_configure_endpoint_input ( struct xhci_device
*xhci
,
2044 struct xhci_slot
*slot
,
2045 struct xhci_endpoint
*endpoint
,
2047 struct xhci_control_context
*control_ctx
;
2048 struct xhci_slot_context
*slot_ctx
;
2049 struct xhci_endpoint_context
*ep_ctx
;
2051 /* Populate control context */
2052 control_ctx
= input
;
2053 control_ctx
->add
= cpu_to_le32 ( ( 1 << XHCI_CTX_SLOT
) |
2054 ( 1 << endpoint
->ctx
) );
2056 /* Populate slot context */
2057 slot_ctx
= ( input
+ xhci_input_context_offset ( xhci
, XHCI_CTX_SLOT
));
2058 slot_ctx
->info
= cpu_to_le32 ( XHCI_SLOT_INFO ( ( XHCI_CTX_END
- 1 ),
2059 ( slot
->ports ?
1 : 0 ),
2061 slot_ctx
->ports
= slot
->ports
;
2063 /* Populate endpoint context */
2064 ep_ctx
= ( input
+ xhci_input_context_offset ( xhci
, endpoint
->ctx
) );
2065 ep_ctx
->interval
= endpoint
->interval
;
2066 ep_ctx
->type
= endpoint
->type
;
2067 ep_ctx
->burst
= endpoint
->ep
->burst
;
2068 ep_ctx
->mtu
= cpu_to_le16 ( endpoint
->ep
->mtu
);
2069 ep_ctx
->dequeue
= cpu_to_le64 ( virt_to_phys ( endpoint
->ring
.trb
) |
2071 ep_ctx
->trb_len
= cpu_to_le16 ( endpoint
->ep
->mtu
); /* best guess */
2075 * Configure endpoint
2077 * @v xhci xHCI device
2078 * @v slot Device slot
2079 * @v endpoint Endpoint
2080 * @ret rc Return status code
2082 static inline int xhci_configure_endpoint ( struct xhci_device
*xhci
,
2083 struct xhci_slot
*slot
,
2084 struct xhci_endpoint
*endpoint
) {
2087 /* Configure endpoint */
2088 if ( ( rc
= xhci_context ( xhci
, slot
, endpoint
,
2089 XHCI_TRB_CONFIGURE_ENDPOINT
,
2090 xhci_configure_endpoint_input
) ) != 0 )
2093 DBGC2 ( xhci
, "XHCI %p slot %d ctx %d configured\n",
2094 xhci
, slot
->id
, endpoint
->ctx
);
2099 * Populate deconfigure endpoint input context
2101 * @v xhci xHCI device
2102 * @v slot Device slot
2103 * @v endpoint Endpoint
2104 * @v input Input context
2107 xhci_deconfigure_endpoint_input ( struct xhci_device
*xhci __unused
,
2108 struct xhci_slot
*slot __unused
,
2109 struct xhci_endpoint
*endpoint
,
2111 struct xhci_control_context
*control_ctx
;
2112 struct xhci_slot_context
*slot_ctx
;
2114 /* Populate control context */
2115 control_ctx
= input
;
2116 control_ctx
->add
= cpu_to_le32 ( 1 << XHCI_CTX_SLOT
);
2117 control_ctx
->drop
= cpu_to_le32 ( 1 << endpoint
->ctx
);
2119 /* Populate slot context */
2120 slot_ctx
= ( input
+ xhci_input_context_offset ( xhci
, XHCI_CTX_SLOT
));
2121 slot_ctx
->info
= cpu_to_le32 ( XHCI_SLOT_INFO ( ( XHCI_CTX_END
- 1 ),
2126 * Deconfigure endpoint
2128 * @v xhci xHCI device
2129 * @v slot Device slot
2130 * @v endpoint Endpoint
2131 * @ret rc Return status code
2133 static inline int xhci_deconfigure_endpoint ( struct xhci_device
*xhci
,
2134 struct xhci_slot
*slot
,
2135 struct xhci_endpoint
*endpoint
) {
2138 /* Deconfigure endpoint */
2139 if ( ( rc
= xhci_context ( xhci
, slot
, endpoint
,
2140 XHCI_TRB_CONFIGURE_ENDPOINT
,
2141 xhci_deconfigure_endpoint_input
) ) != 0 )
2144 DBGC2 ( xhci
, "XHCI %p slot %d ctx %d deconfigured\n",
2145 xhci
, slot
->id
, endpoint
->ctx
);
2150 * Populate evaluate context input context
2152 * @v xhci xHCI device
2153 * @v slot Device slot
2154 * @v endpoint Endpoint
2155 * @v input Input context
2157 static void xhci_evaluate_context_input ( struct xhci_device
*xhci
,
2158 struct xhci_slot
*slot __unused
,
2159 struct xhci_endpoint
*endpoint
,
2161 struct xhci_control_context
*control_ctx
;
2162 struct xhci_slot_context
*slot_ctx
;
2163 struct xhci_endpoint_context
*ep_ctx
;
2165 /* Populate control context */
2166 control_ctx
= input
;
2167 control_ctx
->add
= cpu_to_le32 ( ( 1 << XHCI_CTX_SLOT
) |
2168 ( 1 << endpoint
->ctx
) );
2170 /* Populate slot context */
2171 slot_ctx
= ( input
+ xhci_input_context_offset ( xhci
, XHCI_CTX_SLOT
));
2172 slot_ctx
->info
= cpu_to_le32 ( XHCI_SLOT_INFO ( ( XHCI_CTX_END
- 1 ),
2175 /* Populate endpoint context */
2176 ep_ctx
= ( input
+ xhci_input_context_offset ( xhci
, endpoint
->ctx
) );
2177 ep_ctx
->mtu
= cpu_to_le16 ( endpoint
->ep
->mtu
);
2183 * @v xhci xHCI device
2184 * @v slot Device slot
2185 * @v endpoint Endpoint
2186 * @ret rc Return status code
2188 static inline int xhci_evaluate_context ( struct xhci_device
*xhci
,
2189 struct xhci_slot
*slot
,
2190 struct xhci_endpoint
*endpoint
) {
2193 /* Configure endpoint */
2194 if ( ( rc
= xhci_context ( xhci
, slot
, endpoint
,
2195 XHCI_TRB_EVALUATE_CONTEXT
,
2196 xhci_evaluate_context_input
) ) != 0 )
2199 DBGC2 ( xhci
, "XHCI %p slot %d ctx %d (re-)evaluated\n",
2200 xhci
, slot
->id
, endpoint
->ctx
);
2207 * @v xhci xHCI device
2208 * @v slot Device slot
2209 * @v endpoint Endpoint
2210 * @ret rc Return status code
2212 static inline int xhci_reset_endpoint ( struct xhci_device
*xhci
,
2213 struct xhci_slot
*slot
,
2214 struct xhci_endpoint
*endpoint
) {
2216 struct xhci_trb_reset_endpoint
*reset
= &trb
.reset
;
2219 /* Construct command */
2220 memset ( reset
, 0, sizeof ( *reset
) );
2221 reset
->slot
= slot
->id
;
2222 reset
->endpoint
= endpoint
->ctx
;
2223 reset
->type
= XHCI_TRB_RESET_ENDPOINT
;
2225 /* Issue command and wait for completion */
2226 if ( ( rc
= xhci_command ( xhci
, &trb
) ) != 0 ) {
2227 DBGC ( xhci
, "XHCI %p slot %d ctx %d could not reset endpoint "
2228 "in state %d: %s\n", xhci
, slot
->id
, endpoint
->ctx
,
2229 endpoint
->context
->state
, strerror ( rc
) );
2239 * @v xhci xHCI device
2240 * @v slot Device slot
2241 * @v endpoint Endpoint
2242 * @ret rc Return status code
2244 static inline int xhci_stop_endpoint ( struct xhci_device
*xhci
,
2245 struct xhci_slot
*slot
,
2246 struct xhci_endpoint
*endpoint
) {
2248 struct xhci_trb_stop_endpoint
*stop
= &trb
.stop
;
2251 /* Construct command */
2252 memset ( stop
, 0, sizeof ( *stop
) );
2253 stop
->slot
= slot
->id
;
2254 stop
->endpoint
= endpoint
->ctx
;
2255 stop
->type
= XHCI_TRB_STOP_ENDPOINT
;
2257 /* Issue command and wait for completion */
2258 if ( ( rc
= xhci_command ( xhci
, &trb
) ) != 0 ) {
2259 DBGC ( xhci
, "XHCI %p slot %d ctx %d could not stop endpoint "
2260 "in state %d: %s\n", xhci
, slot
->id
, endpoint
->ctx
,
2261 endpoint
->context
->state
, strerror ( rc
) );
2269 * Set transfer ring dequeue pointer
2271 * @v xhci xHCI device
2272 * @v slot Device slot
2273 * @v endpoint Endpoint
2274 * @ret rc Return status code
2277 xhci_set_tr_dequeue_pointer ( struct xhci_device
*xhci
,
2278 struct xhci_slot
*slot
,
2279 struct xhci_endpoint
*endpoint
) {
2281 struct xhci_trb_set_tr_dequeue_pointer
*dequeue
= &trb
.dequeue
;
2282 struct xhci_trb_ring
*ring
= &endpoint
->ring
;
2289 /* Construct command */
2290 memset ( dequeue
, 0, sizeof ( *dequeue
) );
2293 dcs
= ( ( ~( cons
>> ring
->shift
) ) & XHCI_EP_DCS
);
2294 index
= ( cons
& mask
);
2296 cpu_to_le64 ( virt_to_phys ( &ring
->trb
[index
] ) | dcs
);
2297 dequeue
->slot
= slot
->id
;
2298 dequeue
->endpoint
= endpoint
->ctx
;
2299 dequeue
->type
= XHCI_TRB_SET_TR_DEQUEUE_POINTER
;
2301 /* Issue command and wait for completion */
2302 if ( ( rc
= xhci_command ( xhci
, &trb
) ) != 0 ) {
2303 DBGC ( xhci
, "XHCI %p slot %d ctx %d could not set TR dequeue "
2304 "pointer in state %d: %s\n", xhci
, slot
->id
,
2305 endpoint
->ctx
, endpoint
->context
->state
, strerror ( rc
));
2312 /******************************************************************************
2314 * Endpoint operations
2316 ******************************************************************************
2322 * @v ep USB endpoint
2323 * @ret rc Return status code
2325 static int xhci_endpoint_open ( struct usb_endpoint
*ep
) {
2326 struct usb_device
*usb
= ep
->usb
;
2327 struct xhci_slot
*slot
= usb_get_hostdata ( usb
);
2328 struct xhci_device
*xhci
= slot
->xhci
;
2329 struct xhci_endpoint
*endpoint
;
2332 unsigned int interval
;
2335 /* Calculate context index */
2336 ctx
= XHCI_CTX ( ep
->address
);
2337 assert ( slot
->endpoint
[ctx
] == NULL
);
2339 /* Calculate endpoint type */
2340 type
= XHCI_EP_TYPE ( ep
->attributes
& USB_ENDPOINT_ATTR_TYPE_MASK
);
2341 if ( type
== XHCI_EP_TYPE ( USB_ENDPOINT_ATTR_CONTROL
) )
2342 type
= XHCI_EP_TYPE_CONTROL
;
2343 if ( ep
->address
& USB_DIR_IN
)
2344 type
|= XHCI_EP_TYPE_IN
;
2346 /* Calculate interval */
2347 if ( type
& XHCI_EP_TYPE_PERIODIC
) {
2348 interval
= ( fls ( ep
->interval
) - 1 );
2350 interval
= ep
->interval
;
2353 /* Allocate and initialise structure */
2354 endpoint
= zalloc ( sizeof ( *endpoint
) );
2359 usb_endpoint_set_hostdata ( ep
, endpoint
);
2360 slot
->endpoint
[ctx
] = endpoint
;
2361 endpoint
->xhci
= xhci
;
2362 endpoint
->slot
= slot
;
2364 endpoint
->ctx
= ctx
;
2365 endpoint
->type
= type
;
2366 endpoint
->interval
= interval
;
2367 endpoint
->context
= ( ( ( void * ) slot
->context
) +
2368 xhci_device_context_offset ( xhci
, ctx
) );
2370 /* Allocate transfer ring */
2371 if ( ( rc
= xhci_ring_alloc ( xhci
, &endpoint
->ring
,
2372 XHCI_TRANSFER_TRBS_LOG2
,
2373 slot
->id
, ctx
, 0 ) ) != 0 )
2374 goto err_ring_alloc
;
2376 /* Configure endpoint, if applicable */
2377 if ( ( ctx
!= XHCI_CTX_EP0
) &&
2378 ( ( rc
= xhci_configure_endpoint ( xhci
, slot
, endpoint
) ) != 0 ))
2379 goto err_configure_endpoint
;
2381 DBGC2 ( xhci
, "XHCI %p slot %d ctx %d ring [%08lx,%08lx)\n",
2382 xhci
, slot
->id
, ctx
, virt_to_phys ( endpoint
->ring
.trb
),
2383 ( virt_to_phys ( endpoint
->ring
.trb
) + endpoint
->ring
.len
) );
2386 xhci_deconfigure_endpoint ( xhci
, slot
, endpoint
);
2387 err_configure_endpoint
:
2388 xhci_ring_free ( &endpoint
->ring
);
2390 slot
->endpoint
[ctx
] = NULL
;
2399 * @v ep USB endpoint
/* Close a USB endpoint.
 *
 * Deconfigures the endpoint in the controller (except EP0, which is
 * managed as part of the slot), cancels any in-flight transfers by
 * completing them to the USB core with -ECANCELED, frees the transfer
 * ring, and detaches the endpoint from its slot.
 *
 * NOTE(review): line-sampled extraction; the final free of the
 * endpoint structure itself is not visible here.
 */
2401 static void xhci_endpoint_close ( struct usb_endpoint
*ep
) {
2402 struct xhci_endpoint
*endpoint
= usb_endpoint_get_hostdata ( ep
);
2403 struct xhci_slot
*slot
= endpoint
->slot
;
2404 struct xhci_device
*xhci
= slot
->xhci
;
2405 struct io_buffer
*iobuf
;
2406 unsigned int ctx
= endpoint
->ctx
;
2408 /* Deconfigure endpoint, if applicable */
2409 if ( ctx
!= XHCI_CTX_EP0
)
2410 xhci_deconfigure_endpoint ( xhci
, slot
, endpoint
);
2412 /* Cancel any incomplete transfers */
2413 while ( xhci_ring_fill ( &endpoint
->ring
) ) {
2414 iobuf
= xhci_dequeue_multi ( &endpoint
->ring
);
2415 usb_complete_err ( ep
, iobuf
, -ECANCELED
);
/* Free transfer ring and detach endpoint from slot */
2419 xhci_ring_free ( &endpoint
->ring
);
2420 slot
->endpoint
[ctx
] = NULL
;
2427 * @v ep USB endpoint
2428 * @ret rc Return status code
/* Recover a halted endpoint: reset the endpoint context, rewind the
 * hardware's dequeue pointer to match the driver's ring state, then
 * ring the doorbell to resume transfer processing.
 *
 * NOTE(review): line-sampled extraction; the declaration of rc and
 * the returns are not visible here.
 */
2430 static int xhci_endpoint_reset ( struct usb_endpoint
*ep
) {
2431 struct xhci_endpoint
*endpoint
= usb_endpoint_get_hostdata ( ep
);
2432 struct xhci_slot
*slot
= endpoint
->slot
;
2433 struct xhci_device
*xhci
= slot
->xhci
;
2436 /* Reset endpoint context */
2437 if ( ( rc
= xhci_reset_endpoint ( xhci
, slot
, endpoint
) ) != 0 )
2440 /* Set transfer ring dequeue pointer */
2441 if ( ( rc
= xhci_set_tr_dequeue_pointer ( xhci
, slot
, endpoint
) ) != 0)
2444 /* Ring doorbell to resume processing */
2445 xhci_doorbell ( &endpoint
->ring
);
2447 DBGC ( xhci
, "XHCI %p slot %d ctx %d reset\n",
2448 xhci
, slot
->id
, endpoint
->ctx
);
2455 * @v ep USB endpoint
2456 * @ret rc Return status code
/* Propagate an updated endpoint MTU to the controller by issuing an
 * Evaluate Context command (used e.g. when the USB core learns EP0's
 * real maximum packet size).
 *
 * NOTE(review): line-sampled extraction; the declaration of rc and
 * the returns are not visible here.
 */
2458 static int xhci_endpoint_mtu ( struct usb_endpoint
*ep
) {
2459 struct xhci_endpoint
*endpoint
= usb_endpoint_get_hostdata ( ep
);
2460 struct xhci_slot
*slot
= endpoint
->slot
;
2461 struct xhci_device
*xhci
= slot
->xhci
;
2464 /* Evaluate context */
2465 if ( ( rc
= xhci_evaluate_context ( xhci
, slot
, endpoint
) ) != 0 )
2472 * Enqueue message transfer
2474 * @v ep USB endpoint
2475 * @v packet Setup packet
2476 * @v iobuf I/O buffer
2477 * @ret rc Return status code
2479 static int xhci_endpoint_message ( struct usb_endpoint
*ep
,
2480 struct usb_setup_packet
*packet
,
2481 struct io_buffer
*iobuf
) {
2482 struct xhci_endpoint
*endpoint
= usb_endpoint_get_hostdata ( ep
);
2483 unsigned int input
= ( le16_to_cpu ( packet
->request
) & USB_DIR_IN
);
2484 size_t len
= iob_len ( iobuf
);
2485 union xhci_trb trbs
[ 1 /* setup */ + 1 /* possible data */ +
2487 union xhci_trb
*trb
= trbs
;
2488 struct xhci_trb_setup
*setup
;
2489 struct xhci_trb_data
*data
;
2490 struct xhci_trb_status
*status
;
2493 /* Profile message transfers */
2494 profile_start ( &xhci_message_profiler
);
2496 /* Construct setup stage TRB */
2497 memset ( trbs
, 0, sizeof ( trbs
) );
2498 setup
= &(trb
++)->setup
;
2499 memcpy ( &setup
->packet
, packet
, sizeof ( setup
->packet
) );
2500 setup
->len
= cpu_to_le32 ( sizeof ( *packet
) );
2501 setup
->flags
= XHCI_TRB_IDT
;
2502 setup
->type
= XHCI_TRB_SETUP
;
2504 setup
->direction
= ( input ? XHCI_SETUP_IN
: XHCI_SETUP_OUT
);
2506 /* Construct data stage TRB, if applicable */
2508 data
= &(trb
++)->data
;
2509 data
->data
= cpu_to_le64 ( virt_to_phys ( iobuf
->data
) );
2510 data
->len
= cpu_to_le32 ( len
);
2511 data
->type
= XHCI_TRB_DATA
;
2512 data
->direction
= ( input ? XHCI_DATA_IN
: XHCI_DATA_OUT
);
2515 /* Construct status stage TRB */
2516 status
= &(trb
++)->status
;
2517 status
->flags
= XHCI_TRB_IOC
;
2518 status
->type
= XHCI_TRB_STATUS
;
2520 ( ( len
&& input
) ? XHCI_STATUS_OUT
: XHCI_STATUS_IN
);
2523 if ( ( rc
= xhci_enqueue_multi ( &endpoint
->ring
, iobuf
, trbs
,
2524 ( trb
- trbs
) ) ) != 0 )
2527 /* Ring the doorbell */
2528 xhci_doorbell ( &endpoint
->ring
);
2530 profile_stop ( &xhci_message_profiler
);
/**
 * Enqueue stream transfer
 *
 * @v ep		USB endpoint
 * @v iobuf		I/O buffer
 * @v terminate		Terminate using a short packet
 * @ret rc		Return status code
 */
static int xhci_endpoint_stream ( struct usb_endpoint *ep,
				  struct io_buffer *iobuf, int terminate ) {
	struct xhci_endpoint *endpoint = usb_endpoint_get_hostdata ( ep );
	/* Worst case is one Normal TRB plus one chained zero-length TRB */
	union xhci_trb trbs[ 1 /* Normal */ + 1 /* Possible zero-length */ ];
	union xhci_trb *trb = trbs;
	struct xhci_trb_normal *normal;
	size_t len = iob_len ( iobuf );
	int rc;

	/* Profile stream transfers */
	profile_start ( &xhci_stream_profiler );

	/* Construct normal TRBs */
	memset ( &trbs, 0, sizeof ( trbs ) );
	normal = &(trb++)->normal;
	normal->data = cpu_to_le64 ( virt_to_phys ( iobuf->data ) );
	normal->len = cpu_to_le32 ( len );
	normal->type = XHCI_TRB_NORMAL;
	/* The ( len & ( mtu - 1 ) ) test assumes ep->mtu is a power of
	 * two: if a terminating transfer is an exact multiple of the
	 * MTU, chain on an explicit zero-length TRB so that the
	 * device sees a short packet ending the transfer.
	 */
	if ( terminate && ( ( len & ( ep->mtu - 1 ) ) == 0 ) ) {
		normal->flags = XHCI_TRB_CH;
		normal = &(trb++)->normal;
		normal->type = XHCI_TRB_NORMAL;
	}
	/* Interrupt on completion of the final TRB only */
	normal->flags = XHCI_TRB_IOC;

	/* Enqueue TRBs */
	if ( ( rc = xhci_enqueue_multi ( &endpoint->ring, iobuf, trbs,
					 ( trb - trbs ) ) ) != 0 )
		return rc;

	/* Ring the doorbell */
	xhci_doorbell ( &endpoint->ring );

	profile_stop ( &xhci_stream_profiler );
	return 0;
}
2579 /******************************************************************************
2583 ******************************************************************************
2590 * @ret rc Return status code
/**
 * Open device
 *
 * @v usb		USB device
 * @ret rc		Return status code
 */
static int xhci_device_open ( struct usb_device *usb ) {
	struct xhci_device *xhci = usb_bus_get_hostdata ( usb->port->hub->bus );
	struct usb_port *tt = usb_transaction_translator ( usb );
	struct xhci_slot *slot;
	struct xhci_slot *tt_slot;
	size_t len;
	int type;
	int id;
	int rc;

	/* Determine applicable slot type */
	type = xhci_port_slot_type ( xhci, usb->port->address );
	if ( type < 0 ) {
		rc = type;
		DBGC ( xhci, "XHCI %p port %d has no slot type\n",
		       xhci, usb->port->address );
		goto err_type;
	}

	/* Allocate a device slot number */
	id = xhci_enable_slot ( xhci, type );
	if ( id < 0 ) {
		rc = id;
		goto err_enable_slot;
	}
	/* Controller must not have handed out a slot already in use */
	assert ( xhci->slot[id] == NULL );

	/* Allocate and initialise structure */
	slot = zalloc ( sizeof ( *slot ) );
	if ( ! slot ) {
		rc = -ENOMEM;
		goto err_alloc;
	}
	slot->xhci = xhci;
	slot->usb = usb;
	slot->id = id;
	usb_set_hostdata ( usb, slot );
	xhci->slot[id] = slot;

	/* Record transaction translator slot and port, if applicable */
	if ( tt ) {
		tt_slot = usb_get_hostdata ( tt->hub->usb );
		slot->tt_id = tt_slot->id;
		slot->tt_port = tt->address;
	}

	/* Allocate a device context (aligned as required by hardware) */
	len = xhci_device_context_offset ( xhci, XHCI_CTX_END );
	slot->context = malloc_dma ( len, xhci_align ( len ) );
	if ( ! slot->context ) {
		rc = -ENOMEM;
		goto err_alloc_context;
	}
	memset ( slot->context, 0, len );

	/* Set device context base address */
	assert ( xhci->dcbaa[id] == 0 );
	xhci->dcbaa[id] = cpu_to_le64 ( virt_to_phys ( slot->context ) );

	DBGC2 ( xhci, "XHCI %p slot %d device context [%08lx,%08lx) for %s\n",
		xhci, slot->id, virt_to_phys ( slot->context ),
		( virt_to_phys ( slot->context ) + len ), usb->name );
	return 0;

	/* Error unwind ladder: entries above each label undo the
	 * corresponding successful step.
	 */
	xhci->dcbaa[id] = 0;
	free_dma ( slot->context, len );
 err_alloc_context:
	xhci->slot[id] = NULL;
	usb_set_hostdata ( usb, NULL );
	free ( slot );
 err_alloc:
	xhci_disable_slot ( xhci, id );
 err_enable_slot:
 err_type:
	return rc;
}
/**
 * Close device
 *
 * @v usb		USB device
 */
static void xhci_device_close ( struct usb_device *usb ) {
	struct xhci_slot *slot = usb_get_hostdata ( usb );
	struct xhci_device *xhci = slot->xhci;
	size_t len = xhci_device_context_offset ( xhci, XHCI_CTX_END );
	unsigned int id = slot->id;
	int rc;

	/* Disable slot */
	if ( ( rc = xhci_disable_slot ( xhci, id ) ) != 0 ) {
		/* Slot is still enabled.  Leak the slot context,
		 * since the controller may still write to this
		 * memory, and leave the DCBAA entry intact.
		 *
		 * If the controller later reports that this same slot
		 * has been re-enabled, then some assertions will be
		 * triggered.
		 */
		DBGC ( xhci, "XHCI %p slot %d leaking context memory\n",
		       xhci, slot->id );
		slot->context = NULL;
	}

	/* Free slot: only reclaim the context memory (and clear the
	 * DCBAA entry) if the slot was successfully disabled above.
	 */
	if ( slot->context ) {
		free_dma ( slot->context, len );
		xhci->dcbaa[id] = 0;
	}
	xhci->slot[id] = NULL;
	free ( slot );
}
/**
 * Assign device address
 *
 * @v usb		USB device
 * @ret rc		Return status code
 */
static int xhci_device_address ( struct usb_device *usb ) {
	struct xhci_slot *slot = usb_get_hostdata ( usb );
	struct xhci_device *xhci = slot->xhci;
	struct usb_port *port = usb->port;
	struct usb_port *root_port;
	int psiv;
	int rc;

	/* Calculate route string */
	slot->route = usb_route_string ( usb );

	/* Calculate root hub port number */
	root_port = usb_root_hub_port ( usb );
	slot->port = root_port->address;

	/* Calculate protocol speed ID (negative value indicates error) */
	psiv = xhci_port_psiv ( xhci, slot->port, port->speed );
	if ( psiv < 0 ) {
		rc = psiv;
		return rc;
	}
	slot->psiv = psiv;

	/* Address device */
	if ( ( rc = xhci_address_device ( xhci, slot ) ) != 0 )
		return rc;

	return 0;
}
2738 /******************************************************************************
2742 ******************************************************************************
2749 * @ret rc Return status code
/**
 * Open USB bus
 *
 * @v bus		USB bus
 * @ret rc		Return status code
 */
static int xhci_bus_open ( struct usb_bus *bus ) {
	struct xhci_device *xhci = usb_bus_get_hostdata ( bus );
	int rc;

	/* Allocate device slot array */
	xhci->slot = zalloc ( xhci->slots * sizeof ( xhci->slot[0] ) );
	if ( ! xhci->slot ) {
		rc = -ENOMEM;
		goto err_slot_alloc;
	}

	/* Allocate device context base address array */
	if ( ( rc = xhci_dcbaa_alloc ( xhci ) ) != 0 )
		goto err_dcbaa_alloc;

	/* Allocate scratchpad buffers */
	if ( ( rc = xhci_scratchpad_alloc ( xhci ) ) != 0 )
		goto err_scratchpad_alloc;

	/* Allocate command ring */
	if ( ( rc = xhci_command_alloc ( xhci ) ) != 0 )
		goto err_command_alloc;

	/* Allocate event ring */
	if ( ( rc = xhci_event_alloc ( xhci ) ) != 0 )
		goto err_event_alloc;

	/* Start controller */
	xhci_run ( xhci );

	return 0;

	/* Error unwind ladder: each label frees the resources
	 * acquired before the corresponding failure point.
	 */
	xhci_stop ( xhci );
	xhci_event_free ( xhci );
 err_event_alloc:
	xhci_command_free ( xhci );
 err_command_alloc:
	xhci_scratchpad_free ( xhci );
 err_scratchpad_alloc:
	xhci_dcbaa_free ( xhci );
 err_dcbaa_alloc:
	free ( xhci->slot );
 err_slot_alloc:
	return rc;
}
/**
 * Close USB bus
 *
 * @v bus		USB bus
 */
static void xhci_bus_close ( struct usb_bus *bus ) {
	struct xhci_device *xhci = usb_bus_get_hostdata ( bus );
	unsigned int i;

	/* Sanity checks: all device slots must already be closed */
	assert ( xhci->slot != NULL );
	for ( i = 0 ; i < xhci->slots ; i++ )
		assert ( xhci->slot[i] == NULL );

	/* Stop controller and free resources in reverse order of
	 * allocation (mirrors xhci_bus_open).
	 */
	xhci_stop ( xhci );
	xhci_event_free ( xhci );
	xhci_command_free ( xhci );
	xhci_scratchpad_free ( xhci );
	xhci_dcbaa_free ( xhci );
	free ( xhci->slot );
}
/**
 * Poll USB bus
 *
 * @v bus		USB bus
 */
static void xhci_bus_poll ( struct usb_bus *bus ) {

	/* Poll the event ring of the controller owning this bus */
	xhci_event_poll ( usb_bus_get_hostdata ( bus ) );
}
2831 /******************************************************************************
2835 ******************************************************************************
2842 * @ret rc Return status code
/**
 * Open hub
 *
 * @v hub		USB hub
 * @ret rc		Return status code
 */
static int xhci_hub_open ( struct usb_hub *hub ) {
	struct xhci_slot *slot;

	/* Do nothing if this is the root hub */
	if ( ! hub->usb )
		return 0;

	/* Get device slot */
	slot = usb_get_hostdata ( hub->usb );

	/* Update device slot hub parameters.  We don't inform the
	 * hardware of this information until the hub's interrupt
	 * endpoint is opened, since the only mechanism for so doing
	 * provided by the xHCI specification is a Configure Endpoint
	 * command, and we can't issue that command until we have a
	 * non-EP0 endpoint to configure.
	 */
	slot->ports = hub->ports;

	return 0;
}
/**
 * Close hub
 *
 * @v hub		USB hub
 */
static void xhci_hub_close ( struct usb_hub *hub __unused ) {

	/* Nothing to do */
}
2876 /******************************************************************************
2878 * Root hub operations
2880 ******************************************************************************
2887 * @ret rc Return status code
/**
 * Open root hub
 *
 * @v hub		USB hub
 * @ret rc		Return status code
 */
static int xhci_root_open ( struct usb_hub *hub ) {
	struct usb_bus *bus = hub->bus;
	struct xhci_device *xhci = usb_bus_get_hostdata ( bus );
	struct usb_port *port;
	uint32_t portsc;
	unsigned int i;

	/* Enable power to all ports.  PORTSC has write-one-to-clear
	 * status bits, so mask with XHCI_PORTSC_PRESERVE before
	 * writing back.
	 */
	for ( i = 1 ; i <= xhci->ports ; i++ ) {
		portsc = readl ( xhci->op + XHCI_OP_PORTSC ( i ) );
		portsc &= XHCI_PORTSC_PRESERVE;
		portsc |= XHCI_PORTSC_PP;
		writel ( portsc, xhci->op + XHCI_OP_PORTSC ( i ) );
	}

	/* xHCI spec requires us to potentially wait 20ms after
	 * enabling power to a port.
	 */
	mdelay ( XHCI_PORT_POWER_DELAY_MS );

	/* USB3 ports may power up as Disabled */
	for ( i = 1 ; i <= xhci->ports ; i++ ) {
		portsc = readl ( xhci->op + XHCI_OP_PORTSC ( i ) );
		port = usb_port ( hub, i );
		if ( ( port->protocol >= USB_PROTO_3_0 ) &&
		     ( ( portsc & XHCI_PORTSC_PLS_MASK ) ==
		       XHCI_PORTSC_PLS_DISABLED ) ) {
			/* Force link state to RxDetect (LWS makes the
			 * PLS field writable).
			 */
			portsc &= XHCI_PORTSC_PRESERVE;
			portsc |= ( XHCI_PORTSC_PLS_RXDETECT |
				    XHCI_PORTSC_LWS );
			writel ( portsc, xhci->op + XHCI_OP_PORTSC ( i ) );
		}
	}

	/* Some xHCI cards seem to require an additional delay after
	 * setting the link state to RxDetect.
	 */
	mdelay ( XHCI_LINK_STATE_DELAY_MS );

	/* Record hub driver private data */
	usb_hub_set_drvdata ( hub, xhci );

	return 0;
}
/**
 * Close root hub
 *
 * @v hub		USB hub
 */
static void xhci_root_close ( struct usb_hub *hub ) {

	/* Clear hub driver private data */
	usb_hub_set_drvdata ( hub, NULL );
}
/**
 * Enable root hub port
 *
 * @v hub		USB hub
 * @v port		USB port
 * @ret rc		Return status code
 */
static int xhci_root_enable ( struct usb_hub *hub, struct usb_port *port ) {
	struct xhci_device *xhci = usb_hub_get_drvdata ( hub );
	uint32_t portsc;
	unsigned int i;

	/* Reset port (preserving write-one-to-clear status bits) */
	portsc = readl ( xhci->op + XHCI_OP_PORTSC ( port->address ) );
	portsc &= XHCI_PORTSC_PRESERVE;
	portsc |= XHCI_PORTSC_PR;
	writel ( portsc, xhci->op + XHCI_OP_PORTSC ( port->address ) );

	/* Wait for port to become enabled */
	for ( i = 0 ; i < XHCI_PORT_RESET_MAX_WAIT_MS ; i++ ) {

		/* Check port status */
		portsc = readl ( xhci->op + XHCI_OP_PORTSC ( port->address ) );
		if ( portsc & XHCI_PORTSC_PED )
			return 0;

		/* Delay */
		mdelay ( 1 );
	}

	DBGC ( xhci, "XHCI %p timed out waiting for port %d to enable\n",
	       xhci, port->address );
	return -ETIMEDOUT;
}
/**
 * Disable root hub port
 *
 * @v hub		USB hub
 * @v port		USB port
 * @ret rc		Return status code
 */
static int xhci_root_disable ( struct usb_hub *hub, struct usb_port *port ) {
	struct xhci_device *xhci = usb_hub_get_drvdata ( hub );
	uint32_t portsc;

	/* Disable port.  PED is write-one-to-clear: writing it back
	 * as set (on top of the preserved bits) disables the port.
	 */
	portsc = readl ( xhci->op + XHCI_OP_PORTSC ( port->address ) );
	portsc &= XHCI_PORTSC_PRESERVE;
	portsc |= XHCI_PORTSC_PED;
	writel ( portsc, xhci->op + XHCI_OP_PORTSC ( port->address ) );

	return 0;
}
/**
 * Update root hub port speed
 *
 * @v hub		USB hub
 * @v port		USB port
 * @ret rc		Return status code
 */
static int xhci_root_speed ( struct usb_hub *hub, struct usb_port *port ) {
	struct xhci_device *xhci = usb_hub_get_drvdata ( hub );
	uint32_t portsc;
	unsigned int psiv;
	int ccs;
	int ped;
	int speed;

	/* Read port status */
	portsc = readl ( xhci->op + XHCI_OP_PORTSC ( port->address ) );
	DBGC2 ( xhci, "XHCI %p port %d status is %08x\n",
		xhci, port->address, portsc );

	/* Check whether or not port is connected */
	ccs = ( portsc & XHCI_PORTSC_CCS );
	if ( ! ccs ) {
		port->speed = USB_SPEED_NONE;
		return 0;
	}

	/* For USB2 ports, the PSIV field is not valid until the port
	 * completes reset and becomes enabled.  Report full speed as
	 * a placeholder until then.
	 */
	ped = ( portsc & XHCI_PORTSC_PED );
	if ( ( port->protocol < USB_PROTO_3_0 ) && ! ped ) {
		port->speed = USB_SPEED_FULL;
		return 0;
	}

	/* Get port speed and map to generic USB speed */
	psiv = XHCI_PORTSC_PSIV ( portsc );
	speed = xhci_port_speed ( xhci, port->address, psiv );
	if ( speed < 0 )
		return speed;

	port->speed = speed;
	return 0;
}
3050 * Clear transaction translator buffer
3054 * @v ep USB endpoint
3055 * @ret rc Return status code
3057 static int xhci_root_clear_tt ( struct usb_hub
*hub
, struct usb_port
*port
,
3058 struct usb_endpoint
*ep
) {
3059 struct ehci_device
*ehci
= usb_hub_get_drvdata ( hub
);
3061 /* Should never be called; this is a root hub */
3062 DBGC ( ehci
, "XHCI %p port %d nonsensical CLEAR_TT for %s endpoint "
3063 "%02x\n", ehci
, port
->address
, ep
->usb
->name
, ep
->address
);
3068 /******************************************************************************
3072 ******************************************************************************
/** USB host controller operations
 *
 * Dispatch table binding the generic USB core to this driver's
 * endpoint, device, bus, hub, and root-hub implementations.
 */
static struct usb_host_operations xhci_operations = {
	.endpoint = {
		.open = xhci_endpoint_open,
		.close = xhci_endpoint_close,
		.reset = xhci_endpoint_reset,
		.mtu = xhci_endpoint_mtu,
		.message = xhci_endpoint_message,
		.stream = xhci_endpoint_stream,
	},
	.device = {
		.open = xhci_device_open,
		.close = xhci_device_close,
		.address = xhci_device_address,
	},
	.bus = {
		.open = xhci_bus_open,
		.close = xhci_bus_close,
		.poll = xhci_bus_poll,
	},
	.hub = {
		.open = xhci_hub_open,
		.close = xhci_hub_close,
	},
	.root = {
		.open = xhci_root_open,
		.close = xhci_root_close,
		.enable = xhci_root_enable,
		.disable = xhci_root_disable,
		.speed = xhci_root_speed,
		.clear_tt = xhci_root_clear_tt,
	},
};
/**
 * Fix Intel PCH-specific quirks
 *
 * @v xhci		xHCI device
 * @v pci		PCI device
 *
 * Intel PCH chipsets expose USB ports via both EHCI and xHCI;
 * these PCI config registers select which controller owns them.
 * The original register values are saved in xhci->pch so that
 * xhci_pch_undo() can restore them.
 */
static void xhci_pch_fix ( struct xhci_device *xhci, struct pci_device *pci ) {
	struct xhci_pch *pch = &xhci->pch;
	uint32_t xusb2pr;
	uint32_t xusb2prm;
	uint32_t usb3pssen;
	uint32_t usb3prm;

	/* Enable SuperSpeed capability.  Do this before rerouting
	 * USB2 ports, so that USB3 devices connect at SuperSpeed.
	 */
	pci_read_config_dword ( pci, XHCI_PCH_USB3PSSEN, &usb3pssen );
	pci_read_config_dword ( pci, XHCI_PCH_USB3PRM, &usb3prm );
	if ( usb3prm & ~usb3pssen ) {
		DBGC ( xhci, "XHCI %p enabling SuperSpeed on ports %08x\n",
		       xhci, ( usb3prm & ~usb3pssen ) );
	}
	pch->usb3pssen = usb3pssen;
	usb3pssen |= usb3prm;
	pci_write_config_dword ( pci, XHCI_PCH_USB3PSSEN, usb3pssen );

	/* Route USB2 ports from EHCI to xHCI */
	pci_read_config_dword ( pci, XHCI_PCH_XUSB2PR, &xusb2pr );
	pci_read_config_dword ( pci, XHCI_PCH_XUSB2PRM, &xusb2prm );
	if ( xusb2prm & ~xusb2pr ) {
		DBGC ( xhci, "XHCI %p routing ports %08x from EHCI to xHCI\n",
		       xhci, ( xusb2prm & ~xusb2pr ) );
	}
	pch->xusb2pr = xusb2pr;
	xusb2pr |= xusb2prm;
	pci_write_config_dword ( pci, XHCI_PCH_XUSB2PR, xusb2pr );
}
/**
 * Undo Intel PCH-specific quirk fixes
 *
 * @v xhci		xHCI device
 * @v pci		PCI device
 *
 * Restores the register values saved by xhci_pch_fix().
 */
static void xhci_pch_undo ( struct xhci_device *xhci, struct pci_device *pci ) {
	struct xhci_pch *pch = &xhci->pch;

	/* Restore USB2 port routing to original state */
	pci_write_config_dword ( pci, XHCI_PCH_XUSB2PR, pch->xusb2pr );

	/* Restore SuperSpeed capability to original state */
	pci_write_config_dword ( pci, XHCI_PCH_USB3PSSEN, pch->usb3pssen );
}
/**
 * Probe PCI device
 *
 * @v pci		PCI device
 * @ret rc		Return status code
 */
static int xhci_probe ( struct pci_device *pci ) {
	struct xhci_device *xhci;
	struct usb_port *port;
	unsigned long bar_start;
	size_t bar_size;
	unsigned int i;
	int rc;

	/* Allocate and initialise structure */
	xhci = zalloc ( sizeof ( *xhci ) );
	if ( ! xhci ) {
		rc = -ENOMEM;
		goto err_alloc;
	}

	/* Fix up PCI device */
	adjust_pci_device ( pci );

	/* Map MMIO registers */
	bar_start = pci_bar_start ( pci, XHCI_BAR );
	bar_size = pci_bar_size ( pci, XHCI_BAR );
	xhci->regs = ioremap ( bar_start, bar_size );
	if ( ! xhci->regs ) {
		rc = -ENODEV;
		goto err_ioremap;
	}

	/* Initialise xHCI device */
	xhci_init ( xhci, xhci->regs );

	/* Initialise USB legacy support and claim ownership */
	xhci_legacy_init ( xhci );
	xhci_legacy_claim ( xhci );

	/* Fix Intel PCH-specific quirks, if applicable */
	if ( pci->id->driver_data & XHCI_PCH )
		xhci_pch_fix ( xhci, pci );

	/* Reset device */
	if ( ( rc = xhci_reset ( xhci ) ) != 0 )
		goto err_reset;

	/* Allocate USB bus */
	xhci->bus = alloc_usb_bus ( &pci->dev, xhci->ports, XHCI_MTU,
				    &xhci_operations );
	if ( ! xhci->bus ) {
		rc = -ENOMEM;
		goto err_alloc_bus;
	}
	usb_bus_set_hostdata ( xhci->bus, xhci );
	usb_hub_set_drvdata ( xhci->bus->hub, xhci );

	/* Set port protocols */
	for ( i = 1 ; i <= xhci->ports ; i++ ) {
		port = usb_port ( xhci->bus->hub, i );
		port->protocol = xhci_port_protocol ( xhci, i );
	}

	/* Register USB bus */
	if ( ( rc = register_usb_bus ( xhci->bus ) ) != 0 )
		goto err_register;

	pci_set_drvdata ( pci, xhci );
	return 0;

	/* Error unwind ladder: each label releases the resources
	 * acquired before the corresponding failure point.
	 */
	unregister_usb_bus ( xhci->bus );
 err_register:
	free_usb_bus ( xhci->bus );
 err_alloc_bus:
	xhci_reset ( xhci );
 err_reset:
	if ( pci->id->driver_data & XHCI_PCH )
		xhci_pch_undo ( xhci, pci );
	xhci_legacy_release ( xhci );
	iounmap ( xhci->regs );
 err_ioremap:
	free ( xhci );
 err_alloc:
	return rc;
}
/**
 * Remove PCI device
 *
 * @v pci		PCI device
 */
static void xhci_remove ( struct pci_device *pci ) {
	struct xhci_device *xhci = pci_get_drvdata ( pci );
	struct usb_bus *bus = xhci->bus;

	/* Tear down in reverse order of xhci_probe */
	unregister_usb_bus ( bus );
	free_usb_bus ( bus );
	xhci_reset ( xhci );
	if ( pci->id->driver_data & XHCI_PCH )
		xhci_pch_undo ( xhci, pci );
	xhci_legacy_release ( xhci );
	iounmap ( xhci->regs );
	free ( xhci );
}
/** XHCI PCI device IDs
 *
 * Matching is primarily by PCI class (see xhci_driver); these
 * wildcard vendor/device entries exist to supply ROM metadata and to
 * tag Intel (0x8086) controllers with the XHCI_PCH quirk flag.
 */
static struct pci_device_id xhci_ids[] = {
	PCI_ROM ( 0x8086, 0xffff, "xhci-pch", "xHCI (Intel PCH)", XHCI_PCH ),
	PCI_ROM ( 0xffff, 0xffff, "xhci", "xHCI", 0 ),
};
/** XHCI PCI driver */
struct pci_driver xhci_driver __pci_driver = {
	.ids = xhci_ids,
	.id_count = ( sizeof ( xhci_ids ) / sizeof ( xhci_ids[0] ) ),
	/* Match any device advertising the standard xHCI PCI class */
	.class = PCI_CLASS ( PCI_CLASS_SERIAL, PCI_CLASS_SERIAL_USB,
			     PCI_CLASS_SERIAL_USB_XHCI ),
	.probe = xhci_probe,
	.remove = xhci_remove,
};
/**
 * Prepare for exit
 *
 * @v booting		System is shutting down for OS boot
 */
static void xhci_shutdown ( int booting ) {
	/* If we are shutting down to boot an OS, then prevent the
	 * release of ownership back to BIOS.
	 */
	xhci_legacy_prevent_release = booting;
}
3297 /** Startup/shutdown function */
3298 struct startup_fn xhci_startup
__startup_fn ( STARTUP_LATE
) = {
3299 .shutdown
= xhci_shutdown
,