 * Copyright (C) 2014 Michael Brown <mbrown@fensystems.co.uk>.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 * You can also choose to distribute this program under the terms of
 * the Unmodified Binary Distribution Licence (as given in the file
 * COPYING.UBDL), provided that you have satisfied its requirements.
24 FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL
);
33 #include <ipxe/malloc.h>
36 #include <ipxe/init.h>
37 #include <ipxe/profile.h>
42 * USB eXtensible Host Controller Interface (xHCI) driver
/* Profiler declarations for the four hot paths instrumented below
 * (message transfers, stream transfers, event ring polling, transfer
 * events).  NOTE(review): extraction has split each declaration across
 * lines and fused source line numbers into the text; code tokens are
 * left byte-identical pending recovery of the upstream file. */
46 /** Message transfer profiler */
47 static struct profiler xhci_message_profiler __profiler
=
48 { .name
= "xhci.message" };
50 /** Stream transfer profiler */
51 static struct profiler xhci_stream_profiler __profiler
=
52 { .name
= "xhci.stream" };
54 /** Event ring profiler */
55 static struct profiler xhci_event_profiler __profiler
=
56 { .name
= "xhci.event" };
58 /** Transfer event profiler */
59 static struct profiler xhci_transfer_profiler __profiler
=
60 { .name
= "xhci.transfer" };
/* Maps xHCI completion codes to unique iPXE error numbers: codes below
 * 32 disambiguate EIO, codes 32-36 disambiguate EPROTO, via the ECODE()
 * selector at the end of this table.  NOTE(review): extraction has
 * dropped many lines here (several "#define EIO_* \" name lines and
 * description strings are missing — the embedded line numbers jump);
 * code tokens are left byte-identical pending recovery. */
62 /* Disambiguate the various error causes */
64 __einfo_error ( EINFO_EIO_DATA )
65 #define EINFO_EIO_DATA \
66 __einfo_uniqify ( EINFO_EIO, ( 2 - 0 ), \
69 __einfo_error ( EINFO_EIO_BABBLE )
70 #define EINFO_EIO_BABBLE \
71 __einfo_uniqify ( EINFO_EIO, ( 3 - 0 ), \
74 __einfo_error ( EINFO_EIO_USB )
75 #define EINFO_EIO_USB \
76 __einfo_uniqify ( EINFO_EIO, ( 4 - 0 ), \
77 "USB transaction error" )
79 __einfo_error ( EINFO_EIO_TRB )
80 #define EINFO_EIO_TRB \
81 __einfo_uniqify ( EINFO_EIO, ( 5 - 0 ), \
84 __einfo_error ( EINFO_EIO_STALL )
85 #define EINFO_EIO_STALL \
86 __einfo_uniqify ( EINFO_EIO, ( 6 - 0 ), \
88 #define EIO_RESOURCE \
89 __einfo_error ( EINFO_EIO_RESOURCE )
90 #define EINFO_EIO_RESOURCE \
91 __einfo_uniqify ( EINFO_EIO, ( 7 - 0 ), \
93 #define EIO_BANDWIDTH \
94 __einfo_error ( EINFO_EIO_BANDWIDTH )
95 #define EINFO_EIO_BANDWIDTH \
96 __einfo_uniqify ( EINFO_EIO, ( 8 - 0 ), \
98 #define EIO_NO_SLOTS \
99 __einfo_error ( EINFO_EIO_NO_SLOTS )
100 #define EINFO_EIO_NO_SLOTS \
101 __einfo_uniqify ( EINFO_EIO, ( 9 - 0 ), \
102 "No slots available" )
103 #define EIO_STREAM_TYPE \
104 __einfo_error ( EINFO_EIO_STREAM_TYPE )
105 #define EINFO_EIO_STREAM_TYPE \
106 __einfo_uniqify ( EINFO_EIO, ( 10 - 0 ), \
107 "Invalid stream type" )
109 __einfo_error ( EINFO_EIO_SLOT )
110 #define EINFO_EIO_SLOT \
111 __einfo_uniqify ( EINFO_EIO, ( 11 - 0 ), \
113 #define EIO_ENDPOINT \
114 __einfo_error ( EINFO_EIO_ENDPOINT )
115 #define EINFO_EIO_ENDPOINT \
116 __einfo_uniqify ( EINFO_EIO, ( 12 - 0 ), \
117 "Endpoint not enabled" )
119 __einfo_error ( EINFO_EIO_SHORT )
120 #define EINFO_EIO_SHORT \
121 __einfo_uniqify ( EINFO_EIO, ( 13 - 0 ), \
123 #define EIO_UNDERRUN \
124 __einfo_error ( EINFO_EIO_UNDERRUN )
125 #define EINFO_EIO_UNDERRUN \
126 __einfo_uniqify ( EINFO_EIO, ( 14 - 0 ), \
128 #define EIO_OVERRUN \
129 __einfo_error ( EINFO_EIO_OVERRUN )
130 #define EINFO_EIO_OVERRUN \
131 __einfo_uniqify ( EINFO_EIO, ( 15 - 0 ), \
133 #define EIO_VF_RING_FULL \
134 __einfo_error ( EINFO_EIO_VF_RING_FULL )
135 #define EINFO_EIO_VF_RING_FULL \
136 __einfo_uniqify ( EINFO_EIO, ( 16 - 0 ), \
137 "Virtual function event ring full" )
138 #define EIO_PARAMETER \
139 __einfo_error ( EINFO_EIO_PARAMETER )
140 #define EINFO_EIO_PARAMETER \
141 __einfo_uniqify ( EINFO_EIO, ( 17 - 0 ), \
143 #define EIO_BANDWIDTH_OVERRUN \
144 __einfo_error ( EINFO_EIO_BANDWIDTH_OVERRUN )
145 #define EINFO_EIO_BANDWIDTH_OVERRUN \
146 __einfo_uniqify ( EINFO_EIO, ( 18 - 0 ), \
147 "Bandwidth overrun" )
148 #define EIO_CONTEXT \
149 __einfo_error ( EINFO_EIO_CONTEXT )
150 #define EINFO_EIO_CONTEXT \
151 __einfo_uniqify ( EINFO_EIO, ( 19 - 0 ), \
152 "Context state error" )
153 #define EIO_NO_PING \
154 __einfo_error ( EINFO_EIO_NO_PING )
155 #define EINFO_EIO_NO_PING \
156 __einfo_uniqify ( EINFO_EIO, ( 20 - 0 ), \
158 #define EIO_RING_FULL \
159 __einfo_error ( EINFO_EIO_RING_FULL )
160 #define EINFO_EIO_RING_FULL \
161 __einfo_uniqify ( EINFO_EIO, ( 21 - 0 ), \
163 #define EIO_INCOMPATIBLE \
164 __einfo_error ( EINFO_EIO_INCOMPATIBLE )
165 #define EINFO_EIO_INCOMPATIBLE \
166 __einfo_uniqify ( EINFO_EIO, ( 22 - 0 ), \
167 "Incompatible device" )
169 __einfo_error ( EINFO_EIO_MISSED )
170 #define EINFO_EIO_MISSED \
171 __einfo_uniqify ( EINFO_EIO, ( 23 - 0 ), \
172 "Missed service error" )
173 #define EIO_CMD_STOPPED \
174 __einfo_error ( EINFO_EIO_CMD_STOPPED )
175 #define EINFO_EIO_CMD_STOPPED \
176 __einfo_uniqify ( EINFO_EIO, ( 24 - 0 ), \
177 "Command ring stopped" )
178 #define EIO_CMD_ABORTED \
179 __einfo_error ( EINFO_EIO_CMD_ABORTED )
180 #define EINFO_EIO_CMD_ABORTED \
181 __einfo_uniqify ( EINFO_EIO, ( 25 - 0 ), \
184 __einfo_error ( EINFO_EIO_STOP )
185 #define EINFO_EIO_STOP \
186 __einfo_uniqify ( EINFO_EIO, ( 26 - 0 ), \
188 #define EIO_STOP_LEN \
189 __einfo_error ( EINFO_EIO_STOP_LEN )
190 #define EINFO_EIO_STOP_LEN \
191 __einfo_uniqify ( EINFO_EIO, ( 27 - 0 ), \
192 "Stopped - length invalid" )
193 #define EIO_STOP_SHORT \
194 __einfo_error ( EINFO_EIO_STOP_SHORT )
195 #define EINFO_EIO_STOP_SHORT \
196 __einfo_uniqify ( EINFO_EIO, ( 28 - 0 ), \
197 "Stopped - short packet" )
198 #define EIO_LATENCY \
199 __einfo_error ( EINFO_EIO_LATENCY )
200 #define EINFO_EIO_LATENCY \
201 __einfo_uniqify ( EINFO_EIO, ( 29 - 0 ), \
202 "Maximum exit latency too large" )
204 __einfo_error ( EINFO_EIO_ISOCH )
205 #define EINFO_EIO_ISOCH \
206 __einfo_uniqify ( EINFO_EIO, ( 31 - 0 ), \
207 "Isochronous buffer overrun" )
208 #define EPROTO_LOST \
209 __einfo_error ( EINFO_EPROTO_LOST )
210 #define EINFO_EPROTO_LOST \
211 __einfo_uniqify ( EINFO_EPROTO, ( 32 - 32 ), \
213 #define EPROTO_UNDEFINED \
214 __einfo_error ( EINFO_EPROTO_UNDEFINED )
215 #define EINFO_EPROTO_UNDEFINED \
216 __einfo_uniqify ( EINFO_EPROTO, ( 33 - 32 ), \
218 #define EPROTO_STREAM_ID \
219 __einfo_error ( EINFO_EPROTO_STREAM_ID )
220 #define EINFO_EPROTO_STREAM_ID \
221 __einfo_uniqify ( EINFO_EPROTO, ( 34 - 32 ), \
222 "Invalid stream ID" )
223 #define EPROTO_SECONDARY \
224 __einfo_error ( EINFO_EPROTO_SECONDARY )
225 #define EINFO_EPROTO_SECONDARY \
226 __einfo_uniqify ( EINFO_EPROTO, ( 35 - 32 ), \
227 "Secondary bandwidth error" )
228 #define EPROTO_SPLIT \
229 __einfo_error ( EINFO_EPROTO_SPLIT )
230 #define EINFO_EPROTO_SPLIT \
231 __einfo_uniqify ( EINFO_EPROTO, ( 36 - 32 ), \
232 "Split transaction error" )
/* ECODE(code): select the unique error for a raw completion code;
 * masking with 31 keeps the EUNIQ index in range for both halves. */
233 #define ECODE(code) \
234 ( ( (code) < 32 ) ? \
235 EUNIQ ( EINFO_EIO, ( (code) & 31 ), EIO_DATA, EIO_BABBLE, \
236 EIO_USB, EIO_TRB, EIO_STALL, EIO_RESOURCE, \
237 EIO_BANDWIDTH, EIO_NO_SLOTS, EIO_STREAM_TYPE, \
238 EIO_SLOT, EIO_ENDPOINT, EIO_SHORT, EIO_UNDERRUN, \
239 EIO_OVERRUN, EIO_VF_RING_FULL, EIO_PARAMETER, \
240 EIO_BANDWIDTH_OVERRUN, EIO_CONTEXT, EIO_NO_PING, \
241 EIO_RING_FULL, EIO_INCOMPATIBLE, EIO_MISSED, \
242 EIO_CMD_STOPPED, EIO_CMD_ABORTED, EIO_STOP, \
243 EIO_STOP_LEN, EIO_STOP_SHORT, EIO_LATENCY, \
246 EUNIQ ( EINFO_EPROTO, ( (code) & 31 ), EPROTO_LOST, \
247 EPROTO_UNDEFINED, EPROTO_STREAM_ID, \
248 EPROTO_SECONDARY, EPROTO_SPLIT ) : \
251 /******************************************************************************
255 ******************************************************************************
/* Initialise xHCI device: locates the operational, runtime and doorbell
 * register banks from CAPLENGTH/RTSOFF/DBOFF, then caches HCSPARAMS1
 * (slots/interrupters/ports), HCSPARAMS2 (scratchpad count), HCCPARAMS1
 * (64-bit addressing, context size shift, xECP pointer) and the
 * controller page size.  NOTE(review): mangled extraction — local
 * variable declarations and some lines are missing (embedded line
 * numbers jump); code tokens left byte-identical. */
261 * @v xhci xHCI device
262 * @v regs MMIO registers
264 static void xhci_init ( struct xhci_device
*xhci
, void *regs
) {
273 /* Locate capability, operational, runtime, and doorbell registers */
275 caplength
= readb ( xhci
->cap
+ XHCI_CAP_CAPLENGTH
);
276 rtsoff
= readl ( xhci
->cap
+ XHCI_CAP_RTSOFF
);
277 dboff
= readl ( xhci
->cap
+ XHCI_CAP_DBOFF
);
278 xhci
->op
= ( xhci
->cap
+ caplength
);
279 xhci
->run
= ( xhci
->cap
+ rtsoff
);
280 xhci
->db
= ( xhci
->cap
+ dboff
);
281 DBGC2 ( xhci
, "XHCI %s cap %08lx op %08lx run %08lx db %08lx\n",
282 xhci
->name
, virt_to_phys ( xhci
->cap
),
283 virt_to_phys ( xhci
->op
), virt_to_phys ( xhci
->run
),
284 virt_to_phys ( xhci
->db
) );
286 /* Read structural parameters 1 */
287 hcsparams1
= readl ( xhci
->cap
+ XHCI_CAP_HCSPARAMS1
);
288 xhci
->slots
= XHCI_HCSPARAMS1_SLOTS ( hcsparams1
);
289 xhci
->intrs
= XHCI_HCSPARAMS1_INTRS ( hcsparams1
);
290 xhci
->ports
= XHCI_HCSPARAMS1_PORTS ( hcsparams1
);
291 DBGC ( xhci
, "XHCI %s has %d slots %d intrs %d ports\n",
292 xhci
->name
, xhci
->slots
, xhci
->intrs
, xhci
->ports
);
294 /* Read structural parameters 2 */
295 hcsparams2
= readl ( xhci
->cap
+ XHCI_CAP_HCSPARAMS2
);
296 xhci
->scratch
.count
= XHCI_HCSPARAMS2_SCRATCHPADS ( hcsparams2
);
297 DBGC2 ( xhci
, "XHCI %s needs %d scratchpads\n",
298 xhci
->name
, xhci
->scratch
.count
);
300 /* Read capability parameters 1 */
301 hccparams1
= readl ( xhci
->cap
+ XHCI_CAP_HCCPARAMS1
);
302 xhci
->addr64
= XHCI_HCCPARAMS1_ADDR64 ( hccparams1
);
303 xhci
->csz_shift
= XHCI_HCCPARAMS1_CSZ_SHIFT ( hccparams1
);
304 xhci
->xecp
= XHCI_HCCPARAMS1_XECP ( hccparams1
);
/* Read page size; the asserts check it is a non-zero power of two. */
307 pagesize
= readl ( xhci
->op
+ XHCI_OP_PAGESIZE
);
308 xhci
->pagesize
= XHCI_PAGESIZE ( pagesize
);
309 assert ( xhci
->pagesize
!= 0 );
310 assert ( ( ( xhci
->pagesize
) & ( xhci
->pagesize
- 1 ) ) == 0 );
311 DBGC2 ( xhci
, "XHCI %s page size %zd bytes\n",
312 xhci
->name
, xhci
->pagesize
);
/* Walks the extended capability list (rooted at xhci->xecp) reading
 * each header via readl() and returning the offset whose XHCI_XECP_ID
 * matches the requested id.  NOTE(review): mangled extraction — the
 * loop scaffolding, declarations and returns are partially missing;
 * code tokens left byte-identical. */
316 * Find extended capability
318 * @v xhci xHCI device
319 * @v id Capability ID
320 * @v offset Offset to previous extended capability instance, or zero
321 * @ret offset Offset to extended capability, or zero if not found
323 static unsigned int xhci_extended_capability ( struct xhci_device
*xhci
,
325 unsigned int offset
) {
329 /* Locate the extended capability */
332 /* Locate first or next capability as applicable */
334 xecp
= readl ( xhci
->cap
+ offset
);
335 next
= XHCI_XECP_NEXT ( xecp
);
343 /* Check if this is the requested capability */
344 xecp
= readl ( xhci
->cap
+ offset
);
345 if ( XHCI_XECP_ID ( xecp
) == id
)
/* Write a potentially 64-bit register value: on 32-bit builds uses two
 * writel()s (high dword zero, cannot fail); otherwise rejects addresses
 * above 4GiB when the controller lacks 64-bit support, else writeq().
 * NOTE(review): mangled extraction — error-path and return lines are
 * missing; code tokens left byte-identical. */
351 * Write potentially 64-bit register
353 * @v xhci xHCI device
355 * @v reg Register address
356 * @ret rc Return status code
358 static inline __attribute__ (( always_inline
)) int
359 xhci_writeq ( struct xhci_device
*xhci
, physaddr_t value
, void *reg
) {
361 /* If this is a 32-bit build, then this can never fail
362 * (allowing the compiler to optimise out the error path).
364 if ( sizeof ( value
) <= sizeof ( uint32_t ) ) {
365 writel ( value
, reg
);
366 writel ( 0, ( reg
+ sizeof ( uint32_t ) ) );
370 /* If the device does not support 64-bit addresses and this
371 * address is outside the 32-bit address space, then fail.
373 if ( ( value
& ~0xffffffffULL
) && ! xhci
->addr64
) {
374 DBGC ( xhci
, "XHCI %s cannot access address %lx\n",
379 /* If this is a 64-bit build, then writeq() is available */
380 writeq ( value
, reg
);
/* Rounds a buffer length up to a power-of-two alignment (via fls) with
 * a floor of XHCI_MIN_ALIGN, so the buffer cannot straddle a page.
 * NOTE(review): mangled extraction — the local declaration and return
 * are missing; code tokens left byte-identical. */
385 * Calculate buffer alignment
388 * @ret align Buffer alignment
390 * Determine alignment required for a buffer which must be aligned to
391 * at least XHCI_MIN_ALIGN and which must not cross a page boundary.
393 static inline size_t xhci_align ( size_t len
) {
396 /* Align to own length (rounded up to a power of two) */
397 align
= ( 1 << fls ( len
- 1 ) );
399 /* Round up to XHCI_MIN_ALIGN if needed */
400 if ( align
< XHCI_MIN_ALIGN
)
401 align
= XHCI_MIN_ALIGN
;
/* Device context offset: DCI scaled by the controller's context size
 * shift (csz_shift, cached from HCCPARAMS1 in xhci_init).
 * NOTE(review): mangled extraction — second parameter line and closing
 * brace are missing; code tokens left byte-identical. */
407 * Calculate device context offset
409 * @v xhci xHCI device
410 * @v ctx Context index
412 static inline size_t xhci_device_context_offset ( struct xhci_device
*xhci
,
415 return ( XHCI_DCI ( ctx
) << xhci
->csz_shift
);
/* Input context offset: ICI scaled by csz_shift, mirroring
 * xhci_device_context_offset above.  NOTE(review): mangled extraction —
 * second parameter line and closing brace are missing; code tokens left
 * byte-identical. */
419 * Calculate input context offset
421 * @v xhci xHCI device
422 * @v ctx Context index
424 static inline size_t xhci_input_context_offset ( struct xhci_device
*xhci
,
427 return ( XHCI_ICI ( ctx
) << xhci
->csz_shift
);
430 /******************************************************************************
434 ******************************************************************************
/* Debug dump of the main operational registers (USBCMD, USBSTS,
 * PAGESIZE, DNCTRL, CONFIG) with decoded run/hcrst/hch flags.
 * NOTE(review): mangled extraction — declarations and the
 * debugging-enabled early return are missing; code tokens left
 * byte-identical. */
438 * Dump host controller registers
440 * @v xhci xHCI device
442 static inline void xhci_dump ( struct xhci_device
*xhci
) {
449 /* Do nothing unless debugging is enabled */
454 usbcmd
= readl ( xhci
->op
+ XHCI_OP_USBCMD
);
455 DBGC ( xhci
, "XHCI %s USBCMD %08x%s%s\n", xhci
->name
, usbcmd
,
456 ( ( usbcmd
& XHCI_USBCMD_RUN
) ?
" run" : "" ),
457 ( ( usbcmd
& XHCI_USBCMD_HCRST
) ?
" hcrst" : "" ) );
460 usbsts
= readl ( xhci
->op
+ XHCI_OP_USBSTS
);
461 DBGC ( xhci
, "XHCI %s USBSTS %08x%s\n", xhci
->name
, usbsts
,
462 ( ( usbsts
& XHCI_USBSTS_HCH
) ?
" hch" : "" ) );
465 pagesize
= readl ( xhci
->op
+ XHCI_OP_PAGESIZE
);
466 DBGC ( xhci
, "XHCI %s PAGESIZE %08x\n", xhci
->name
, pagesize
);
469 dnctrl
= readl ( xhci
->op
+ XHCI_OP_DNCTRL
);
470 DBGC ( xhci
, "XHCI %s DNCTRL %08x\n", xhci
->name
, dnctrl
);
473 config
= readl ( xhci
->op
+ XHCI_OP_CONFIG
);
474 DBGC ( xhci
, "XHCI %s CONFIG %08x\n", xhci
->name
, config
);
/* Debug dump of one port's registers (PORTSC with decoded
 * ccs/ped/pr/pp flags and PSIV, PORTPMSC, PORTLI, PORTHLPMC).
 * NOTE(review): mangled extraction — declarations and the
 * debugging-enabled early return are missing; code tokens left
 * byte-identical. */
478 * Dump port registers
480 * @v xhci xHCI device
481 * @v port Port number
483 static inline void xhci_dump_port ( struct xhci_device
*xhci
,
484 unsigned int port
) {
490 /* Do nothing unless debugging is enabled */
495 portsc
= readl ( xhci
->op
+ XHCI_OP_PORTSC ( port
) );
496 DBGC ( xhci
, "XHCI %s-%d PORTSC %08x%s%s%s%s psiv=%d\n",
497 xhci
->name
, port
, portsc
,
498 ( ( portsc
& XHCI_PORTSC_CCS
) ?
" ccs" : "" ),
499 ( ( portsc
& XHCI_PORTSC_PED
) ?
" ped" : "" ),
500 ( ( portsc
& XHCI_PORTSC_PR
) ?
" pr" : "" ),
501 ( ( portsc
& XHCI_PORTSC_PP
) ?
" pp" : "" ),
502 XHCI_PORTSC_PSIV ( portsc
) );
505 portpmsc
= readl ( xhci
->op
+ XHCI_OP_PORTPMSC ( port
) );
506 DBGC ( xhci
, "XHCI %s-%d PORTPMSC %08x\n", xhci
->name
, port
, portpmsc
);
509 portli
= readl ( xhci
->op
+ XHCI_OP_PORTLI ( port
) );
510 DBGC ( xhci
, "XHCI %s-%d PORTLI %08x\n", xhci
->name
, port
, portli
);
513 porthlpmc
= readl ( xhci
->op
+ XHCI_OP_PORTHLPMC ( port
) );
514 DBGC ( xhci
, "XHCI %s-%d PORTHLPMC %08x\n",
515 xhci
->name
, port
, porthlpmc
);
518 /******************************************************************************
522 ******************************************************************************
/* Flag consulted by xhci_legacy_release() below: when non-zero,
 * ownership is never handed back to the BIOS. */
525 /** Prevent the release of ownership back to BIOS */
526 static int xhci_legacy_prevent_release
;
/* Locate the USB legacy support extended capability and, if the BIOS
 * currently owns the controller, record the capability offset in
 * xhci->legacy for later claim/release.  NOTE(review): mangled
 * extraction — declarations, early returns and closing braces are
 * missing; code tokens left byte-identical. */
529 * Initialise USB legacy support
531 * @v xhci xHCI device
533 static void xhci_legacy_init ( struct xhci_device
*xhci
) {
537 /* Locate USB legacy support capability (if present) */
538 legacy
= xhci_extended_capability ( xhci
, XHCI_XECP_ID_LEGACY
, 0 );
540 /* Not an error; capability may not be present */
541 DBGC ( xhci
, "XHCI %s has no USB legacy support capability\n",
546 /* Check if legacy USB support is enabled */
547 bios
= readb ( xhci
->cap
+ legacy
+ XHCI_USBLEGSUP_BIOS
);
548 if ( ! ( bios
& XHCI_USBLEGSUP_BIOS_OWNED
) ) {
549 /* Not an error; already owned by OS */
550 DBGC ( xhci
, "XHCI %s USB legacy support already disabled\n",
555 /* Record presence of USB legacy support capability */
556 xhci
->legacy
= legacy
;
/* Claim controller ownership from the BIOS: set the OS-owned semaphore
 * byte, poll the BIOS-owned byte for up to XHCI_USBLEGSUP_MAX_WAIT_MS,
 * and on timeout forcibly disable all SMIs by zeroing CTLSTS.
 * NOTE(review): mangled extraction — declarations, mdelay/return lines
 * and closing braces are missing; code tokens left byte-identical. */
560 * Claim ownership from BIOS
562 * @v xhci xHCI device
564 static void xhci_legacy_claim ( struct xhci_device
*xhci
) {
569 /* Do nothing unless legacy support capability is present */
570 if ( ! xhci
->legacy
)
573 /* Claim ownership */
574 writeb ( XHCI_USBLEGSUP_OS_OWNED
,
575 xhci
->cap
+ xhci
->legacy
+ XHCI_USBLEGSUP_OS
);
577 /* Wait for BIOS to release ownership */
578 for ( i
= 0 ; i
< XHCI_USBLEGSUP_MAX_WAIT_MS
; i
++ ) {
580 /* Check if BIOS has released ownership */
581 bios
= readb ( xhci
->cap
+ xhci
->legacy
+ XHCI_USBLEGSUP_BIOS
);
582 if ( ! ( bios
& XHCI_USBLEGSUP_BIOS_OWNED
) ) {
583 DBGC ( xhci
, "XHCI %s claimed ownership from BIOS\n",
585 ctlsts
= readl ( xhci
->cap
+ xhci
->legacy
+
586 XHCI_USBLEGSUP_CTLSTS
);
588 DBGC ( xhci
, "XHCI %s warning: BIOS retained "
589 "SMIs: %08x\n", xhci
->name
, ctlsts
);
598 /* BIOS did not release ownership. Claim it forcibly by
599 * disabling all SMIs.
601 DBGC ( xhci
, "XHCI %s could not claim ownership from BIOS: forcibly "
602 "disabling SMIs\n", xhci
->name
);
603 writel ( 0, xhci
->cap
+ xhci
->legacy
+ XHCI_USBLEGSUP_CTLSTS
);
/* Hand ownership back to the BIOS by clearing the OS-owned semaphore
 * byte, unless xhci_legacy_prevent_release is set.  NOTE(review):
 * mangled extraction — early-return lines and closing braces are
 * missing; code tokens left byte-identical. */
607 * Release ownership back to BIOS
609 * @v xhci xHCI device
611 static void xhci_legacy_release ( struct xhci_device
*xhci
) {
613 /* Do nothing unless legacy support capability is present */
614 if ( ! xhci
->legacy
)
617 /* Do nothing if releasing ownership is prevented */
618 if ( xhci_legacy_prevent_release
) {
619 DBGC ( xhci
, "XHCI %s not releasing ownership to BIOS\n",
624 /* Release ownership */
625 writeb ( 0, xhci
->cap
+ xhci
->legacy
+ XHCI_USBLEGSUP_OS
);
626 DBGC ( xhci
, "XHCI %s released ownership to BIOS\n", xhci
->name
);
629 /******************************************************************************
631 * Supported protocols
633 ******************************************************************************
/* Render a PSI's mantissa/exponent as a human-readable "<n><k|M|G>bps"
 * string in a static buffer (not reentrant — debug use only, per the
 * header comment).  NOTE(review): mangled extraction — the return
 * statement and closing brace are missing; code tokens left
 * byte-identical. */
637 * Transcribe port speed (for debugging)
639 * @v psi Protocol speed ID
640 * @ret speed Transcribed speed
642 static inline const char * xhci_speed_name ( uint32_t psi
) {
643 static const char *exponents
[4] = { "", "k", "M", "G" };
644 static char buf
[ 10 /* "xxxxxXbps" + NUL */ ];
645 unsigned int mantissa
;
646 unsigned int exponent
;
648 /* Extract mantissa and exponent */
649 mantissa
= XHCI_SUPPORTED_PSI_MANTISSA ( psi
);
650 exponent
= XHCI_SUPPORTED_PSI_EXPONENT ( psi
);
652 /* Transcribe speed */
653 snprintf ( buf
, sizeof ( buf
), "%d%sbps",
654 mantissa
, exponents
[exponent
] );
/* Iterate over "supported protocol" extended capabilities and return
 * the one whose port range (offset/count from XHCI_SUPPORTED_PORTS)
 * contains the given port; zero if none matches.  NOTE(review):
 * mangled extraction — loop tail, declarations and returns are
 * missing; code tokens left byte-identical. */
659 * Find supported protocol extended capability for a port
661 * @v xhci xHCI device
662 * @v port Port number
663 * @ret supported Offset to extended capability, or zero if not found
665 static unsigned int xhci_supported_protocol ( struct xhci_device
*xhci
,
666 unsigned int port
) {
667 unsigned int supported
= 0;
672 /* Iterate over all supported protocol structures */
673 while ( ( supported
= xhci_extended_capability ( xhci
,
674 XHCI_XECP_ID_SUPPORTED
,
677 /* Determine port range */
678 ports
= readl ( xhci
->cap
+ supported
+ XHCI_SUPPORTED_PORTS
);
679 offset
= XHCI_SUPPORTED_PORTS_OFFSET ( ports
);
680 count
= XHCI_SUPPORTED_PORTS_COUNT ( ports
);
682 /* Check if port lies within this range */
683 if ( ( port
- offset
) < count
)
687 DBGC ( xhci
, "XHCI %s-%d has no supported protocol\n",
/* Determine a port's USB protocol version from its supported-protocol
 * capability (XHCI_SUPPORTED_REVISION), and emit a debug description of
 * the protocol name, slot type and PSI speed table; PSIVs are noted as
 * "(ignored)" when the XHCI_BAD_PSIV quirk is set.  NOTE(review):
 * mangled extraction — declarations, the no-protocol early return and
 * the final return are missing; code tokens left byte-identical. */
695 * @v xhci xHCI device
696 * @v port Port number
697 * @ret protocol USB protocol, or zero if not found
699 static unsigned int xhci_port_protocol ( struct xhci_device
*xhci
,
700 unsigned int port
) {
701 unsigned int supported
= xhci_supported_protocol ( xhci
, port
);
706 unsigned int protocol
;
716 /* Fail if there is no supported protocol */
720 /* Determine protocol version */
721 revision
= readl ( xhci
->cap
+ supported
+ XHCI_SUPPORTED_REVISION
);
722 protocol
= XHCI_SUPPORTED_REVISION_VER ( revision
);
724 /* Describe port protocol */
726 name
.raw
= cpu_to_le32 ( readl ( xhci
->cap
+ supported
+
727 XHCI_SUPPORTED_NAME
) );
729 slot
= readl ( xhci
->cap
+ supported
+ XHCI_SUPPORTED_SLOT
);
730 type
= XHCI_SUPPORTED_SLOT_TYPE ( slot
);
731 DBGC2 ( xhci
, "XHCI %s-%d %sv%04x type %d",
732 xhci
->name
, port
, name
.text
, protocol
, type
);
733 ports
= readl ( xhci
->cap
+ supported
+ XHCI_SUPPORTED_PORTS
);
734 psic
= XHCI_SUPPORTED_PORTS_PSIC ( ports
);
736 DBGC2 ( xhci
, " speeds" );
737 for ( i
= 0 ; i
< psic
; i
++ ) {
738 psi
= readl ( xhci
->cap
+ supported
+
739 XHCI_SUPPORTED_PSI ( i
) );
740 psiv
= XHCI_SUPPORTED_PSI_VALUE ( psi
);
741 DBGC2 ( xhci
, " %d:%s", psiv
,
742 xhci_speed_name ( psi
) );
745 if ( xhci
->quirks
& XHCI_BAD_PSIV
)
746 DBGC2 ( xhci
, " (ignored)" );
747 DBGC2 ( xhci
, "\n" );
/* Read a port's slot type from its supported-protocol capability
 * (XHCI_SUPPORTED_SLOT).  NOTE(review): mangled extraction —
 * declarations, error return and final return are missing; code tokens
 * left byte-identical. */
754 * Find port slot type
756 * @v xhci xHCI device
757 * @v port Port number
758 * @ret type Slot type, or negative error
760 static int xhci_port_slot_type ( struct xhci_device
*xhci
, unsigned int port
) {
761 unsigned int supported
= xhci_supported_protocol ( xhci
, port
);
765 /* Fail if there is no supported protocol */
770 slot
= readl ( xhci
->cap
+ supported
+ XHCI_SUPPORTED_SLOT
);
771 type
= XHCI_SUPPORTED_SLOT_TYPE ( slot
);
/* Map a protocol speed ID value to a USB speed: scan the PSI table
 * unless the XHCI_BAD_PSIV quirk is set; on a table miss, record the
 * quirk and fall through to the fixed default PSIV->speed mapping.
 * NOTE(review): mangled extraction — declarations, break/return lines,
 * the switch statement header and closing braces are missing; code
 * tokens left byte-identical. */
779 * @v xhci xHCI device
780 * @v port Port number
781 * @v psiv Protocol speed ID value
782 * @ret speed Port speed, or negative error
784 static int xhci_port_speed ( struct xhci_device
*xhci
, unsigned int port
,
785 unsigned int psiv
) {
786 unsigned int supported
= xhci_supported_protocol ( xhci
, port
);
788 unsigned int mantissa
;
789 unsigned int exponent
;
795 /* Fail if there is no supported protocol */
799 /* Get protocol speed ID count */
800 ports
= readl ( xhci
->cap
+ supported
+ XHCI_SUPPORTED_PORTS
);
801 psic
= XHCI_SUPPORTED_PORTS_PSIC ( ports
);
803 /* Use protocol speed ID table unless device is known to be faulty */
804 if ( ! ( xhci
->quirks
& XHCI_BAD_PSIV
) ) {
806 /* Iterate over PSI dwords looking for a match */
807 for ( i
= 0 ; i
< psic
; i
++ ) {
808 psi
= readl ( xhci
->cap
+ supported
+
809 XHCI_SUPPORTED_PSI ( i
) );
810 if ( psiv
== XHCI_SUPPORTED_PSI_VALUE ( psi
) ) {
811 mantissa
= XHCI_SUPPORTED_PSI_MANTISSA ( psi
);
812 exponent
= XHCI_SUPPORTED_PSI_EXPONENT ( psi
);
813 speed
= USB_SPEED ( mantissa
, exponent
);
818 /* Record device as faulty if no match is found */
820 DBGC ( xhci
, "XHCI %s-%d spurious PSI value %d: "
821 "assuming PSI table is invalid\n",
822 xhci
->name
, port
, psiv
);
823 xhci
->quirks
|= XHCI_BAD_PSIV
;
827 /* Use the default mappings */
829 case XHCI_SPEED_LOW
: return USB_SPEED_LOW
;
830 case XHCI_SPEED_FULL
: return USB_SPEED_FULL
;
831 case XHCI_SPEED_HIGH
: return USB_SPEED_HIGH
;
832 case XHCI_SPEED_SUPER
: return USB_SPEED_SUPER
;
834 DBGC ( xhci
, "XHCI %s-%d unrecognised PSI value %d\n",
835 xhci
->name
, port
, psiv
);
/* Inverse of xhci_port_speed(): map a USB speed back to a protocol
 * speed ID value, using the fixed defaults when the port has no PSI
 * table (psic == 0) or the XHCI_BAD_PSIV quirk is set, else scanning
 * the PSI table for a mantissa/exponent match.  NOTE(review): mangled
 * extraction — declarations, the switch header, returns and closing
 * braces are missing; code tokens left byte-identical. */
841 * Find protocol speed ID value
843 * @v xhci xHCI device
844 * @v port Port number
846 * @ret psiv Protocol speed ID value, or negative error
848 static int xhci_port_psiv ( struct xhci_device
*xhci
, unsigned int port
,
849 unsigned int speed
) {
850 unsigned int supported
= xhci_supported_protocol ( xhci
, port
);
852 unsigned int mantissa
;
853 unsigned int exponent
;
859 /* Fail if there is no supported protocol */
863 /* Get protocol speed ID count */
864 ports
= readl ( xhci
->cap
+ supported
+ XHCI_SUPPORTED_PORTS
);
865 psic
= XHCI_SUPPORTED_PORTS_PSIC ( ports
);
867 /* Use the default mappings if applicable */
868 if ( ( psic
== 0 ) || ( xhci
->quirks
& XHCI_BAD_PSIV
) ) {
870 case USB_SPEED_LOW
: return XHCI_SPEED_LOW
;
871 case USB_SPEED_FULL
: return XHCI_SPEED_FULL
;
872 case USB_SPEED_HIGH
: return XHCI_SPEED_HIGH
;
873 case USB_SPEED_SUPER
: return XHCI_SPEED_SUPER
;
875 DBGC ( xhci
, "XHCI %s-%d non-standard speed %d\n",
876 xhci
->name
, port
, speed
);
881 /* Iterate over PSI dwords looking for a match */
882 for ( i
= 0 ; i
< psic
; i
++ ) {
883 psi
= readl ( xhci
->cap
+ supported
+ XHCI_SUPPORTED_PSI ( i
));
884 mantissa
= XHCI_SUPPORTED_PSI_MANTISSA ( psi
);
885 exponent
= XHCI_SUPPORTED_PSI_EXPONENT ( psi
);
886 if ( speed
== USB_SPEED ( mantissa
, exponent
) ) {
887 psiv
= XHCI_SUPPORTED_PSI_VALUE ( psi
);
892 DBGC ( xhci
, "XHCI %s-%d unrepresentable speed %#x\n",
893 xhci
->name
, port
, speed
);
897 /******************************************************************************
899 * Device context base address array
901 ******************************************************************************
/* Allocate and zero the device context base address array (slots + 1
 * entries), DMA-map it, and program its bus address into DCBAAP via
 * xhci_writeq(); frees the array on failure.  NOTE(review): mangled
 * extraction — declarations, goto labels, returns and closing braces
 * are missing; code tokens left byte-identical. */
905 * Allocate device context base address array
907 * @v xhci xHCI device
908 * @ret rc Return status code
910 static int xhci_dcbaa_alloc ( struct xhci_device
*xhci
) {
915 /* Allocate and initialise structure. Must be at least
916 * 64-byte aligned and must not cross a page boundary, so
917 * align on its own size (rounded up to a power of two and
918 * with a minimum of 64 bytes).
920 len
= ( ( xhci
->slots
+ 1 ) * sizeof ( xhci
->dcbaa
.context
[0] ) );
921 xhci
->dcbaa
.context
= dma_alloc ( xhci
->dma
, &xhci
->dcbaa
.map
, len
,
922 xhci_align ( len
) );
923 if ( ! xhci
->dcbaa
.context
) {
924 DBGC ( xhci
, "XHCI %s could not allocate DCBAA\n", xhci
->name
);
928 memset ( xhci
->dcbaa
.context
, 0, len
);
930 /* Program DCBAA pointer */
931 dcbaap
= dma ( &xhci
->dcbaa
.map
, xhci
->dcbaa
.context
);
932 if ( ( rc
= xhci_writeq ( xhci
, dcbaap
,
933 xhci
->op
+ XHCI_OP_DCBAAP
) ) != 0 )
936 DBGC2 ( xhci
, "XHCI %s DCBAA at [%08lx,%08lx)\n", xhci
->name
,
937 virt_to_phys ( xhci
->dcbaa
.context
),
938 ( virt_to_phys ( xhci
->dcbaa
.context
) + len
) );
/* Error path: undo the DMA allocation. */
942 dma_free ( &xhci
->dcbaa
.map
, xhci
->dcbaa
.context
, len
);
/* Free the DCBAA: sanity-check that all slot entries are already zero,
 * clear the DCBAAP register, then release the DMA allocation.
 * NOTE(review): mangled extraction — declarations are missing; code
 * tokens left byte-identical. */
948 * Free device context base address array
950 * @v xhci xHCI device
952 static void xhci_dcbaa_free ( struct xhci_device
*xhci
) {
957 for ( i
= 0 ; i
<= xhci
->slots
; i
++ )
958 assert ( xhci
->dcbaa
.context
[i
] == 0 );
960 /* Clear DCBAA pointer */
961 xhci_writeq ( xhci
, 0, xhci
->op
+ XHCI_OP_DCBAAP
);
964 len
= ( ( xhci
->slots
+ 1 ) * sizeof ( xhci
->dcbaa
.context
[0] ) );
965 dma_free ( &xhci
->dcbaa
.map
, xhci
->dcbaa
.context
, len
);
968 /******************************************************************************
972 ******************************************************************************
/* Allocate scratch->count page-sized scratchpad buffers plus the
 * scratchpad pointer array, populate each array entry with the bus
 * address of its page (little-endian 64-bit), and install the array's
 * bus address in DCBAA entry 0 — hence the assert that the DCBAA is
 * already allocated.  Unwinds both allocations on failure.
 * NOTE(review): mangled extraction — declarations, returns, goto
 * labels and closing braces are missing; code tokens left
 * byte-identical. */
976 * Allocate scratchpad buffers
978 * @v xhci xHCI device
979 * @ret rc Return status code
981 static int xhci_scratchpad_alloc ( struct xhci_device
*xhci
) {
982 struct xhci_scratchpad
*scratch
= &xhci
->scratch
;
989 /* Do nothing if no scratchpad buffers are used */
990 if ( ! scratch
->count
)
993 /* Allocate scratchpad buffers */
994 buffer_len
= ( scratch
->count
* xhci
->pagesize
);
995 scratch
->buffer
= dma_umalloc ( xhci
->dma
, &scratch
->buffer_map
,
996 buffer_len
, xhci
->pagesize
);
997 if ( ! scratch
->buffer
) {
998 DBGC ( xhci
, "XHCI %s could not allocate scratchpad buffers\n",
1003 memset_user ( scratch
->buffer
, 0, 0, buffer_len
);
1005 /* Allocate scratchpad array */
1006 array_len
= ( scratch
->count
* sizeof ( scratch
->array
[0] ) );
1007 scratch
->array
= dma_alloc ( xhci
->dma
, &scratch
->array_map
,
1008 array_len
, xhci_align ( array_len
) );
1009 if ( ! scratch
->array
) {
1010 DBGC ( xhci
, "XHCI %s could not allocate scratchpad buffer "
1011 "array\n", xhci
->name
);
1013 goto err_alloc_array
;
1016 /* Populate scratchpad array */
1017 addr
= dma_phys ( &scratch
->buffer_map
,
1018 user_to_phys ( scratch
->buffer
, 0 ) );
1019 for ( i
= 0 ; i
< scratch
->count
; i
++ ) {
1020 scratch
->array
[i
] = cpu_to_le64 ( addr
);
1021 addr
+= xhci
->pagesize
;
1024 /* Set scratchpad array pointer */
1025 assert ( xhci
->dcbaa
.context
!= NULL
);
1026 xhci
->dcbaa
.context
[0] = cpu_to_le64 ( dma ( &scratch
->array_map
,
1029 DBGC2 ( xhci
, "XHCI %s scratchpad [%08lx,%08lx) array [%08lx,%08lx)\n",
1030 xhci
->name
, user_to_phys ( scratch
->buffer
, 0 ),
1031 user_to_phys ( scratch
->buffer
, buffer_len
),
1032 virt_to_phys ( scratch
->array
),
1033 ( virt_to_phys ( scratch
->array
) + array_len
) );
/* Error unwind: free array, then buffers. */
1036 dma_free ( &scratch
->array_map
, scratch
->array
, array_len
);
1038 dma_ufree ( &scratch
->buffer_map
, scratch
->buffer
, buffer_len
);
/* Free scratchpad resources in reverse allocation order: clear DCBAA
 * entry 0, free the pointer array, then free the page buffers.
 * NOTE(review): mangled extraction — declarations and early return
 * are missing; code tokens left byte-identical. */
1044 * Free scratchpad buffers
1046 * @v xhci xHCI device
1048 static void xhci_scratchpad_free ( struct xhci_device
*xhci
) {
1049 struct xhci_scratchpad
*scratch
= &xhci
->scratch
;
1053 /* Do nothing if no scratchpad buffers are used */
1054 if ( ! scratch
->count
)
1057 /* Clear scratchpad array pointer */
1058 assert ( xhci
->dcbaa
.context
!= NULL
);
1059 xhci
->dcbaa
.context
[0] = 0;
1061 /* Free scratchpad array */
1062 array_len
= ( scratch
->count
* sizeof ( scratch
->array
[0] ) );
1063 dma_free ( &scratch
->array_map
, scratch
->array
, array_len
);
1065 /* Free scratchpad buffers */
1066 buffer_len
= ( scratch
->count
* xhci
->pagesize
);
1067 dma_ufree ( &scratch
->buffer_map
, scratch
->buffer
, buffer_len
);
1070 /******************************************************************************
1072 * Run / stop / reset
1074 ******************************************************************************
/* Start the controller: program the enabled slot count into CONFIG
 * (read-modify-write of MAX_SLOTS_EN) and set the USBCMD run/stop bit.
 * NOTE(review): mangled extraction — declarations are missing; code
 * tokens left byte-identical. */
1080 * @v xhci xHCI device
1082 static void xhci_run ( struct xhci_device
*xhci
) {
1086 /* Configure number of device slots */
1087 config
= readl ( xhci
->op
+ XHCI_OP_CONFIG
);
1088 config
&= ~XHCI_CONFIG_MAX_SLOTS_EN_MASK
;
1089 config
|= XHCI_CONFIG_MAX_SLOTS_EN ( xhci
->slots
);
1090 writel ( config
, xhci
->op
+ XHCI_OP_CONFIG
);
1092 /* Set run/stop bit */
1093 usbcmd
= readl ( xhci
->op
+ XHCI_OP_USBCMD
);
1094 usbcmd
|= XHCI_USBCMD_RUN
;
1095 writel ( usbcmd
, xhci
->op
+ XHCI_OP_USBCMD
);
/* Stop the controller: clear the USBCMD run/stop bit and poll USBSTS
 * for the halted (HCH) flag for up to XHCI_STOP_MAX_WAIT_MS, logging a
 * timeout otherwise.  NOTE(review): mangled extraction — declarations,
 * mdelay/return lines and closing braces are missing; code tokens left
 * byte-identical. */
1101 * @v xhci xHCI device
1102 * @ret rc Return status code
1104 static int xhci_stop ( struct xhci_device
*xhci
) {
1109 /* Clear run/stop bit */
1110 usbcmd
= readl ( xhci
->op
+ XHCI_OP_USBCMD
);
1111 usbcmd
&= ~XHCI_USBCMD_RUN
;
1112 writel ( usbcmd
, xhci
->op
+ XHCI_OP_USBCMD
);
1114 /* Wait for device to stop */
1115 for ( i
= 0 ; i
< XHCI_STOP_MAX_WAIT_MS
; i
++ ) {
1117 /* Check if device is stopped */
1118 usbsts
= readl ( xhci
->op
+ XHCI_OP_USBSTS
);
1119 if ( usbsts
& XHCI_USBSTS_HCH
)
1126 DBGC ( xhci
, "XHCI %s timed out waiting for stop\n", xhci
->name
);
/* Reset the controller: attempt a stop first (per the spec warning
 * about resetting a running controller, errors ignored), write HCRST
 * to USBCMD, and poll until the bit self-clears or
 * XHCI_RESET_MAX_WAIT_MS elapses.  NOTE(review): mangled extraction —
 * declarations, mdelay/return lines and closing braces are missing;
 * code tokens left byte-identical. */
1133 * @v xhci xHCI device
1134 * @ret rc Return status code
1136 static int xhci_reset ( struct xhci_device
*xhci
) {
1141 /* The xHCI specification states that resetting a running
1142 * device may result in undefined behaviour, so try stopping
1145 if ( ( rc
= xhci_stop ( xhci
) ) != 0 ) {
1146 /* Ignore errors and attempt to reset the device anyway */
1150 writel ( XHCI_USBCMD_HCRST
, xhci
->op
+ XHCI_OP_USBCMD
);
1152 /* Wait for reset to complete */
1153 for ( i
= 0 ; i
< XHCI_RESET_MAX_WAIT_MS
; i
++ ) {
1155 /* Check if reset is complete */
1156 usbcmd
= readl ( xhci
->op
+ XHCI_OP_USBCMD
);
1157 if ( ! ( usbcmd
& XHCI_USBCMD_HCRST
) )
1164 DBGC ( xhci
, "XHCI %s timed out waiting for reset\n", xhci
->name
);
/* Mark the device as permanently failed: reset the controller and then
 * zero all DCBAA entries (DCBAAP has been cleared by the reset path).
 * NOTE(review): mangled extraction — declarations, returns and error
 * handling lines are missing; code tokens left byte-identical. */
1169 * Mark xHCI device as permanently failed
1171 * @v xhci xHCI device
1172 * @ret rc Return status code
1174 static int xhci_fail ( struct xhci_device
*xhci
) {
1178 /* Mark command mechanism as permanently failed */
1182 if ( ( rc
= xhci_reset ( xhci
) ) != 0 )
1185 /* Discard DCBAA entries since DCBAAP has been cleared */
1186 assert ( xhci
->dcbaa
.context
!= NULL
);
1187 len
= ( ( xhci
->slots
+ 1 ) * sizeof ( xhci
->dcbaa
.context
[0] ) );
1188 memset ( xhci
->dcbaa
.context
, 0, len
);
1193 /******************************************************************************
1195 * Transfer request blocks
1197 ******************************************************************************
/* Allocate a TRB ring of 2^shift entries plus one Link TRB: initialise
 * the ring bookkeeping (shift/mask/len, doorbell address and value),
 * allocate the per-entry I/O buffer pointer array and the DMA-mapped
 * TRB array, and point the Link TRB back at the ring start with the
 * toggle-cycle flag set.  Unwinds allocations on failure.
 * NOTE(review): mangled extraction — declarations, returns, goto
 * labels and closing braces are missing; code tokens left
 * byte-identical. */
1201 * Allocate transfer request block ring
1203 * @v xhci xHCI device
1205 * @v shift Ring size (log2)
1206 * @v slot Device slot
1207 * @v target Doorbell target
1208 * @v stream Doorbell stream ID
1209 * @ret rc Return status code
1211 static int xhci_ring_alloc ( struct xhci_device
*xhci
,
1212 struct xhci_trb_ring
*ring
,
1213 unsigned int shift
, unsigned int slot
,
1214 unsigned int target
, unsigned int stream
) {
1215 struct xhci_trb_link
*link
;
1220 assert ( shift
> 0 );
1222 /* Initialise structure */
1223 memset ( ring
, 0, sizeof ( *ring
) );
1224 ring
->shift
= shift
;
1225 count
= ( 1U << shift
);
1226 ring
->mask
= ( count
- 1 );
1227 ring
->len
= ( ( count
+ 1 /* Link TRB */ ) * sizeof ( ring
->trb
[0] ) );
1228 ring
->db
= ( xhci
->db
+ ( slot
* sizeof ( ring
->dbval
) ) );
1229 ring
->dbval
= XHCI_DBVAL ( target
, stream
);
1231 /* Allocate I/O buffers */
1232 ring
->iobuf
= zalloc ( count
* sizeof ( ring
->iobuf
[0] ) );
1233 if ( ! ring
->iobuf
) {
1235 goto err_alloc_iobuf
;
1239 ring
->trb
= dma_alloc ( xhci
->dma
, &ring
->map
, ring
->len
,
1240 xhci_align ( ring
->len
) );
1241 if ( ! ring
->trb
) {
1245 memset ( ring
->trb
, 0, ring
->len
);
1247 /* Initialise Link TRB */
1248 link
= &ring
->trb
[count
].link
;
1249 link
->next
= cpu_to_le64 ( dma ( &ring
->map
, ring
->trb
) );
1250 link
->flags
= XHCI_TRB_TC
;
1251 link
->type
= XHCI_TRB_LINK
;
/* Error unwind: free TRB array, then I/O buffer array. */
1256 dma_free ( &ring
->map
, ring
->trb
, ring
->len
);
1258 free ( ring
->iobuf
);
1264 * Reset transfer request block ring
1268 static void xhci_ring_reset ( struct xhci_trb_ring
*ring
) {
1269 unsigned int count
= ( 1U << ring
->shift
);
1271 /* Reset producer and consumer counters */
1275 /* Reset TRBs (except Link TRB) */
1276 memset ( ring
->trb
, 0, ( count
* sizeof ( ring
->trb
[0] ) ) );
1280 * Free transfer request block ring
1284 static void xhci_ring_free ( struct xhci_trb_ring
*ring
) {
1285 unsigned int count
= ( 1U << ring
->shift
);
1289 assert ( ring
->cons
== ring
->prod
);
1290 for ( i
= 0 ; i
< count
; i
++ )
1291 assert ( ring
->iobuf
[i
] == NULL
);
1294 dma_free ( &ring
->map
, ring
->trb
, ring
->len
);
1296 /* Free I/O buffers */
1297 free ( ring
->iobuf
);
1301 * Enqueue a transfer request block
1304 * @v iobuf I/O buffer (if any)
1305 * @v trb Transfer request block (with empty Cycle flag)
1306 * @ret rc Return status code
1308 * This operation does not implicitly ring the doorbell register.
1310 static int xhci_enqueue ( struct xhci_trb_ring
*ring
, struct io_buffer
*iobuf
,
1311 const union xhci_trb
*trb
) {
1312 union xhci_trb
*dest
;
1319 assert ( ! ( trb
->common
.flags
& XHCI_TRB_C
) );
1321 /* Fail if ring is full */
1322 if ( ! xhci_ring_remaining ( ring
) )
1325 /* Update producer counter (and link TRB, if applicable) */
1326 prod
= ring
->prod
++;
1328 cycle
= ( ( ~( prod
>> ring
->shift
) ) & XHCI_TRB_C
);
1329 index
= ( prod
& mask
);
1331 ring
->link
->flags
= ( XHCI_TRB_TC
| ( cycle
^ XHCI_TRB_C
) );
1333 /* Record I/O buffer */
1334 ring
->iobuf
[index
] = iobuf
;
1337 dest
= &ring
->trb
[index
];
1338 dest
->template.parameter
= trb
->template.parameter
;
1339 dest
->template.status
= trb
->template.status
;
1341 dest
->template.control
= ( trb
->template.control
|
1342 cpu_to_le32 ( cycle
) );
1348 * Dequeue a transfer request block
1351 * @ret iobuf I/O buffer
1353 static struct io_buffer
* xhci_dequeue ( struct xhci_trb_ring
*ring
) {
1354 struct io_buffer
*iobuf
;
1360 assert ( xhci_ring_fill ( ring
) != 0 );
1362 /* Update consumer counter */
1363 cons
= ring
->cons
++;
1365 index
= ( cons
& mask
);
1367 /* Retrieve I/O buffer */
1368 iobuf
= ring
->iobuf
[index
];
1369 ring
->iobuf
[index
] = NULL
;
1375 * Enqueue multiple transfer request blocks
1378 * @v iobuf I/O buffer
1379 * @v trbs Transfer request blocks (with empty Cycle flag)
1380 * @v count Number of transfer request blocks
1381 * @ret rc Return status code
1383 * This operation does not implicitly ring the doorbell register.
1385 static int xhci_enqueue_multi ( struct xhci_trb_ring
*ring
,
1386 struct io_buffer
*iobuf
,
1387 const union xhci_trb
*trbs
,
1388 unsigned int count
) {
1389 const union xhci_trb
*trb
= trbs
;
1393 assert ( iobuf
!= NULL
);
1395 /* Fail if ring does not have sufficient space */
1396 if ( xhci_ring_remaining ( ring
) < count
)
1399 /* Enqueue each TRB, recording the I/O buffer with the final TRB */
1401 rc
= xhci_enqueue ( ring
, ( count ? NULL
: iobuf
), trb
++ );
1402 assert ( rc
== 0 ); /* Should never be able to fail */
1409 * Dequeue multiple transfer request blocks
1412 * @ret iobuf I/O buffer
1414 static struct io_buffer
* xhci_dequeue_multi ( struct xhci_trb_ring
*ring
) {
1415 struct io_buffer
*iobuf
;
1417 /* Dequeue TRBs until we reach the final TRB for an I/O buffer */
1419 iobuf
= xhci_dequeue ( ring
);
1420 } while ( iobuf
== NULL
);
1426 * Ring doorbell register
1430 static inline __attribute__ (( always_inline
)) void
1431 xhci_doorbell ( struct xhci_trb_ring
*ring
) {
1434 writel ( ring
->dbval
, ring
->db
);
/******************************************************************************
 *
 * Command and event rings
 *
 ******************************************************************************
 */
1445 * Allocate command ring
1447 * @v xhci xHCI device
1448 * @ret rc Return status code
1450 static int xhci_command_alloc ( struct xhci_device
*xhci
) {
1454 /* Allocate TRB ring */
1455 if ( ( rc
= xhci_ring_alloc ( xhci
, &xhci
->command
, XHCI_CMD_TRBS_LOG2
,
1457 goto err_ring_alloc
;
1459 /* Program command ring control register */
1460 crp
= dma ( &xhci
->command
.map
, xhci
->command
.trb
);
1461 if ( ( rc
= xhci_writeq ( xhci
, ( crp
| XHCI_CRCR_RCS
),
1462 xhci
->op
+ XHCI_OP_CRCR
) ) != 0 )
1465 DBGC2 ( xhci
, "XHCI %s CRCR at [%08lx,%08lx)\n", xhci
->name
,
1466 virt_to_phys ( xhci
->command
.trb
),
1467 ( virt_to_phys ( xhci
->command
.trb
) + xhci
->command
.len
) );
1471 xhci_ring_free ( &xhci
->command
);
1479 * @v xhci xHCI device
1481 static void xhci_command_free ( struct xhci_device
*xhci
) {
1484 assert ( ( readl ( xhci
->op
+ XHCI_OP_CRCR
) & XHCI_CRCR_CRR
) == 0 );
1486 /* Clear command ring control register */
1487 xhci_writeq ( xhci
, 0, xhci
->op
+ XHCI_OP_CRCR
);
1490 xhci_ring_free ( &xhci
->command
);
1494 * Allocate event ring
1496 * @v xhci xHCI device
1497 * @ret rc Return status code
1499 static int xhci_event_alloc ( struct xhci_device
*xhci
) {
1500 struct xhci_event_ring
*event
= &xhci
->event
;
1505 /* Allocate event ring */
1506 count
= ( 1 << XHCI_EVENT_TRBS_LOG2
);
1507 len
= ( count
* sizeof ( event
->trb
[0] ) );
1508 event
->trb
= dma_alloc ( xhci
->dma
, &event
->trb_map
, len
,
1509 xhci_align ( len
) );
1510 if ( ! event
->trb
) {
1514 memset ( event
->trb
, 0, len
);
1516 /* Allocate event ring segment table */
1517 event
->segment
= dma_alloc ( xhci
->dma
, &event
->segment_map
,
1518 sizeof ( event
->segment
[0] ),
1519 xhci_align ( sizeof (event
->segment
[0])));
1520 if ( ! event
->segment
) {
1522 goto err_alloc_segment
;
1524 memset ( event
->segment
, 0, sizeof ( event
->segment
[0] ) );
1525 event
->segment
[0].base
= cpu_to_le64 ( dma ( &event
->trb_map
,
1527 event
->segment
[0].count
= cpu_to_le32 ( count
);
1529 /* Program event ring registers */
1530 writel ( 1, xhci
->run
+ XHCI_RUN_ERSTSZ ( 0 ) );
1531 if ( ( rc
= xhci_writeq ( xhci
, dma ( &event
->trb_map
, event
->trb
),
1532 xhci
->run
+ XHCI_RUN_ERDP ( 0 ) ) ) != 0 )
1533 goto err_writeq_erdp
;
1534 if ( ( rc
= xhci_writeq ( xhci
,
1535 dma ( &event
->segment_map
, event
->segment
),
1536 xhci
->run
+ XHCI_RUN_ERSTBA ( 0 ) ) ) != 0 )
1537 goto err_writeq_erstba
;
1539 DBGC2 ( xhci
, "XHCI %s event ring [%08lx,%08lx) table [%08lx,%08lx)\n",
1540 xhci
->name
, virt_to_phys ( event
->trb
),
1541 ( virt_to_phys ( event
->trb
) + len
),
1542 virt_to_phys ( event
->segment
),
1543 ( virt_to_phys ( event
->segment
) +
1544 sizeof ( event
->segment
[0] ) ) );
1547 xhci_writeq ( xhci
, 0, xhci
->run
+ XHCI_RUN_ERSTBA ( 0 ) );
1549 xhci_writeq ( xhci
, 0, xhci
->run
+ XHCI_RUN_ERDP ( 0 ) );
1551 dma_free ( &event
->segment_map
, event
->segment
,
1552 sizeof ( event
->segment
[0] ) );
1554 dma_free ( &event
->trb_map
, event
->trb
, len
);
1562 * @v xhci xHCI device
1564 static void xhci_event_free ( struct xhci_device
*xhci
) {
1565 struct xhci_event_ring
*event
= &xhci
->event
;
1569 /* Clear event ring registers */
1570 writel ( 0, xhci
->run
+ XHCI_RUN_ERSTSZ ( 0 ) );
1571 xhci_writeq ( xhci
, 0, xhci
->run
+ XHCI_RUN_ERSTBA ( 0 ) );
1572 xhci_writeq ( xhci
, 0, xhci
->run
+ XHCI_RUN_ERDP ( 0 ) );
1574 /* Free event ring segment table */
1575 dma_free ( &event
->segment_map
, event
->segment
,
1576 sizeof ( event
->segment
[0] ) );
1578 /* Free event ring */
1579 count
= ( 1 << XHCI_EVENT_TRBS_LOG2
);
1580 len
= ( count
* sizeof ( event
->trb
[0] ) );
1581 dma_free ( &event
->trb_map
, event
->trb
, len
);
1585 * Handle transfer event
1587 * @v xhci xHCI device
1588 * @v trb Transfer event TRB
1590 static void xhci_transfer ( struct xhci_device
*xhci
,
1591 struct xhci_trb_transfer
*trb
) {
1592 struct xhci_slot
*slot
;
1593 struct xhci_endpoint
*endpoint
;
1594 struct io_buffer
*iobuf
;
1597 /* Profile transfer events */
1598 profile_start ( &xhci_transfer_profiler
);
1601 if ( ( trb
->slot
> xhci
->slots
) ||
1602 ( ( slot
= xhci
->slot
[trb
->slot
] ) == NULL
) ) {
1603 DBGC ( xhci
, "XHCI %s transfer event invalid slot %d:\n",
1604 xhci
->name
, trb
->slot
);
1605 DBGC_HDA ( xhci
, 0, trb
, sizeof ( *trb
) );
1609 /* Identify endpoint */
1610 if ( ( trb
->endpoint
>= XHCI_CTX_END
) ||
1611 ( ( endpoint
= slot
->endpoint
[trb
->endpoint
] ) == NULL
) ) {
1612 DBGC ( xhci
, "XHCI %s slot %d transfer event invalid epid "
1613 "%d:\n", xhci
->name
, slot
->id
, trb
->endpoint
);
1614 DBGC_HDA ( xhci
, 0, trb
, sizeof ( *trb
) );
1618 /* Dequeue TRB(s) */
1619 iobuf
= xhci_dequeue_multi ( &endpoint
->ring
);
1620 assert ( iobuf
!= NULL
);
1622 /* Unmap I/O buffer */
1623 iob_unmap ( iobuf
);
1625 /* Check for errors */
1626 if ( ! ( ( trb
->code
== XHCI_CMPLT_SUCCESS
) ||
1627 ( trb
->code
== XHCI_CMPLT_SHORT
) ) ) {
1629 /* Construct error */
1630 rc
= -ECODE ( trb
->code
);
1631 DBGC ( xhci
, "XHCI %s slot %d ctx %d failed (code %d): %s\n",
1632 xhci
->name
, slot
->id
, endpoint
->ctx
, trb
->code
,
1634 DBGC_HDA ( xhci
, 0, trb
, sizeof ( *trb
) );
1637 assert ( ( endpoint
->context
->state
& XHCI_ENDPOINT_STATE_MASK
)
1638 != XHCI_ENDPOINT_RUNNING
);
1640 /* Report failure to USB core */
1641 usb_complete_err ( endpoint
->ep
, iobuf
, rc
);
1645 /* Record actual transfer size */
1646 iob_unput ( iobuf
, le16_to_cpu ( trb
->residual
) );
1648 /* Sanity check (for successful completions only) */
1649 assert ( xhci_ring_consumed ( &endpoint
->ring
) ==
1650 le64_to_cpu ( trb
->transfer
) );
1652 /* Report completion to USB core */
1653 usb_complete ( endpoint
->ep
, iobuf
);
1654 profile_stop ( &xhci_transfer_profiler
);
1658 * Handle command completion event
1660 * @v xhci xHCI device
1661 * @v trb Command completion event
1663 static void xhci_complete ( struct xhci_device
*xhci
,
1664 struct xhci_trb_complete
*trb
) {
1667 /* Ignore "command ring stopped" notifications */
1668 if ( trb
->code
== XHCI_CMPLT_CMD_STOPPED
) {
1669 DBGC2 ( xhci
, "XHCI %s command ring stopped\n", xhci
->name
);
1673 /* Ignore unexpected completions */
1674 if ( ! xhci
->pending
) {
1675 rc
= -ECODE ( trb
->code
);
1676 DBGC ( xhci
, "XHCI %s unexpected completion (code %d): %s\n",
1677 xhci
->name
, trb
->code
, strerror ( rc
) );
1678 DBGC_HDA ( xhci
, 0, trb
, sizeof ( *trb
) );
1682 /* Dequeue command TRB */
1683 xhci_dequeue ( &xhci
->command
);
1686 assert ( xhci_ring_consumed ( &xhci
->command
) ==
1687 le64_to_cpu ( trb
->command
) );
1689 /* Record completion */
1690 memcpy ( xhci
->pending
, trb
, sizeof ( *xhci
->pending
) );
1691 xhci
->pending
= NULL
;
1695 * Handle port status event
1697 * @v xhci xHCI device
1698 * @v trb Port status event
1700 static void xhci_port_status ( struct xhci_device
*xhci
,
1701 struct xhci_trb_port_status
*trb
) {
1702 struct usb_port
*port
= usb_port ( xhci
->bus
->hub
, trb
->port
);
1706 assert ( ( trb
->port
> 0 ) && ( trb
->port
<= xhci
->ports
) );
1708 /* Record disconnections and clear changes */
1709 portsc
= readl ( xhci
->op
+ XHCI_OP_PORTSC ( trb
->port
) );
1710 port
->disconnected
|= ( portsc
& XHCI_PORTSC_CSC
);
1711 portsc
&= ( XHCI_PORTSC_PRESERVE
| XHCI_PORTSC_CHANGE
);
1712 writel ( portsc
, xhci
->op
+ XHCI_OP_PORTSC ( trb
->port
) );
1714 /* Report port status change */
1715 usb_port_changed ( port
);
1719 * Handle host controller event
1721 * @v xhci xHCI device
1722 * @v trb Host controller event
1724 static void xhci_host_controller ( struct xhci_device
*xhci
,
1725 struct xhci_trb_host_controller
*trb
) {
1728 /* Construct error */
1729 rc
= -ECODE ( trb
->code
);
1730 DBGC ( xhci
, "XHCI %s host controller event (code %d): %s\n",
1731 xhci
->name
, trb
->code
, strerror ( rc
) );
1737 * @v xhci xHCI device
1739 static void xhci_event_poll ( struct xhci_device
*xhci
) {
1740 struct xhci_event_ring
*event
= &xhci
->event
;
1741 union xhci_trb
*trb
;
1742 unsigned int shift
= XHCI_EVENT_TRBS_LOG2
;
1743 unsigned int count
= ( 1 << shift
);
1744 unsigned int mask
= ( count
- 1 );
1745 unsigned int consumed
;
1748 /* Do nothing if device has permanently failed */
1752 /* Poll for events */
1753 profile_start ( &xhci_event_profiler
);
1754 for ( consumed
= 0 ; ; consumed
++ ) {
1756 /* Stop if we reach an empty TRB */
1758 trb
= &event
->trb
[ event
->cons
& mask
];
1759 if ( ! ( ( trb
->common
.flags
^
1760 ( event
->cons
>> shift
) ) & XHCI_TRB_C
) )
1763 /* Consume this TRB */
1767 type
= ( trb
->common
.type
& XHCI_TRB_TYPE_MASK
);
1770 case XHCI_TRB_TRANSFER
:
1771 xhci_transfer ( xhci
, &trb
->transfer
);
1774 case XHCI_TRB_COMPLETE
:
1775 xhci_complete ( xhci
, &trb
->complete
);
1778 case XHCI_TRB_PORT_STATUS
:
1779 xhci_port_status ( xhci
, &trb
->port
);
1782 case XHCI_TRB_HOST_CONTROLLER
:
1783 xhci_host_controller ( xhci
, &trb
->host
);
1787 DBGC ( xhci
, "XHCI %s unrecognised event %#x\n:",
1788 xhci
->name
, ( event
->cons
- 1 ) );
1789 DBGC_HDA ( xhci
, virt_to_phys ( trb
),
1790 trb
, sizeof ( *trb
) );
1795 /* Update dequeue pointer if applicable */
1797 xhci_writeq ( xhci
, dma ( &event
->trb_map
, trb
),
1798 xhci
->run
+ XHCI_RUN_ERDP ( 0 ) );
1799 profile_stop ( &xhci_event_profiler
);
1806 * @v xhci xHCI device
1808 static void xhci_abort ( struct xhci_device
*xhci
) {
1812 /* Abort the command */
1813 DBGC2 ( xhci
, "XHCI %s aborting command\n", xhci
->name
);
1814 xhci_writeq ( xhci
, XHCI_CRCR_CA
, xhci
->op
+ XHCI_OP_CRCR
);
1816 /* Allow time for command to abort */
1817 mdelay ( XHCI_COMMAND_ABORT_DELAY_MS
);
1819 /* Check for failure to abort */
1820 crcr
= readl ( xhci
->op
+ XHCI_OP_CRCR
);
1821 if ( crcr
& XHCI_CRCR_CRR
) {
1823 /* Device has failed to abort a command and is almost
1824 * certainly beyond repair. Reset device, abandoning
1825 * all state, and mark device as failed to avoid
1826 * delays on any future command attempts.
1828 DBGC ( xhci
, "XHCI %s failed to abort command\n", xhci
->name
);
1832 /* Consume (and ignore) any final command status */
1833 xhci_event_poll ( xhci
);
1835 /* Reset the command ring control register */
1836 xhci_ring_reset ( &xhci
->command
);
1837 crp
= dma ( &xhci
->command
.map
, xhci
->command
.trb
);
1838 xhci_writeq ( xhci
, ( crp
| XHCI_CRCR_RCS
), xhci
->op
+ XHCI_OP_CRCR
);
1842 * Issue command and wait for completion
1844 * @v xhci xHCI device
1845 * @v trb Transfer request block (with empty Cycle flag)
1846 * @ret rc Return status code
1848 * On a successful completion, the TRB will be overwritten with the
1851 static int xhci_command ( struct xhci_device
*xhci
, union xhci_trb
*trb
) {
1852 struct xhci_trb_complete
*complete
= &trb
->complete
;
1856 /* Immediately fail all commands if command mechanism has failed */
1857 if ( xhci
->failed
) {
1863 if ( xhci
->pending
) {
1864 DBGC ( xhci
, "XHCI %s command ring busy\n", xhci
->name
);
1869 /* Record the pending command */
1870 xhci
->pending
= trb
;
1872 /* Enqueue the command */
1873 if ( ( rc
= xhci_enqueue ( &xhci
->command
, NULL
, trb
) ) != 0 )
1876 /* Ring the command doorbell */
1877 xhci_doorbell ( &xhci
->command
);
1879 /* Wait for the command to complete */
1880 for ( i
= 0 ; i
< XHCI_COMMAND_MAX_WAIT_MS
; i
++ ) {
1882 /* Poll event ring */
1883 xhci_event_poll ( xhci
);
1885 /* Check for completion */
1886 if ( ! xhci
->pending
) {
1887 if ( complete
->code
!= XHCI_CMPLT_SUCCESS
) {
1888 rc
= -ECODE ( complete
->code
);
1889 DBGC ( xhci
, "XHCI %s command failed (code "
1890 "%d): %s\n", xhci
->name
, complete
->code
,
1892 DBGC_HDA ( xhci
, 0, trb
, sizeof ( *trb
) );
1903 DBGC ( xhci
, "XHCI %s timed out waiting for completion\n", xhci
->name
);
1907 xhci_abort ( xhci
);
1910 xhci
->pending
= NULL
;
1917 * Issue NOP and wait for completion
1919 * @v xhci xHCI device
1920 * @ret rc Return status code
1922 static inline int xhci_nop ( struct xhci_device
*xhci
) {
1924 struct xhci_trb_common
*nop
= &trb
.common
;
1927 /* Construct command */
1928 memset ( nop
, 0, sizeof ( *nop
) );
1929 nop
->flags
= XHCI_TRB_IOC
;
1930 nop
->type
= XHCI_TRB_NOP_CMD
;
1932 /* Issue command and wait for completion */
1933 if ( ( rc
= xhci_command ( xhci
, &trb
) ) != 0 ) {
1934 DBGC ( xhci
, "XHCI %s NOP failed: %s\n",
1935 xhci
->name
, strerror ( rc
) );
1939 DBGC2 ( xhci
, "XHCI %s NOP completed successfully\n", xhci
->name
);
1946 * @v xhci xHCI device
1948 * @ret slot Device slot ID, or negative error
1950 static inline int xhci_enable_slot ( struct xhci_device
*xhci
,
1951 unsigned int type
) {
1953 struct xhci_trb_enable_slot
*enable
= &trb
.enable
;
1954 struct xhci_trb_complete
*enabled
= &trb
.complete
;
1958 /* Construct command */
1959 memset ( enable
, 0, sizeof ( *enable
) );
1960 enable
->slot
= type
;
1961 enable
->type
= XHCI_TRB_ENABLE_SLOT
;
1963 /* Issue command and wait for completion */
1964 if ( ( rc
= xhci_command ( xhci
, &trb
) ) != 0 ) {
1965 DBGC ( xhci
, "XHCI %s could not enable new slot: %s\n",
1966 xhci
->name
, strerror ( rc
) );
1970 /* Extract slot number */
1971 slot
= enabled
->slot
;
1973 DBGC2 ( xhci
, "XHCI %s slot %d enabled\n", xhci
->name
, slot
);
1980 * @v xhci xHCI device
1981 * @v slot Device slot
1982 * @ret rc Return status code
1984 static inline int xhci_disable_slot ( struct xhci_device
*xhci
,
1985 unsigned int slot
) {
1987 struct xhci_trb_disable_slot
*disable
= &trb
.disable
;
1990 /* Construct command */
1991 memset ( disable
, 0, sizeof ( *disable
) );
1992 disable
->type
= XHCI_TRB_DISABLE_SLOT
;
1993 disable
->slot
= slot
;
1995 /* Issue command and wait for completion */
1996 if ( ( rc
= xhci_command ( xhci
, &trb
) ) != 0 ) {
1997 DBGC ( xhci
, "XHCI %s could not disable slot %d: %s\n",
1998 xhci
->name
, slot
, strerror ( rc
) );
2002 DBGC2 ( xhci
, "XHCI %s slot %d disabled\n", xhci
->name
, slot
);
2007 * Issue context-based command and wait for completion
2009 * @v xhci xHCI device
2010 * @v slot Device slot
2011 * @v endpoint Endpoint
2013 * @v populate Input context populater
2014 * @ret rc Return status code
2016 static int xhci_context ( struct xhci_device
*xhci
, struct xhci_slot
*slot
,
2017 struct xhci_endpoint
*endpoint
, unsigned int type
,
2018 void ( * populate
) ( struct xhci_device
*xhci
,
2019 struct xhci_slot
*slot
,
2020 struct xhci_endpoint
*endpoint
,
2023 struct xhci_trb_context
*context
= &trb
.context
;
2024 struct dma_mapping map
;
2029 /* Allocate an input context */
2030 memset ( &map
, 0, sizeof ( map
) );
2031 len
= xhci_input_context_offset ( xhci
, XHCI_CTX_END
);
2032 input
= dma_alloc ( xhci
->dma
, &map
, len
, xhci_align ( len
) );
2037 memset ( input
, 0, len
);
2039 /* Populate input context */
2040 populate ( xhci
, slot
, endpoint
, input
);
2042 /* Construct command */
2043 memset ( context
, 0, sizeof ( *context
) );
2044 context
->type
= type
;
2045 context
->input
= cpu_to_le64 ( dma ( &map
, input
) );
2046 context
->slot
= slot
->id
;
2048 /* Issue command and wait for completion */
2049 if ( ( rc
= xhci_command ( xhci
, &trb
) ) != 0 )
2053 dma_free ( &map
, input
, len
);
2059 * Populate address device input context
2061 * @v xhci xHCI device
2062 * @v slot Device slot
2063 * @v endpoint Endpoint
2064 * @v input Input context
2066 static void xhci_address_device_input ( struct xhci_device
*xhci
,
2067 struct xhci_slot
*slot
,
2068 struct xhci_endpoint
*endpoint
,
2070 struct xhci_trb_ring
*ring
= &endpoint
->ring
;
2071 struct xhci_control_context
*control_ctx
;
2072 struct xhci_slot_context
*slot_ctx
;
2073 struct xhci_endpoint_context
*ep_ctx
;
2076 assert ( endpoint
->ctx
== XHCI_CTX_EP0
);
2078 /* Populate control context */
2079 control_ctx
= input
;
2080 control_ctx
->add
= cpu_to_le32 ( ( 1 << XHCI_CTX_SLOT
) |
2081 ( 1 << XHCI_CTX_EP0
) );
2083 /* Populate slot context */
2084 slot_ctx
= ( input
+ xhci_input_context_offset ( xhci
, XHCI_CTX_SLOT
));
2085 slot_ctx
->info
= cpu_to_le32 ( XHCI_SLOT_INFO ( 1, 0, slot
->psiv
,
2087 slot_ctx
->port
= slot
->port
;
2088 slot_ctx
->tt_id
= slot
->tt_id
;
2089 slot_ctx
->tt_port
= slot
->tt_port
;
2091 /* Populate control endpoint context */
2092 ep_ctx
= ( input
+ xhci_input_context_offset ( xhci
, XHCI_CTX_EP0
) );
2093 ep_ctx
->type
= XHCI_EP_TYPE_CONTROL
;
2094 ep_ctx
->burst
= endpoint
->ep
->burst
;
2095 ep_ctx
->mtu
= cpu_to_le16 ( endpoint
->ep
->mtu
);
2096 ep_ctx
->dequeue
= cpu_to_le64 ( dma ( &ring
->map
, ring
->trb
) |
2098 ep_ctx
->trb_len
= cpu_to_le16 ( XHCI_EP0_TRB_LEN
);
2104 * @v xhci xHCI device
2105 * @v slot Device slot
2106 * @ret rc Return status code
2108 static inline int xhci_address_device ( struct xhci_device
*xhci
,
2109 struct xhci_slot
*slot
) {
2110 struct usb_device
*usb
= slot
->usb
;
2111 struct xhci_slot_context
*slot_ctx
;
2114 /* Assign device address */
2115 if ( ( rc
= xhci_context ( xhci
, slot
, slot
->endpoint
[XHCI_CTX_EP0
],
2116 XHCI_TRB_ADDRESS_DEVICE
,
2117 xhci_address_device_input
) ) != 0 ) {
2118 DBGC ( xhci
, "XHCI %s slot %d could not assign address: %s\n",
2119 xhci
->name
, slot
->id
, strerror ( rc
) );
2123 /* Get assigned address */
2124 slot_ctx
= ( slot
->context
+
2125 xhci_device_context_offset ( xhci
, XHCI_CTX_SLOT
) );
2126 usb
->address
= slot_ctx
->address
;
2127 DBGC2 ( xhci
, "XHCI %s slot %d assigned address %d to %s\n",
2128 xhci
->name
, slot
->id
, usb
->address
, usb
->name
);
2134 * Populate configure endpoint input context
2136 * @v xhci xHCI device
2137 * @v slot Device slot
2138 * @v endpoint Endpoint
2139 * @v input Input context
2141 static void xhci_configure_endpoint_input ( struct xhci_device
*xhci
,
2142 struct xhci_slot
*slot
,
2143 struct xhci_endpoint
*endpoint
,
2145 struct xhci_trb_ring
*ring
= &endpoint
->ring
;
2146 struct xhci_control_context
*control_ctx
;
2147 struct xhci_slot_context
*slot_ctx
;
2148 struct xhci_endpoint_context
*ep_ctx
;
2150 /* Populate control context */
2151 control_ctx
= input
;
2152 control_ctx
->add
= cpu_to_le32 ( ( 1 << XHCI_CTX_SLOT
) |
2153 ( 1 << endpoint
->ctx
) );
2155 /* Populate slot context */
2156 slot_ctx
= ( input
+ xhci_input_context_offset ( xhci
, XHCI_CTX_SLOT
));
2157 slot_ctx
->info
= cpu_to_le32 ( XHCI_SLOT_INFO ( ( XHCI_CTX_END
- 1 ),
2158 ( slot
->ports ?
1 : 0 ),
2160 slot_ctx
->ports
= slot
->ports
;
2162 /* Populate endpoint context */
2163 ep_ctx
= ( input
+ xhci_input_context_offset ( xhci
, endpoint
->ctx
) );
2164 ep_ctx
->interval
= endpoint
->interval
;
2165 ep_ctx
->type
= endpoint
->type
;
2166 ep_ctx
->burst
= endpoint
->ep
->burst
;
2167 ep_ctx
->mtu
= cpu_to_le16 ( endpoint
->ep
->mtu
);
2168 ep_ctx
->dequeue
= cpu_to_le64 ( dma ( &ring
->map
, ring
->trb
) |
2170 ep_ctx
->trb_len
= cpu_to_le16 ( endpoint
->ep
->mtu
); /* best guess */
2174 * Configure endpoint
2176 * @v xhci xHCI device
2177 * @v slot Device slot
2178 * @v endpoint Endpoint
2179 * @ret rc Return status code
2181 static inline int xhci_configure_endpoint ( struct xhci_device
*xhci
,
2182 struct xhci_slot
*slot
,
2183 struct xhci_endpoint
*endpoint
) {
2186 /* Configure endpoint */
2187 if ( ( rc
= xhci_context ( xhci
, slot
, endpoint
,
2188 XHCI_TRB_CONFIGURE_ENDPOINT
,
2189 xhci_configure_endpoint_input
) ) != 0 ) {
2190 DBGC ( xhci
, "XHCI %s slot %d ctx %d could not configure: %s\n",
2191 xhci
->name
, slot
->id
, endpoint
->ctx
, strerror ( rc
) );
2195 DBGC2 ( xhci
, "XHCI %s slot %d ctx %d configured\n",
2196 xhci
->name
, slot
->id
, endpoint
->ctx
);
2201 * Populate deconfigure endpoint input context
2203 * @v xhci xHCI device
2204 * @v slot Device slot
2205 * @v endpoint Endpoint
2206 * @v input Input context
2209 xhci_deconfigure_endpoint_input ( struct xhci_device
*xhci __unused
,
2210 struct xhci_slot
*slot __unused
,
2211 struct xhci_endpoint
*endpoint
,
2213 struct xhci_control_context
*control_ctx
;
2214 struct xhci_slot_context
*slot_ctx
;
2216 /* Populate control context */
2217 control_ctx
= input
;
2218 control_ctx
->add
= cpu_to_le32 ( 1 << XHCI_CTX_SLOT
);
2219 control_ctx
->drop
= cpu_to_le32 ( 1 << endpoint
->ctx
);
2221 /* Populate slot context */
2222 slot_ctx
= ( input
+ xhci_input_context_offset ( xhci
, XHCI_CTX_SLOT
));
2223 slot_ctx
->info
= cpu_to_le32 ( XHCI_SLOT_INFO ( ( XHCI_CTX_END
- 1 ),
2228 * Deconfigure endpoint
2230 * @v xhci xHCI device
2231 * @v slot Device slot
2232 * @v endpoint Endpoint
2233 * @ret rc Return status code
2235 static inline int xhci_deconfigure_endpoint ( struct xhci_device
*xhci
,
2236 struct xhci_slot
*slot
,
2237 struct xhci_endpoint
*endpoint
) {
2240 /* Deconfigure endpoint */
2241 if ( ( rc
= xhci_context ( xhci
, slot
, endpoint
,
2242 XHCI_TRB_CONFIGURE_ENDPOINT
,
2243 xhci_deconfigure_endpoint_input
) ) != 0 ) {
2244 DBGC ( xhci
, "XHCI %s slot %d ctx %d could not deconfigure: "
2245 "%s\n", xhci
->name
, slot
->id
, endpoint
->ctx
,
2250 DBGC2 ( xhci
, "XHCI %s slot %d ctx %d deconfigured\n",
2251 xhci
->name
, slot
->id
, endpoint
->ctx
);
2256 * Populate evaluate context input context
2258 * @v xhci xHCI device
2259 * @v slot Device slot
2260 * @v endpoint Endpoint
2261 * @v input Input context
2263 static void xhci_evaluate_context_input ( struct xhci_device
*xhci
,
2264 struct xhci_slot
*slot __unused
,
2265 struct xhci_endpoint
*endpoint
,
2267 struct xhci_control_context
*control_ctx
;
2268 struct xhci_slot_context
*slot_ctx
;
2269 struct xhci_endpoint_context
*ep_ctx
;
2271 /* Populate control context */
2272 control_ctx
= input
;
2273 control_ctx
->add
= cpu_to_le32 ( ( 1 << XHCI_CTX_SLOT
) |
2274 ( 1 << endpoint
->ctx
) );
2276 /* Populate slot context */
2277 slot_ctx
= ( input
+ xhci_input_context_offset ( xhci
, XHCI_CTX_SLOT
));
2278 slot_ctx
->info
= cpu_to_le32 ( XHCI_SLOT_INFO ( ( XHCI_CTX_END
- 1 ),
2281 /* Populate endpoint context */
2282 ep_ctx
= ( input
+ xhci_input_context_offset ( xhci
, endpoint
->ctx
) );
2283 ep_ctx
->mtu
= cpu_to_le16 ( endpoint
->ep
->mtu
);
2289 * @v xhci xHCI device
2290 * @v slot Device slot
2291 * @v endpoint Endpoint
2292 * @ret rc Return status code
2294 static inline int xhci_evaluate_context ( struct xhci_device
*xhci
,
2295 struct xhci_slot
*slot
,
2296 struct xhci_endpoint
*endpoint
) {
2299 /* Configure endpoint */
2300 if ( ( rc
= xhci_context ( xhci
, slot
, endpoint
,
2301 XHCI_TRB_EVALUATE_CONTEXT
,
2302 xhci_evaluate_context_input
) ) != 0 ) {
2303 DBGC ( xhci
, "XHCI %s slot %d ctx %d could not (re-)evaluate: "
2304 "%s\n", xhci
->name
, slot
->id
, endpoint
->ctx
,
2309 DBGC2 ( xhci
, "XHCI %s slot %d ctx %d (re-)evaluated\n",
2310 xhci
->name
, slot
->id
, endpoint
->ctx
);
2317 * @v xhci xHCI device
2318 * @v slot Device slot
2319 * @v endpoint Endpoint
2320 * @ret rc Return status code
2322 static inline int xhci_reset_endpoint ( struct xhci_device
*xhci
,
2323 struct xhci_slot
*slot
,
2324 struct xhci_endpoint
*endpoint
) {
2326 struct xhci_trb_reset_endpoint
*reset
= &trb
.reset
;
2329 /* Construct command */
2330 memset ( reset
, 0, sizeof ( *reset
) );
2331 reset
->slot
= slot
->id
;
2332 reset
->endpoint
= endpoint
->ctx
;
2333 reset
->type
= XHCI_TRB_RESET_ENDPOINT
;
2335 /* Issue command and wait for completion */
2336 if ( ( rc
= xhci_command ( xhci
, &trb
) ) != 0 ) {
2337 DBGC ( xhci
, "XHCI %s slot %d ctx %d could not reset endpoint "
2338 "in state %d: %s\n", xhci
->name
, slot
->id
, endpoint
->ctx
,
2339 endpoint
->context
->state
, strerror ( rc
) );
2349 * @v xhci xHCI device
2350 * @v slot Device slot
2351 * @v endpoint Endpoint
2352 * @ret rc Return status code
2354 static inline int xhci_stop_endpoint ( struct xhci_device
*xhci
,
2355 struct xhci_slot
*slot
,
2356 struct xhci_endpoint
*endpoint
) {
2358 struct xhci_trb_stop_endpoint
*stop
= &trb
.stop
;
2361 /* Construct command */
2362 memset ( stop
, 0, sizeof ( *stop
) );
2363 stop
->slot
= slot
->id
;
2364 stop
->endpoint
= endpoint
->ctx
;
2365 stop
->type
= XHCI_TRB_STOP_ENDPOINT
;
2367 /* Issue command and wait for completion */
2368 if ( ( rc
= xhci_command ( xhci
, &trb
) ) != 0 ) {
2369 DBGC ( xhci
, "XHCI %s slot %d ctx %d could not stop endpoint "
2370 "in state %d: %s\n", xhci
->name
, slot
->id
, endpoint
->ctx
,
2371 endpoint
->context
->state
, strerror ( rc
) );
2379 * Set transfer ring dequeue pointer
2381 * @v xhci xHCI device
2382 * @v slot Device slot
2383 * @v endpoint Endpoint
2384 * @ret rc Return status code
2387 xhci_set_tr_dequeue_pointer ( struct xhci_device
*xhci
,
2388 struct xhci_slot
*slot
,
2389 struct xhci_endpoint
*endpoint
) {
2391 struct xhci_trb_set_tr_dequeue_pointer
*dequeue
= &trb
.dequeue
;
2392 struct xhci_trb_ring
*ring
= &endpoint
->ring
;
2400 /* Construct command */
2401 memset ( dequeue
, 0, sizeof ( *dequeue
) );
2404 dcs
= ( ( ~( cons
>> ring
->shift
) ) & XHCI_EP_DCS
);
2405 index
= ( cons
& mask
);
2406 addr
= dma ( &ring
->map
, &ring
->trb
[index
] );
2407 dequeue
->dequeue
= cpu_to_le64 ( addr
| dcs
);
2408 dequeue
->slot
= slot
->id
;
2409 dequeue
->endpoint
= endpoint
->ctx
;
2410 dequeue
->type
= XHCI_TRB_SET_TR_DEQUEUE_POINTER
;
2412 /* Issue command and wait for completion */
2413 if ( ( rc
= xhci_command ( xhci
, &trb
) ) != 0 ) {
2414 DBGC ( xhci
, "XHCI %s slot %d ctx %d could not set TR dequeue "
2415 "pointer in state %d: %s\n", xhci
->name
, slot
->id
,
2416 endpoint
->ctx
, endpoint
->context
->state
, strerror ( rc
));
/******************************************************************************
 *
 * Endpoint operations
 *
 ******************************************************************************
 */
2433 * @v ep USB endpoint
2434 * @ret rc Return status code
2436 static int xhci_endpoint_open ( struct usb_endpoint
*ep
) {
2437 struct usb_device
*usb
= ep
->usb
;
2438 struct xhci_slot
*slot
= usb_get_hostdata ( usb
);
2439 struct xhci_device
*xhci
= slot
->xhci
;
2440 struct xhci_endpoint
*endpoint
;
2443 unsigned int interval
;
2446 /* Calculate context index */
2447 ctx
= XHCI_CTX ( ep
->address
);
2448 assert ( slot
->endpoint
[ctx
] == NULL
);
2450 /* Calculate endpoint type */
2451 type
= XHCI_EP_TYPE ( ep
->attributes
& USB_ENDPOINT_ATTR_TYPE_MASK
);
2452 if ( type
== XHCI_EP_TYPE ( USB_ENDPOINT_ATTR_CONTROL
) )
2453 type
= XHCI_EP_TYPE_CONTROL
;
2454 if ( ep
->address
& USB_DIR_IN
)
2455 type
|= XHCI_EP_TYPE_IN
;
2457 /* Calculate interval */
2458 if ( type
& XHCI_EP_TYPE_PERIODIC
) {
2459 interval
= ( fls ( ep
->interval
) - 1 );
2461 interval
= ep
->interval
;
2464 /* Allocate and initialise structure */
2465 endpoint
= zalloc ( sizeof ( *endpoint
) );
2470 usb_endpoint_set_hostdata ( ep
, endpoint
);
2471 slot
->endpoint
[ctx
] = endpoint
;
2472 endpoint
->xhci
= xhci
;
2473 endpoint
->slot
= slot
;
2475 endpoint
->ctx
= ctx
;
2476 endpoint
->type
= type
;
2477 endpoint
->interval
= interval
;
2478 endpoint
->context
= ( ( ( void * ) slot
->context
) +
2479 xhci_device_context_offset ( xhci
, ctx
) );
2481 /* Allocate transfer ring */
2482 if ( ( rc
= xhci_ring_alloc ( xhci
, &endpoint
->ring
,
2483 XHCI_TRANSFER_TRBS_LOG2
,
2484 slot
->id
, ctx
, 0 ) ) != 0 )
2485 goto err_ring_alloc
;
2487 /* Configure endpoint, if applicable */
2488 if ( ( ctx
!= XHCI_CTX_EP0
) &&
2489 ( ( rc
= xhci_configure_endpoint ( xhci
, slot
, endpoint
) ) != 0 ))
2490 goto err_configure_endpoint
;
2492 DBGC2 ( xhci
, "XHCI %s slot %d ctx %d ring [%08lx,%08lx)\n",
2493 xhci
->name
, slot
->id
, ctx
, virt_to_phys ( endpoint
->ring
.trb
),
2494 ( virt_to_phys ( endpoint
->ring
.trb
) + endpoint
->ring
.len
) );
2497 xhci_deconfigure_endpoint ( xhci
, slot
, endpoint
);
2498 err_configure_endpoint
:
2499 xhci_ring_free ( &endpoint
->ring
);
2501 slot
->endpoint
[ctx
] = NULL
;
2510 * @v ep USB endpoint
2512 static void xhci_endpoint_close ( struct usb_endpoint
*ep
) {
2513 struct xhci_endpoint
*endpoint
= usb_endpoint_get_hostdata ( ep
);
2514 struct xhci_slot
*slot
= endpoint
->slot
;
2515 struct xhci_device
*xhci
= slot
->xhci
;
2516 struct io_buffer
*iobuf
;
2517 unsigned int ctx
= endpoint
->ctx
;
2519 /* Deconfigure endpoint, if applicable */
2520 if ( ctx
!= XHCI_CTX_EP0
)
2521 xhci_deconfigure_endpoint ( xhci
, slot
, endpoint
);
2523 /* Cancel any incomplete transfers */
2524 while ( xhci_ring_fill ( &endpoint
->ring
) ) {
2525 iobuf
= xhci_dequeue_multi ( &endpoint
->ring
);
2526 iob_unmap ( iobuf
);
2527 usb_complete_err ( ep
, iobuf
, -ECANCELED
);
2531 xhci_ring_free ( &endpoint
->ring
);
2532 slot
->endpoint
[ctx
] = NULL
;
2539 * @v ep USB endpoint
2540 * @ret rc Return status code
2542 static int xhci_endpoint_reset ( struct usb_endpoint
*ep
) {
2543 struct xhci_endpoint
*endpoint
= usb_endpoint_get_hostdata ( ep
);
2544 struct xhci_slot
*slot
= endpoint
->slot
;
2545 struct xhci_device
*xhci
= slot
->xhci
;
2548 /* Reset endpoint context */
2549 if ( ( rc
= xhci_reset_endpoint ( xhci
, slot
, endpoint
) ) != 0 )
2552 /* Set transfer ring dequeue pointer */
2553 if ( ( rc
= xhci_set_tr_dequeue_pointer ( xhci
, slot
, endpoint
) ) != 0)
2556 /* Ring doorbell to resume processing */
2557 xhci_doorbell ( &endpoint
->ring
);
2559 DBGC ( xhci
, "XHCI %s slot %d ctx %d reset\n",
2560 xhci
->name
, slot
->id
, endpoint
->ctx
);
2567 * @v ep USB endpoint
2568 * @ret rc Return status code
2570 static int xhci_endpoint_mtu ( struct usb_endpoint
*ep
) {
2571 struct xhci_endpoint
*endpoint
= usb_endpoint_get_hostdata ( ep
);
2572 struct xhci_slot
*slot
= endpoint
->slot
;
2573 struct xhci_device
*xhci
= slot
->xhci
;
2576 /* Evalulate context */
2577 if ( ( rc
= xhci_evaluate_context ( xhci
, slot
, endpoint
) ) != 0 )
2584 * Enqueue message transfer
2586 * @v ep USB endpoint
2587 * @v iobuf I/O buffer
2588 * @ret rc Return status code
2590 static int xhci_endpoint_message ( struct usb_endpoint
*ep
,
2591 struct io_buffer
*iobuf
) {
2592 struct xhci_endpoint
*endpoint
= usb_endpoint_get_hostdata ( ep
);
2593 struct xhci_device
*xhci
= endpoint
->xhci
;
2594 struct usb_setup_packet
*packet
;
2597 union xhci_trb trbs
[ 1 /* setup */ + 1 /* possible data */ +
2599 union xhci_trb
*trb
= trbs
;
2600 struct xhci_trb_setup
*setup
;
2601 struct xhci_trb_data
*data
;
2602 struct xhci_trb_status
*status
;
2605 /* Profile message transfers */
2606 profile_start ( &xhci_message_profiler
);
2608 /* Construct setup stage TRB */
2609 memset ( trbs
, 0, sizeof ( trbs
) );
2610 assert ( iob_len ( iobuf
) >= sizeof ( *packet
) );
2611 packet
= iobuf
->data
;
2612 iob_pull ( iobuf
, sizeof ( *packet
) );
2613 setup
= &(trb
++)->setup
;
2614 memcpy ( &setup
->packet
, packet
, sizeof ( setup
->packet
) );
2615 setup
->len
= cpu_to_le32 ( sizeof ( *packet
) );
2616 setup
->flags
= XHCI_TRB_IDT
;
2617 setup
->type
= XHCI_TRB_SETUP
;
2618 len
= iob_len ( iobuf
);
2619 input
= ( packet
->request
& cpu_to_le16 ( USB_DIR_IN
) );
2621 setup
->direction
= ( input ? XHCI_SETUP_IN
: XHCI_SETUP_OUT
);
2623 /* Map I/O buffer */
2624 if ( ( rc
= iob_map ( iobuf
, xhci
->dma
, len
,
2625 ( input ? DMA_RX
: DMA_TX
) ) ) != 0 )
2628 /* Construct data stage TRB, if applicable */
2630 data
= &(trb
++)->data
;
2631 data
->data
= cpu_to_le64 ( iob_dma ( iobuf
) );
2632 data
->len
= cpu_to_le32 ( len
);
2633 data
->type
= XHCI_TRB_DATA
;
2634 data
->direction
= ( input ? XHCI_DATA_IN
: XHCI_DATA_OUT
);
2637 /* Construct status stage TRB */
2638 status
= &(trb
++)->status
;
2639 status
->flags
= XHCI_TRB_IOC
;
2640 status
->type
= XHCI_TRB_STATUS
;
2642 ( ( len
&& input
) ? XHCI_STATUS_OUT
: XHCI_STATUS_IN
);
2645 if ( ( rc
= xhci_enqueue_multi ( &endpoint
->ring
, iobuf
, trbs
,
2646 ( trb
- trbs
) ) ) != 0 )
2649 /* Ring the doorbell */
2650 xhci_doorbell ( &endpoint
->ring
);
2652 profile_stop ( &xhci_message_profiler
);
2656 iob_unmap ( iobuf
);
2662 * Calculate number of TRBs
2664 * @v len Length of data
2665 * @v zlp Append a zero-length packet
2666 * @ret count Number of transfer descriptors
2668 static unsigned int xhci_endpoint_count ( size_t len
, int zlp
) {
2671 /* Split into 64kB TRBs */
2672 count
= ( ( len
+ XHCI_MTU
- 1 ) / XHCI_MTU
);
2674 /* Append a zero-length TRB if applicable */
2675 if ( zlp
|| ( count
== 0 ) )
2682 * Enqueue stream transfer
2684 * @v ep USB endpoint
2685 * @v iobuf I/O buffer
2686 * @v zlp Append a zero-length packet
2687 * @ret rc Return status code
2689 static int xhci_endpoint_stream ( struct usb_endpoint
*ep
,
2690 struct io_buffer
*iobuf
, int zlp
) {
2691 struct xhci_endpoint
*endpoint
= usb_endpoint_get_hostdata ( ep
);
2692 struct xhci_device
*xhci
= endpoint
->xhci
;
2693 size_t len
= iob_len ( iobuf
);
2694 unsigned int count
= xhci_endpoint_count ( len
, zlp
);
2695 union xhci_trb trbs
[count
];
2696 union xhci_trb
*trb
= trbs
;
2697 struct xhci_trb_normal
*normal
;
2703 /* Profile stream transfers */
2704 profile_start ( &xhci_stream_profiler
);
2706 /* Map I/O buffer */
2707 if ( ( rc
= iob_map ( iobuf
, xhci
->dma
, len
,
2708 ( ( ep
->address
& USB_DIR_IN
) ?
2709 DMA_RX
: DMA_TX
) ) ) != 0 )
2711 data
= iob_dma ( iobuf
);
2713 /* Construct normal TRBs */
2714 memset ( &trbs
, 0, sizeof ( trbs
) );
2715 for ( i
= 0 ; i
< count
; i
++ ) {
2717 /* Calculate TRB length */
2719 if ( trb_len
> len
)
2722 /* Construct normal TRB */
2723 normal
= &trb
->normal
;
2724 normal
->data
= cpu_to_le64 ( data
);
2725 normal
->len
= cpu_to_le32 ( trb_len
);
2726 normal
->type
= XHCI_TRB_NORMAL
;
2727 normal
->flags
= XHCI_TRB_CH
;
2729 /* Move to next TRB */
2735 /* Mark zero-length packet (if present) as a separate transfer */
2736 if ( zlp
&& ( count
> 1 ) )
2737 trb
[-2].normal
.flags
= 0;
2739 /* Generate completion for final TRB */
2740 trb
[-1].normal
.flags
= XHCI_TRB_IOC
;
2743 if ( ( rc
= xhci_enqueue_multi ( &endpoint
->ring
, iobuf
, trbs
,
2747 /* Ring the doorbell */
2748 xhci_doorbell ( &endpoint
->ring
);
2750 profile_stop ( &xhci_stream_profiler
);
2754 iob_unmap ( iobuf
);
2759 /******************************************************************************
2763 ******************************************************************************
2770 * @ret rc Return status code
2772 static int xhci_device_open ( struct usb_device
*usb
) {
2773 struct xhci_device
*xhci
= usb_bus_get_hostdata ( usb
->port
->hub
->bus
);
2774 struct usb_port
*tt
= usb_transaction_translator ( usb
);
2775 struct xhci_slot
*slot
;
2776 struct xhci_slot
*tt_slot
;
2782 /* Determine applicable slot type */
2783 type
= xhci_port_slot_type ( xhci
, usb
->port
->address
);
2786 DBGC ( xhci
, "XHCI %s-%d has no slot type\n",
2787 xhci
->name
, usb
->port
->address
);
2791 /* Allocate a device slot number */
2792 id
= xhci_enable_slot ( xhci
, type
);
2795 goto err_enable_slot
;
2797 assert ( ( id
> 0 ) && ( ( unsigned int ) id
<= xhci
->slots
) );
2798 assert ( xhci
->slot
[id
] == NULL
);
2800 /* Allocate and initialise structure */
2801 slot
= zalloc ( sizeof ( *slot
) );
2806 usb_set_hostdata ( usb
, slot
);
2807 xhci
->slot
[id
] = slot
;
2812 tt_slot
= usb_get_hostdata ( tt
->hub
->usb
);
2813 slot
->tt_id
= tt_slot
->id
;
2814 slot
->tt_port
= tt
->address
;
2817 /* Allocate a device context */
2818 len
= xhci_device_context_offset ( xhci
, XHCI_CTX_END
);
2819 slot
->context
= dma_alloc ( xhci
->dma
, &slot
->map
, len
,
2820 xhci_align ( len
) );
2821 if ( ! slot
->context
) {
2823 goto err_alloc_context
;
2825 memset ( slot
->context
, 0, len
);
2827 /* Set device context base address */
2828 assert ( xhci
->dcbaa
.context
[id
] == 0 );
2829 xhci
->dcbaa
.context
[id
] = cpu_to_le64 ( dma ( &slot
->map
,
2832 DBGC2 ( xhci
, "XHCI %s slot %d device context [%08lx,%08lx) for %s\n",
2833 xhci
->name
, slot
->id
, virt_to_phys ( slot
->context
),
2834 ( virt_to_phys ( slot
->context
) + len
), usb
->name
);
2837 xhci
->dcbaa
.context
[id
] = 0;
2838 dma_free ( &slot
->map
, slot
->context
, len
);
2840 xhci
->slot
[id
] = NULL
;
2843 xhci_disable_slot ( xhci
, id
);
2854 static void xhci_device_close ( struct usb_device
*usb
) {
2855 struct xhci_slot
*slot
= usb_get_hostdata ( usb
);
2856 struct xhci_device
*xhci
= slot
->xhci
;
2857 size_t len
= xhci_device_context_offset ( xhci
, XHCI_CTX_END
);
2858 unsigned int id
= slot
->id
;
2862 if ( ( rc
= xhci_disable_slot ( xhci
, id
) ) != 0 ) {
2863 /* Slot is still enabled. Leak the slot context,
2864 * since the controller may still write to this
2865 * memory, and leave the DCBAA entry intact.
2867 * If the controller later reports that this same slot
2868 * has been re-enabled, then some assertions will be
2871 DBGC ( xhci
, "XHCI %s slot %d leaking context memory\n",
2872 xhci
->name
, slot
->id
);
2873 slot
->context
= NULL
;
2877 if ( slot
->context
) {
2878 dma_free ( &slot
->map
, slot
->context
, len
);
2879 xhci
->dcbaa
.context
[id
] = 0;
2881 xhci
->slot
[id
] = NULL
;
2886 * Assign device address
2889 * @ret rc Return status code
2891 static int xhci_device_address ( struct usb_device
*usb
) {
2892 struct xhci_slot
*slot
= usb_get_hostdata ( usb
);
2893 struct xhci_device
*xhci
= slot
->xhci
;
2894 struct usb_port
*root_port
;
2898 /* Calculate route string */
2899 slot
->route
= usb_route_string ( usb
);
2901 /* Calculate root hub port number */
2902 root_port
= usb_root_hub_port ( usb
);
2903 slot
->port
= root_port
->address
;
2905 /* Calculate protocol speed ID */
2906 psiv
= xhci_port_psiv ( xhci
, slot
->port
, usb
->speed
);
2913 /* Address device */
2914 if ( ( rc
= xhci_address_device ( xhci
, slot
) ) != 0 )
2920 /******************************************************************************
2924 ******************************************************************************
2931 * @ret rc Return status code
2933 static int xhci_bus_open ( struct usb_bus
*bus
) {
2934 struct xhci_device
*xhci
= usb_bus_get_hostdata ( bus
);
2937 /* Allocate device slot array */
2938 xhci
->slot
= zalloc ( ( xhci
->slots
+ 1 ) * sizeof ( xhci
->slot
[0] ) );
2939 if ( ! xhci
->slot
) {
2941 goto err_slot_alloc
;
2944 /* Allocate device context base address array */
2945 if ( ( rc
= xhci_dcbaa_alloc ( xhci
) ) != 0 )
2946 goto err_dcbaa_alloc
;
2948 /* Allocate scratchpad buffers */
2949 if ( ( rc
= xhci_scratchpad_alloc ( xhci
) ) != 0 )
2950 goto err_scratchpad_alloc
;
2952 /* Allocate command ring */
2953 if ( ( rc
= xhci_command_alloc ( xhci
) ) != 0 )
2954 goto err_command_alloc
;
2956 /* Allocate event ring */
2957 if ( ( rc
= xhci_event_alloc ( xhci
) ) != 0 )
2958 goto err_event_alloc
;
2960 /* Start controller */
2966 xhci_event_free ( xhci
);
2968 xhci_command_free ( xhci
);
2970 xhci_scratchpad_free ( xhci
);
2971 err_scratchpad_alloc
:
2972 xhci_dcbaa_free ( xhci
);
2974 free ( xhci
->slot
);
2984 static void xhci_bus_close ( struct usb_bus
*bus
) {
2985 struct xhci_device
*xhci
= usb_bus_get_hostdata ( bus
);
2989 assert ( xhci
->slot
!= NULL
);
2990 for ( i
= 0 ; i
<= xhci
->slots
; i
++ )
2991 assert ( xhci
->slot
[i
] == NULL
);
2994 xhci_event_free ( xhci
);
2995 xhci_command_free ( xhci
);
2996 xhci_scratchpad_free ( xhci
);
2997 xhci_dcbaa_free ( xhci
);
2998 free ( xhci
->slot
);
/**
 * Poll USB bus
 *
 * @v bus		USB bus
 */
static void xhci_bus_poll ( struct usb_bus *bus ) {
	struct xhci_device *xhci = usb_bus_get_hostdata ( bus );

	/* Poll event ring */
	xhci_event_poll ( xhci );
}
3013 /******************************************************************************
3017 ******************************************************************************
3024 * @ret rc Return status code
3026 static int xhci_hub_open ( struct usb_hub
*hub
) {
3027 struct xhci_slot
*slot
;
3029 /* Do nothing if this is the root hub */
3033 /* Get device slot */
3034 slot
= usb_get_hostdata ( hub
->usb
);
3036 /* Update device slot hub parameters. We don't inform the
3037 * hardware of this information until the hub's interrupt
3038 * endpoint is opened, since the only mechanism for so doing
3039 * provided by the xHCI specification is a Configure Endpoint
3040 * command, and we can't issue that command until we have a
3041 * non-EP0 endpoint to configure.
3043 slot
->ports
= hub
->ports
;
3053 static void xhci_hub_close ( struct usb_hub
*hub __unused
) {
3058 /******************************************************************************
3060 * Root hub operations
3062 ******************************************************************************
3069 * @ret rc Return status code
3071 static int xhci_root_open ( struct usb_hub
*hub
) {
3072 struct xhci_device
*xhci
= usb_hub_get_drvdata ( hub
);
3073 struct usb_port
*port
;
3077 /* Enable power to all ports */
3078 for ( i
= 1 ; i
<= xhci
->ports
; i
++ ) {
3079 portsc
= readl ( xhci
->op
+ XHCI_OP_PORTSC ( i
) );
3080 portsc
&= XHCI_PORTSC_PRESERVE
;
3081 portsc
|= XHCI_PORTSC_PP
;
3082 writel ( portsc
, xhci
->op
+ XHCI_OP_PORTSC ( i
) );
3085 /* xHCI spec requires us to potentially wait 20ms after
3086 * enabling power to a port.
3088 mdelay ( XHCI_PORT_POWER_DELAY_MS
);
3090 /* USB3 ports may power up as Disabled */
3091 for ( i
= 1 ; i
<= xhci
->ports
; i
++ ) {
3092 portsc
= readl ( xhci
->op
+ XHCI_OP_PORTSC ( i
) );
3093 port
= usb_port ( hub
, i
);
3094 if ( ( port
->protocol
>= USB_PROTO_3_0
) &&
3095 ( ( portsc
& XHCI_PORTSC_PLS_MASK
) ==
3096 XHCI_PORTSC_PLS_DISABLED
) ) {
3097 /* Force link state to RxDetect */
3098 portsc
&= XHCI_PORTSC_PRESERVE
;
3099 portsc
|= ( XHCI_PORTSC_PLS_RXDETECT
| XHCI_PORTSC_LWS
);
3100 writel ( portsc
, xhci
->op
+ XHCI_OP_PORTSC ( i
) );
3104 /* Some xHCI cards seem to require an additional delay after
3105 * setting the link state to RxDetect.
3107 mdelay ( XHCI_LINK_STATE_DELAY_MS
);
3117 static void xhci_root_close ( struct usb_hub
*hub __unused
) {
3127 * @ret rc Return status code
3129 static int xhci_root_enable ( struct usb_hub
*hub
, struct usb_port
*port
) {
3130 struct xhci_device
*xhci
= usb_hub_get_drvdata ( hub
);
3135 portsc
= readl ( xhci
->op
+ XHCI_OP_PORTSC ( port
->address
) );
3136 portsc
&= XHCI_PORTSC_PRESERVE
;
3137 portsc
|= XHCI_PORTSC_PR
;
3138 writel ( portsc
, xhci
->op
+ XHCI_OP_PORTSC ( port
->address
) );
3140 /* Wait for port to become enabled */
3141 for ( i
= 0 ; i
< XHCI_PORT_RESET_MAX_WAIT_MS
; i
++ ) {
3143 /* Check port status */
3144 portsc
= readl ( xhci
->op
+ XHCI_OP_PORTSC ( port
->address
) );
3145 if ( portsc
& XHCI_PORTSC_PED
)
3152 DBGC ( xhci
, "XHCI %s-%d timed out waiting for port to enable\n",
3153 xhci
->name
, port
->address
);
3162 * @ret rc Return status code
3164 static int xhci_root_disable ( struct usb_hub
*hub
, struct usb_port
*port
) {
3165 struct xhci_device
*xhci
= usb_hub_get_drvdata ( hub
);
3169 portsc
= readl ( xhci
->op
+ XHCI_OP_PORTSC ( port
->address
) );
3170 portsc
&= XHCI_PORTSC_PRESERVE
;
3171 portsc
|= XHCI_PORTSC_PED
;
3172 writel ( portsc
, xhci
->op
+ XHCI_OP_PORTSC ( port
->address
) );
3174 /* Allow time for link state to stabilise */
3175 mdelay ( XHCI_LINK_STATE_DELAY_MS
);
3177 /* Set link state to RxDetect for USB3 ports */
3178 if ( port
->protocol
>= USB_PROTO_3_0
) {
3179 portsc
&= XHCI_PORTSC_PRESERVE
;
3180 portsc
|= ( XHCI_PORTSC_PLS_RXDETECT
| XHCI_PORTSC_LWS
);
3181 writel ( portsc
, xhci
->op
+ XHCI_OP_PORTSC ( port
->address
) );
3184 /* Allow time for link state to stabilise */
3185 mdelay ( XHCI_LINK_STATE_DELAY_MS
);
3191 * Update root hub port speed
3195 * @ret rc Return status code
3197 static int xhci_root_speed ( struct usb_hub
*hub
, struct usb_port
*port
) {
3198 struct xhci_device
*xhci
= usb_hub_get_drvdata ( hub
);
3207 /* Read port status */
3208 portsc
= readl ( xhci
->op
+ XHCI_OP_PORTSC ( port
->address
) );
3209 DBGC2 ( xhci
, "XHCI %s-%d status is %08x\n",
3210 xhci
->name
, port
->address
, portsc
);
3211 ccs
= ( portsc
& XHCI_PORTSC_CCS
);
3212 ped
= ( portsc
& XHCI_PORTSC_PED
);
3213 csc
= ( portsc
& XHCI_PORTSC_CSC
);
3214 psiv
= XHCI_PORTSC_PSIV ( portsc
);
3216 /* Record disconnections and clear changes */
3217 port
->disconnected
|= csc
;
3218 portsc
&= ( XHCI_PORTSC_PRESERVE
| XHCI_PORTSC_CHANGE
);
3219 writel ( portsc
, xhci
->op
+ XHCI_OP_PORTSC ( port
->address
) );
3221 /* Port speed is not valid unless port is connected */
3223 port
->speed
= USB_SPEED_NONE
;
3227 /* For USB2 ports, the PSIV field is not valid until the port
3228 * completes reset and becomes enabled.
3230 if ( ( port
->protocol
< USB_PROTO_3_0
) && ! ped
) {
3231 port
->speed
= USB_SPEED_FULL
;
3235 /* Get port speed and map to generic USB speed */
3236 speed
= xhci_port_speed ( xhci
, port
->address
, psiv
);
3242 port
->speed
= speed
;
3247 * Clear transaction translator buffer
3251 * @v ep USB endpoint
3252 * @ret rc Return status code
3254 static int xhci_root_clear_tt ( struct usb_hub
*hub
, struct usb_port
*port
,
3255 struct usb_endpoint
*ep
) {
3256 struct xhci_device
*xhci
= usb_hub_get_drvdata ( hub
);
3258 /* Should never be called; this is a root hub */
3259 DBGC ( xhci
, "XHCI %s-%d nonsensical CLEAR_TT for %s %s\n", xhci
->name
,
3260 port
->address
, ep
->usb
->name
, usb_endpoint_name ( ep
) );
3265 /******************************************************************************
3269 ******************************************************************************
3272 /** USB host controller operations */
3273 static struct usb_host_operations xhci_operations
= {
3275 .open
= xhci_endpoint_open
,
3276 .close
= xhci_endpoint_close
,
3277 .reset
= xhci_endpoint_reset
,
3278 .mtu
= xhci_endpoint_mtu
,
3279 .message
= xhci_endpoint_message
,
3280 .stream
= xhci_endpoint_stream
,
3283 .open
= xhci_device_open
,
3284 .close
= xhci_device_close
,
3285 .address
= xhci_device_address
,
3288 .open
= xhci_bus_open
,
3289 .close
= xhci_bus_close
,
3290 .poll
= xhci_bus_poll
,
3293 .open
= xhci_hub_open
,
3294 .close
= xhci_hub_close
,
3297 .open
= xhci_root_open
,
3298 .close
= xhci_root_close
,
3299 .enable
= xhci_root_enable
,
3300 .disable
= xhci_root_disable
,
3301 .speed
= xhci_root_speed
,
3302 .clear_tt
= xhci_root_clear_tt
,
3307 * Fix Intel PCH-specific quirks
3309 * @v xhci xHCI device
3312 static void xhci_pch_fix ( struct xhci_device
*xhci
, struct pci_device
*pci
) {
3313 struct xhci_pch
*pch
= &xhci
->pch
;
3319 /* Enable SuperSpeed capability. Do this before rerouting
3320 * USB2 ports, so that USB3 devices connect at SuperSpeed.
3322 pci_read_config_dword ( pci
, XHCI_PCH_USB3PSSEN
, &usb3pssen
);
3323 pci_read_config_dword ( pci
, XHCI_PCH_USB3PRM
, &usb3prm
);
3324 if ( usb3prm
& ~usb3pssen
) {
3325 DBGC ( xhci
, "XHCI %s enabling SuperSpeed on ports %08x\n",
3326 xhci
->name
, ( usb3prm
& ~usb3pssen
) );
3328 pch
->usb3pssen
= usb3pssen
;
3329 usb3pssen
|= usb3prm
;
3330 pci_write_config_dword ( pci
, XHCI_PCH_USB3PSSEN
, usb3pssen
);
3332 /* Route USB2 ports from EHCI to xHCI */
3333 pci_read_config_dword ( pci
, XHCI_PCH_XUSB2PR
, &xusb2pr
);
3334 pci_read_config_dword ( pci
, XHCI_PCH_XUSB2PRM
, &xusb2prm
);
3335 if ( xusb2prm
& ~xusb2pr
) {
3336 DBGC ( xhci
, "XHCI %s routing ports %08x from EHCI to xHCI\n",
3337 xhci
->name
, ( xusb2prm
& ~xusb2pr
) );
3339 pch
->xusb2pr
= xusb2pr
;
3340 xusb2pr
|= xusb2prm
;
3341 pci_write_config_dword ( pci
, XHCI_PCH_XUSB2PR
, xusb2pr
);
3345 * Undo Intel PCH-specific quirk fixes
3347 * @v xhci xHCI device
3350 static void xhci_pch_undo ( struct xhci_device
*xhci
, struct pci_device
*pci
) {
3351 struct xhci_pch
*pch
= &xhci
->pch
;
3353 /* Restore USB2 port routing to original state */
3354 pci_write_config_dword ( pci
, XHCI_PCH_XUSB2PR
, pch
->xusb2pr
);
3356 /* Restore SuperSpeed capability to original state */
3357 pci_write_config_dword ( pci
, XHCI_PCH_USB3PSSEN
, pch
->usb3pssen
);
3364 * @ret rc Return status code
3366 static int xhci_probe ( struct pci_device
*pci
) {
3367 struct xhci_device
*xhci
;
3368 struct usb_port
*port
;
3369 unsigned long bar_start
;
3374 /* Allocate and initialise structure */
3375 xhci
= zalloc ( sizeof ( *xhci
) );
3380 xhci
->name
= pci
->dev
.name
;
3381 xhci
->quirks
= pci
->id
->driver_data
;
3383 /* Fix up PCI device */
3384 adjust_pci_device ( pci
);
3387 bar_start
= pci_bar_start ( pci
, XHCI_BAR
);
3388 bar_size
= pci_bar_size ( pci
, XHCI_BAR
);
3389 xhci
->regs
= pci_ioremap ( pci
, bar_start
, bar_size
);
3390 if ( ! xhci
->regs
) {
3395 /* Initialise xHCI device */
3396 xhci_init ( xhci
, xhci
->regs
);
3398 /* Configure DMA device */
3399 xhci
->dma
= &pci
->dma
;
3401 dma_set_mask_64bit ( xhci
->dma
);
3403 /* Initialise USB legacy support and claim ownership */
3404 xhci_legacy_init ( xhci
);
3405 xhci_legacy_claim ( xhci
);
3407 /* Fix Intel PCH-specific quirks, if applicable */
3408 if ( xhci
->quirks
& XHCI_PCH
)
3409 xhci_pch_fix ( xhci
, pci
);
3412 if ( ( rc
= xhci_reset ( xhci
) ) != 0 )
3415 /* Allocate USB bus */
3416 xhci
->bus
= alloc_usb_bus ( &pci
->dev
, xhci
->ports
, XHCI_MTU
,
3418 if ( ! xhci
->bus
) {
3422 usb_bus_set_hostdata ( xhci
->bus
, xhci
);
3423 usb_hub_set_drvdata ( xhci
->bus
->hub
, xhci
);
3425 /* Set port protocols */
3426 for ( i
= 1 ; i
<= xhci
->ports
; i
++ ) {
3427 port
= usb_port ( xhci
->bus
->hub
, i
);
3428 port
->protocol
= xhci_port_protocol ( xhci
, i
);
3431 /* Register USB bus */
3432 if ( ( rc
= register_usb_bus ( xhci
->bus
) ) != 0 )
3435 pci_set_drvdata ( pci
, xhci
);
3438 unregister_usb_bus ( xhci
->bus
);
3440 free_usb_bus ( xhci
->bus
);
3442 xhci_reset ( xhci
);
3444 if ( xhci
->quirks
& XHCI_PCH
)
3445 xhci_pch_undo ( xhci
, pci
);
3446 xhci_legacy_release ( xhci
);
3447 iounmap ( xhci
->regs
);
3459 static void xhci_remove ( struct pci_device
*pci
) {
3460 struct xhci_device
*xhci
= pci_get_drvdata ( pci
);
3461 struct usb_bus
*bus
= xhci
->bus
;
3464 /* Some systems are observed to disable bus mastering on
3465 * Thunderbolt controllers before we get a chance to shut
3466 * down. Detect this and avoid attempting any DMA operations,
3467 * which are guaranteed to fail and may end up spuriously
3468 * completing after the operating system kernel starts up.
3470 pci_read_config_word ( pci
, PCI_COMMAND
, &command
);
3471 if ( ! ( command
& PCI_COMMAND_MASTER
) ) {
3472 DBGC ( xhci
, "XHCI %s DMA was disabled\n", xhci
->name
);
3476 /* Unregister and free USB bus */
3477 unregister_usb_bus ( bus
);
3478 free_usb_bus ( bus
);
3480 /* Reset device and undo any PCH-specific fixes */
3481 xhci_reset ( xhci
);
3482 if ( xhci
->quirks
& XHCI_PCH
)
3483 xhci_pch_undo ( xhci
, pci
);
3485 /* Release ownership back to BIOS */
3486 xhci_legacy_release ( xhci
);
3488 /* Unmap registers */
3489 iounmap ( xhci
->regs
);
3495 /** XHCI PCI device IDs */
3496 static struct pci_device_id xhci_ids
[] = {
3497 PCI_ROM ( 0x8086, 0x9d2f, "xhci-skylake", "xHCI (Skylake)", ( XHCI_PCH
| XHCI_BAD_PSIV
) ),
3498 PCI_ROM ( 0x8086, 0xffff, "xhci-pch", "xHCI (Intel PCH)", XHCI_PCH
),
3499 PCI_ROM ( 0xffff, 0xffff, "xhci", "xHCI", 0 ),
3502 /** XHCI PCI driver */
3503 struct pci_driver xhci_driver __pci_driver
= {
3505 .id_count
= ( sizeof ( xhci_ids
) / sizeof ( xhci_ids
[0] ) ),
3506 .class = PCI_CLASS_ID ( PCI_CLASS_SERIAL
, PCI_CLASS_SERIAL_USB
,
3507 PCI_CLASS_SERIAL_USB_XHCI
),
3508 .probe
= xhci_probe
,
3509 .remove
= xhci_remove
,
3515 * @v booting System is shutting down for OS boot
3517 static void xhci_shutdown ( int booting
) {
3518 /* If we are shutting down to boot an OS, then prevent the
3519 * release of ownership back to BIOS.
3521 xhci_legacy_prevent_release
= booting
;
3524 /** Startup/shutdown function */
3525 struct startup_fn xhci_startup
__startup_fn ( STARTUP_LATE
) = {
3527 .shutdown
= xhci_shutdown
,