2 * Copyright (C) 2014 Michael Brown <mbrown@fensystems.co.uk>.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License as
6 * published by the Free Software Foundation; either version 2 of the
7 * License, or (at your option) any later version.
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 * You can also choose to distribute this program under the terms of
20 * the Unmodified Binary Distribution Licence (as given in the file
21 * COPYING.UBDL), provided that you have satisfied its requirements.
24 FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL
);
33 #include <ipxe/malloc.h>
34 #include <ipxe/umalloc.h>
37 #include <ipxe/init.h>
38 #include <ipxe/profile.h>
43 * USB eXtensible Host Controller Interface (xHCI) driver
47 /** Message transfer profiler */
48 static struct profiler xhci_message_profiler __profiler
=
49 { .name
= "xhci.message" };
51 /** Stream transfer profiler */
52 static struct profiler xhci_stream_profiler __profiler
=
53 { .name
= "xhci.stream" };
55 /** Event ring profiler */
56 static struct profiler xhci_event_profiler __profiler
=
57 { .name
= "xhci.event" };
59 /** Transfer event profiler */
60 static struct profiler xhci_transfer_profiler __profiler
=
61 { .name
= "xhci.transfer" };
63 /* Disambiguate the various error causes */
65 __einfo_error ( EINFO_EIO_DATA )
66 #define EINFO_EIO_DATA \
67 __einfo_uniqify ( EINFO_EIO, ( 2 - 0 ), \
70 __einfo_error ( EINFO_EIO_BABBLE )
71 #define EINFO_EIO_BABBLE \
72 __einfo_uniqify ( EINFO_EIO, ( 3 - 0 ), \
75 __einfo_error ( EINFO_EIO_USB )
76 #define EINFO_EIO_USB \
77 __einfo_uniqify ( EINFO_EIO, ( 4 - 0 ), \
78 "USB transaction error" )
80 __einfo_error ( EINFO_EIO_TRB )
81 #define EINFO_EIO_TRB \
82 __einfo_uniqify ( EINFO_EIO, ( 5 - 0 ), \
85 __einfo_error ( EINFO_EIO_STALL )
86 #define EINFO_EIO_STALL \
87 __einfo_uniqify ( EINFO_EIO, ( 6 - 0 ), \
89 #define EIO_RESOURCE \
90 __einfo_error ( EINFO_EIO_RESOURCE )
91 #define EINFO_EIO_RESOURCE \
92 __einfo_uniqify ( EINFO_EIO, ( 7 - 0 ), \
94 #define EIO_BANDWIDTH \
95 __einfo_error ( EINFO_EIO_BANDWIDTH )
96 #define EINFO_EIO_BANDWIDTH \
97 __einfo_uniqify ( EINFO_EIO, ( 8 - 0 ), \
99 #define EIO_NO_SLOTS \
100 __einfo_error ( EINFO_EIO_NO_SLOTS )
101 #define EINFO_EIO_NO_SLOTS \
102 __einfo_uniqify ( EINFO_EIO, ( 9 - 0 ), \
103 "No slots available" )
104 #define EIO_STREAM_TYPE \
105 __einfo_error ( EINFO_EIO_STREAM_TYPE )
106 #define EINFO_EIO_STREAM_TYPE \
107 __einfo_uniqify ( EINFO_EIO, ( 10 - 0 ), \
108 "Invalid stream type" )
110 __einfo_error ( EINFO_EIO_SLOT )
111 #define EINFO_EIO_SLOT \
112 __einfo_uniqify ( EINFO_EIO, ( 11 - 0 ), \
114 #define EIO_ENDPOINT \
115 __einfo_error ( EINFO_EIO_ENDPOINT )
116 #define EINFO_EIO_ENDPOINT \
117 __einfo_uniqify ( EINFO_EIO, ( 12 - 0 ), \
118 "Endpoint not enabled" )
120 __einfo_error ( EINFO_EIO_SHORT )
121 #define EINFO_EIO_SHORT \
122 __einfo_uniqify ( EINFO_EIO, ( 13 - 0 ), \
124 #define EIO_UNDERRUN \
125 __einfo_error ( EINFO_EIO_UNDERRUN )
126 #define EINFO_EIO_UNDERRUN \
127 __einfo_uniqify ( EINFO_EIO, ( 14 - 0 ), \
129 #define EIO_OVERRUN \
130 __einfo_error ( EINFO_EIO_OVERRUN )
131 #define EINFO_EIO_OVERRUN \
132 __einfo_uniqify ( EINFO_EIO, ( 15 - 0 ), \
134 #define EIO_VF_RING_FULL \
135 __einfo_error ( EINFO_EIO_VF_RING_FULL )
136 #define EINFO_EIO_VF_RING_FULL \
137 __einfo_uniqify ( EINFO_EIO, ( 16 - 0 ), \
138 "Virtual function event ring full" )
139 #define EIO_PARAMETER \
140 __einfo_error ( EINFO_EIO_PARAMETER )
141 #define EINFO_EIO_PARAMETER \
142 __einfo_uniqify ( EINFO_EIO, ( 17 - 0 ), \
144 #define EIO_BANDWIDTH_OVERRUN \
145 __einfo_error ( EINFO_EIO_BANDWIDTH_OVERRUN )
146 #define EINFO_EIO_BANDWIDTH_OVERRUN \
147 __einfo_uniqify ( EINFO_EIO, ( 18 - 0 ), \
148 "Bandwidth overrun" )
149 #define EIO_CONTEXT \
150 __einfo_error ( EINFO_EIO_CONTEXT )
151 #define EINFO_EIO_CONTEXT \
152 __einfo_uniqify ( EINFO_EIO, ( 19 - 0 ), \
153 "Context state error" )
154 #define EIO_NO_PING \
155 __einfo_error ( EINFO_EIO_NO_PING )
156 #define EINFO_EIO_NO_PING \
157 __einfo_uniqify ( EINFO_EIO, ( 20 - 0 ), \
159 #define EIO_RING_FULL \
160 __einfo_error ( EINFO_EIO_RING_FULL )
161 #define EINFO_EIO_RING_FULL \
162 __einfo_uniqify ( EINFO_EIO, ( 21 - 0 ), \
164 #define EIO_INCOMPATIBLE \
165 __einfo_error ( EINFO_EIO_INCOMPATIBLE )
166 #define EINFO_EIO_INCOMPATIBLE \
167 __einfo_uniqify ( EINFO_EIO, ( 22 - 0 ), \
168 "Incompatible device" )
170 __einfo_error ( EINFO_EIO_MISSED )
171 #define EINFO_EIO_MISSED \
172 __einfo_uniqify ( EINFO_EIO, ( 23 - 0 ), \
173 "Missed service error" )
174 #define EIO_CMD_STOPPED \
175 __einfo_error ( EINFO_EIO_CMD_STOPPED )
176 #define EINFO_EIO_CMD_STOPPED \
177 __einfo_uniqify ( EINFO_EIO, ( 24 - 0 ), \
178 "Command ring stopped" )
179 #define EIO_CMD_ABORTED \
180 __einfo_error ( EINFO_EIO_CMD_ABORTED )
181 #define EINFO_EIO_CMD_ABORTED \
182 __einfo_uniqify ( EINFO_EIO, ( 25 - 0 ), \
185 __einfo_error ( EINFO_EIO_STOP )
186 #define EINFO_EIO_STOP \
187 __einfo_uniqify ( EINFO_EIO, ( 26 - 0 ), \
189 #define EIO_STOP_LEN \
190 __einfo_error ( EINFO_EIO_STOP_LEN )
191 #define EINFO_EIO_STOP_LEN \
192 __einfo_uniqify ( EINFO_EIO, ( 27 - 0 ), \
193 "Stopped - length invalid" )
194 #define EIO_STOP_SHORT \
195 __einfo_error ( EINFO_EIO_STOP_SHORT )
196 #define EINFO_EIO_STOP_SHORT \
197 __einfo_uniqify ( EINFO_EIO, ( 28 - 0 ), \
198 "Stopped - short packet" )
199 #define EIO_LATENCY \
200 __einfo_error ( EINFO_EIO_LATENCY )
201 #define EINFO_EIO_LATENCY \
202 __einfo_uniqify ( EINFO_EIO, ( 29 - 0 ), \
203 "Maximum exit latency too large" )
205 __einfo_error ( EINFO_EIO_ISOCH )
206 #define EINFO_EIO_ISOCH \
207 __einfo_uniqify ( EINFO_EIO, ( 31 - 0 ), \
208 "Isochronous buffer overrun" )
209 #define EPROTO_LOST \
210 __einfo_error ( EINFO_EPROTO_LOST )
211 #define EINFO_EPROTO_LOST \
212 __einfo_uniqify ( EINFO_EPROTO, ( 32 - 32 ), \
214 #define EPROTO_UNDEFINED \
215 __einfo_error ( EINFO_EPROTO_UNDEFINED )
216 #define EINFO_EPROTO_UNDEFINED \
217 __einfo_uniqify ( EINFO_EPROTO, ( 33 - 32 ), \
219 #define EPROTO_STREAM_ID \
220 __einfo_error ( EINFO_EPROTO_STREAM_ID )
221 #define EINFO_EPROTO_STREAM_ID \
222 __einfo_uniqify ( EINFO_EPROTO, ( 34 - 32 ), \
223 "Invalid stream ID" )
224 #define EPROTO_SECONDARY \
225 __einfo_error ( EINFO_EPROTO_SECONDARY )
226 #define EINFO_EPROTO_SECONDARY \
227 __einfo_uniqify ( EINFO_EPROTO, ( 35 - 32 ), \
228 "Secondary bandwidth error" )
229 #define EPROTO_SPLIT \
230 __einfo_error ( EINFO_EPROTO_SPLIT )
231 #define EINFO_EPROTO_SPLIT \
232 __einfo_uniqify ( EINFO_EPROTO, ( 36 - 32 ), \
233 "Split transaction error" )
234 #define ECODE(code) \
235 ( ( (code) < 32 ) ? \
236 EUNIQ ( EINFO_EIO, ( (code) & 31 ), EIO_DATA, EIO_BABBLE, \
237 EIO_USB, EIO_TRB, EIO_STALL, EIO_RESOURCE, \
238 EIO_BANDWIDTH, EIO_NO_SLOTS, EIO_STREAM_TYPE, \
239 EIO_SLOT, EIO_ENDPOINT, EIO_SHORT, EIO_UNDERRUN, \
240 EIO_OVERRUN, EIO_VF_RING_FULL, EIO_PARAMETER, \
241 EIO_BANDWIDTH_OVERRUN, EIO_CONTEXT, EIO_NO_PING, \
242 EIO_RING_FULL, EIO_INCOMPATIBLE, EIO_MISSED, \
243 EIO_CMD_STOPPED, EIO_CMD_ABORTED, EIO_STOP, \
244 EIO_STOP_LEN, EIO_STOP_SHORT, EIO_LATENCY, \
247 EUNIQ ( EINFO_EPROTO, ( (code) & 31 ), EPROTO_LOST, \
248 EPROTO_UNDEFINED, EPROTO_STREAM_ID, \
249 EPROTO_SECONDARY, EPROTO_SPLIT ) : \
252 /******************************************************************************
256 ******************************************************************************
262 * @v xhci xHCI device
263 * @v regs MMIO registers
265 static void xhci_init ( struct xhci_device
*xhci
, void *regs
) {
274 /* Locate capability, operational, runtime, and doorbell registers */
276 caplength
= readb ( xhci
->cap
+ XHCI_CAP_CAPLENGTH
);
277 rtsoff
= readl ( xhci
->cap
+ XHCI_CAP_RTSOFF
);
278 dboff
= readl ( xhci
->cap
+ XHCI_CAP_DBOFF
);
279 xhci
->op
= ( xhci
->cap
+ caplength
);
280 xhci
->run
= ( xhci
->cap
+ rtsoff
);
281 xhci
->db
= ( xhci
->cap
+ dboff
);
282 DBGC2 ( xhci
, "XHCI %s cap %08lx op %08lx run %08lx db %08lx\n",
283 xhci
->name
, virt_to_phys ( xhci
->cap
),
284 virt_to_phys ( xhci
->op
), virt_to_phys ( xhci
->run
),
285 virt_to_phys ( xhci
->db
) );
287 /* Read structural parameters 1 */
288 hcsparams1
= readl ( xhci
->cap
+ XHCI_CAP_HCSPARAMS1
);
289 xhci
->slots
= XHCI_HCSPARAMS1_SLOTS ( hcsparams1
);
290 xhci
->intrs
= XHCI_HCSPARAMS1_INTRS ( hcsparams1
);
291 xhci
->ports
= XHCI_HCSPARAMS1_PORTS ( hcsparams1
);
292 DBGC ( xhci
, "XHCI %s has %d slots %d intrs %d ports\n",
293 xhci
->name
, xhci
->slots
, xhci
->intrs
, xhci
->ports
);
295 /* Read structural parameters 2 */
296 hcsparams2
= readl ( xhci
->cap
+ XHCI_CAP_HCSPARAMS2
);
297 xhci
->scratchpads
= XHCI_HCSPARAMS2_SCRATCHPADS ( hcsparams2
);
298 DBGC2 ( xhci
, "XHCI %s needs %d scratchpads\n",
299 xhci
->name
, xhci
->scratchpads
);
301 /* Read capability parameters 1 */
302 hccparams1
= readl ( xhci
->cap
+ XHCI_CAP_HCCPARAMS1
);
303 xhci
->addr64
= XHCI_HCCPARAMS1_ADDR64 ( hccparams1
);
304 xhci
->csz_shift
= XHCI_HCCPARAMS1_CSZ_SHIFT ( hccparams1
);
305 xhci
->xecp
= XHCI_HCCPARAMS1_XECP ( hccparams1
);
308 pagesize
= readl ( xhci
->op
+ XHCI_OP_PAGESIZE
);
309 xhci
->pagesize
= XHCI_PAGESIZE ( pagesize
);
310 assert ( xhci
->pagesize
!= 0 );
311 assert ( ( ( xhci
->pagesize
) & ( xhci
->pagesize
- 1 ) ) == 0 );
312 DBGC2 ( xhci
, "XHCI %s page size %zd bytes\n",
313 xhci
->name
, xhci
->pagesize
);
317 * Find extended capability
319 * @v xhci xHCI device
320 * @v id Capability ID
321 * @v offset Offset to previous extended capability instance, or zero
322 * @ret offset Offset to extended capability, or zero if not found
324 static unsigned int xhci_extended_capability ( struct xhci_device
*xhci
,
326 unsigned int offset
) {
330 /* Locate the extended capability */
333 /* Locate first or next capability as applicable */
335 xecp
= readl ( xhci
->cap
+ offset
);
336 next
= XHCI_XECP_NEXT ( xecp
);
344 /* Check if this is the requested capability */
345 xecp
= readl ( xhci
->cap
+ offset
);
346 if ( XHCI_XECP_ID ( xecp
) == id
)
352 * Write potentially 64-bit register
354 * @v xhci xHCI device
356 * @v reg Register address
357 * @ret rc Return status code
359 static inline __attribute__ (( always_inline
)) int
360 xhci_writeq ( struct xhci_device
*xhci
, physaddr_t value
, void *reg
) {
362 /* If this is a 32-bit build, then this can never fail
363 * (allowing the compiler to optimise out the error path).
365 if ( sizeof ( value
) <= sizeof ( uint32_t ) ) {
366 writel ( value
, reg
);
367 writel ( 0, ( reg
+ sizeof ( uint32_t ) ) );
371 /* If the device does not support 64-bit addresses and this
372 * address is outside the 32-bit address space, then fail.
374 if ( ( value
& ~0xffffffffULL
) && ! xhci
->addr64
) {
375 DBGC ( xhci
, "XHCI %s cannot access address %lx\n",
380 /* If this is a 64-bit build, then writeq() is available */
381 writeq ( value
, reg
);
386 * Calculate buffer alignment
389 * @ret align Buffer alignment
391 * Determine alignment required for a buffer which must be aligned to
392 * at least XHCI_MIN_ALIGN and which must not cross a page boundary.
394 static inline size_t xhci_align ( size_t len
) {
397 /* Align to own length (rounded up to a power of two) */
398 align
= ( 1 << fls ( len
- 1 ) );
400 /* Round up to XHCI_MIN_ALIGN if needed */
401 if ( align
< XHCI_MIN_ALIGN
)
402 align
= XHCI_MIN_ALIGN
;
408 * Calculate device context offset
410 * @v xhci xHCI device
411 * @v ctx Context index
413 static inline size_t xhci_device_context_offset ( struct xhci_device
*xhci
,
416 return ( XHCI_DCI ( ctx
) << xhci
->csz_shift
);
420 * Calculate input context offset
422 * @v xhci xHCI device
423 * @v ctx Context index
425 static inline size_t xhci_input_context_offset ( struct xhci_device
*xhci
,
428 return ( XHCI_ICI ( ctx
) << xhci
->csz_shift
);
431 /******************************************************************************
435 ******************************************************************************
439 * Dump host controller registers
441 * @v xhci xHCI device
443 static inline void xhci_dump ( struct xhci_device
*xhci
) {
450 /* Do nothing unless debugging is enabled */
455 usbcmd
= readl ( xhci
->op
+ XHCI_OP_USBCMD
);
456 DBGC ( xhci
, "XHCI %s USBCMD %08x%s%s\n", xhci
->name
, usbcmd
,
457 ( ( usbcmd
& XHCI_USBCMD_RUN
) ?
" run" : "" ),
458 ( ( usbcmd
& XHCI_USBCMD_HCRST
) ?
" hcrst" : "" ) );
461 usbsts
= readl ( xhci
->op
+ XHCI_OP_USBSTS
);
462 DBGC ( xhci
, "XHCI %s USBSTS %08x%s\n", xhci
->name
, usbsts
,
463 ( ( usbsts
& XHCI_USBSTS_HCH
) ?
" hch" : "" ) );
466 pagesize
= readl ( xhci
->op
+ XHCI_OP_PAGESIZE
);
467 DBGC ( xhci
, "XHCI %s PAGESIZE %08x\n", xhci
->name
, pagesize
);
470 dnctrl
= readl ( xhci
->op
+ XHCI_OP_DNCTRL
);
471 DBGC ( xhci
, "XHCI %s DNCTRL %08x\n", xhci
->name
, dnctrl
);
474 config
= readl ( xhci
->op
+ XHCI_OP_CONFIG
);
475 DBGC ( xhci
, "XHCI %s CONFIG %08x\n", xhci
->name
, config
);
479 * Dump port registers
481 * @v xhci xHCI device
482 * @v port Port number
484 static inline void xhci_dump_port ( struct xhci_device
*xhci
,
485 unsigned int port
) {
491 /* Do nothing unless debugging is enabled */
496 portsc
= readl ( xhci
->op
+ XHCI_OP_PORTSC ( port
) );
497 DBGC ( xhci
, "XHCI %s-%d PORTSC %08x%s%s%s%s psiv=%d\n",
498 xhci
->name
, port
, portsc
,
499 ( ( portsc
& XHCI_PORTSC_CCS
) ?
" ccs" : "" ),
500 ( ( portsc
& XHCI_PORTSC_PED
) ?
" ped" : "" ),
501 ( ( portsc
& XHCI_PORTSC_PR
) ?
" pr" : "" ),
502 ( ( portsc
& XHCI_PORTSC_PP
) ?
" pp" : "" ),
503 XHCI_PORTSC_PSIV ( portsc
) );
506 portpmsc
= readl ( xhci
->op
+ XHCI_OP_PORTPMSC ( port
) );
507 DBGC ( xhci
, "XHCI %s-%d PORTPMSC %08x\n", xhci
->name
, port
, portpmsc
);
510 portli
= readl ( xhci
->op
+ XHCI_OP_PORTLI ( port
) );
511 DBGC ( xhci
, "XHCI %s-%d PORTLI %08x\n", xhci
->name
, port
, portli
);
514 porthlpmc
= readl ( xhci
->op
+ XHCI_OP_PORTHLPMC ( port
) );
515 DBGC ( xhci
, "XHCI %s-%d PORTHLPMC %08x\n",
516 xhci
->name
, port
, porthlpmc
);
519 /******************************************************************************
523 ******************************************************************************
526 /** Prevent the release of ownership back to BIOS */
527 static int xhci_legacy_prevent_release
;
530 * Initialise USB legacy support
532 * @v xhci xHCI device
534 static void xhci_legacy_init ( struct xhci_device
*xhci
) {
538 /* Locate USB legacy support capability (if present) */
539 legacy
= xhci_extended_capability ( xhci
, XHCI_XECP_ID_LEGACY
, 0 );
541 /* Not an error; capability may not be present */
542 DBGC ( xhci
, "XHCI %s has no USB legacy support capability\n",
547 /* Check if legacy USB support is enabled */
548 bios
= readb ( xhci
->cap
+ legacy
+ XHCI_USBLEGSUP_BIOS
);
549 if ( ! ( bios
& XHCI_USBLEGSUP_BIOS_OWNED
) ) {
550 /* Not an error; already owned by OS */
551 DBGC ( xhci
, "XHCI %s USB legacy support already disabled\n",
556 /* Record presence of USB legacy support capability */
557 xhci
->legacy
= legacy
;
561 * Claim ownership from BIOS
563 * @v xhci xHCI device
565 static void xhci_legacy_claim ( struct xhci_device
*xhci
) {
570 /* Do nothing unless legacy support capability is present */
571 if ( ! xhci
->legacy
)
574 /* Claim ownership */
575 writeb ( XHCI_USBLEGSUP_OS_OWNED
,
576 xhci
->cap
+ xhci
->legacy
+ XHCI_USBLEGSUP_OS
);
578 /* Wait for BIOS to release ownership */
579 for ( i
= 0 ; i
< XHCI_USBLEGSUP_MAX_WAIT_MS
; i
++ ) {
581 /* Check if BIOS has released ownership */
582 bios
= readb ( xhci
->cap
+ xhci
->legacy
+ XHCI_USBLEGSUP_BIOS
);
583 if ( ! ( bios
& XHCI_USBLEGSUP_BIOS_OWNED
) ) {
584 DBGC ( xhci
, "XHCI %s claimed ownership from BIOS\n",
586 ctlsts
= readl ( xhci
->cap
+ xhci
->legacy
+
587 XHCI_USBLEGSUP_CTLSTS
);
589 DBGC ( xhci
, "XHCI %s warning: BIOS retained "
590 "SMIs: %08x\n", xhci
->name
, ctlsts
);
599 /* BIOS did not release ownership. Claim it forcibly by
600 * disabling all SMIs.
602 DBGC ( xhci
, "XHCI %s could not claim ownership from BIOS: forcibly "
603 "disabling SMIs\n", xhci
->name
);
604 writel ( 0, xhci
->cap
+ xhci
->legacy
+ XHCI_USBLEGSUP_CTLSTS
);
608 * Release ownership back to BIOS
610 * @v xhci xHCI device
612 static void xhci_legacy_release ( struct xhci_device
*xhci
) {
614 /* Do nothing unless legacy support capability is present */
615 if ( ! xhci
->legacy
)
618 /* Do nothing if releasing ownership is prevented */
619 if ( xhci_legacy_prevent_release
) {
620 DBGC ( xhci
, "XHCI %s not releasing ownership to BIOS\n",
625 /* Release ownership */
626 writeb ( 0, xhci
->cap
+ xhci
->legacy
+ XHCI_USBLEGSUP_OS
);
627 DBGC ( xhci
, "XHCI %s released ownership to BIOS\n", xhci
->name
);
630 /******************************************************************************
632 * Supported protocols
634 ******************************************************************************
638 * Transcribe port speed (for debugging)
640 * @v psi Protocol speed ID
641 * @ret speed Transcribed speed
643 static inline const char * xhci_speed_name ( uint32_t psi
) {
644 static const char *exponents
[4] = { "", "k", "M", "G" };
645 static char buf
[ 10 /* "xxxxxXbps" + NUL */ ];
646 unsigned int mantissa
;
647 unsigned int exponent
;
649 /* Extract mantissa and exponent */
650 mantissa
= XHCI_SUPPORTED_PSI_MANTISSA ( psi
);
651 exponent
= XHCI_SUPPORTED_PSI_EXPONENT ( psi
);
653 /* Transcribe speed */
654 snprintf ( buf
, sizeof ( buf
), "%d%sbps",
655 mantissa
, exponents
[exponent
] );
660 * Find supported protocol extended capability for a port
662 * @v xhci xHCI device
663 * @v port Port number
664 * @ret supported Offset to extended capability, or zero if not found
666 static unsigned int xhci_supported_protocol ( struct xhci_device
*xhci
,
667 unsigned int port
) {
668 unsigned int supported
= 0;
673 /* Iterate over all supported protocol structures */
674 while ( ( supported
= xhci_extended_capability ( xhci
,
675 XHCI_XECP_ID_SUPPORTED
,
678 /* Determine port range */
679 ports
= readl ( xhci
->cap
+ supported
+ XHCI_SUPPORTED_PORTS
);
680 offset
= XHCI_SUPPORTED_PORTS_OFFSET ( ports
);
681 count
= XHCI_SUPPORTED_PORTS_COUNT ( ports
);
683 /* Check if port lies within this range */
684 if ( ( port
- offset
) < count
)
688 DBGC ( xhci
, "XHCI %s-%d has no supported protocol\n",
696 * @v xhci xHCI device
697 * @v port Port number
698 * @ret protocol USB protocol, or zero if not found
700 static unsigned int xhci_port_protocol ( struct xhci_device
*xhci
,
701 unsigned int port
) {
702 unsigned int supported
= xhci_supported_protocol ( xhci
, port
);
707 unsigned int protocol
;
717 /* Fail if there is no supported protocol */
721 /* Determine protocol version */
722 revision
= readl ( xhci
->cap
+ supported
+ XHCI_SUPPORTED_REVISION
);
723 protocol
= XHCI_SUPPORTED_REVISION_VER ( revision
);
725 /* Describe port protocol */
727 name
.raw
= cpu_to_le32 ( readl ( xhci
->cap
+ supported
+
728 XHCI_SUPPORTED_NAME
) );
730 slot
= readl ( xhci
->cap
+ supported
+ XHCI_SUPPORTED_SLOT
);
731 type
= XHCI_SUPPORTED_SLOT_TYPE ( slot
);
732 DBGC2 ( xhci
, "XHCI %s-%d %sv%04x type %d",
733 xhci
->name
, port
, name
.text
, protocol
, type
);
734 ports
= readl ( xhci
->cap
+ supported
+ XHCI_SUPPORTED_PORTS
);
735 psic
= XHCI_SUPPORTED_PORTS_PSIC ( ports
);
737 DBGC2 ( xhci
, " speeds" );
738 for ( i
= 0 ; i
< psic
; i
++ ) {
739 psi
= readl ( xhci
->cap
+ supported
+
740 XHCI_SUPPORTED_PSI ( i
) );
741 psiv
= XHCI_SUPPORTED_PSI_VALUE ( psi
);
742 DBGC2 ( xhci
, " %d:%s", psiv
,
743 xhci_speed_name ( psi
) );
746 if ( xhci
->quirks
& XHCI_BAD_PSIV
)
747 DBGC2 ( xhci
, " (ignored)" );
748 DBGC2 ( xhci
, "\n" );
755 * Find port slot type
757 * @v xhci xHCI device
758 * @v port Port number
759 * @ret type Slot type, or negative error
761 static int xhci_port_slot_type ( struct xhci_device
*xhci
, unsigned int port
) {
762 unsigned int supported
= xhci_supported_protocol ( xhci
, port
);
766 /* Fail if there is no supported protocol */
771 slot
= readl ( xhci
->cap
+ supported
+ XHCI_SUPPORTED_SLOT
);
772 type
= XHCI_SUPPORTED_SLOT_TYPE ( slot
);
780 * @v xhci xHCI device
781 * @v port Port number
782 * @v psiv Protocol speed ID value
783 * @ret speed Port speed, or negative error
785 static int xhci_port_speed ( struct xhci_device
*xhci
, unsigned int port
,
786 unsigned int psiv
) {
787 unsigned int supported
= xhci_supported_protocol ( xhci
, port
);
789 unsigned int mantissa
;
790 unsigned int exponent
;
796 /* Fail if there is no supported protocol */
800 /* Get protocol speed ID count */
801 ports
= readl ( xhci
->cap
+ supported
+ XHCI_SUPPORTED_PORTS
);
802 psic
= XHCI_SUPPORTED_PORTS_PSIC ( ports
);
804 /* Use the default mappings if applicable */
805 if ( ( psic
== 0 ) || ( xhci
->quirks
& XHCI_BAD_PSIV
) ) {
807 case XHCI_SPEED_LOW
: return USB_SPEED_LOW
;
808 case XHCI_SPEED_FULL
: return USB_SPEED_FULL
;
809 case XHCI_SPEED_HIGH
: return USB_SPEED_HIGH
;
810 case XHCI_SPEED_SUPER
: return USB_SPEED_SUPER
;
812 DBGC ( xhci
, "XHCI %s-%d non-standard PSI value %d\n",
813 xhci
->name
, port
, psiv
);
818 /* Iterate over PSI dwords looking for a match */
819 for ( i
= 0 ; i
< psic
; i
++ ) {
820 psi
= readl ( xhci
->cap
+ supported
+ XHCI_SUPPORTED_PSI ( i
));
821 if ( psiv
== XHCI_SUPPORTED_PSI_VALUE ( psi
) ) {
822 mantissa
= XHCI_SUPPORTED_PSI_MANTISSA ( psi
);
823 exponent
= XHCI_SUPPORTED_PSI_EXPONENT ( psi
);
824 speed
= USB_SPEED ( mantissa
, exponent
);
829 DBGC ( xhci
, "XHCI %s-%d spurious PSI value %d\n",
830 xhci
->name
, port
, psiv
);
835 * Find protocol speed ID value
837 * @v xhci xHCI device
838 * @v port Port number
840 * @ret psiv Protocol speed ID value, or negative error
842 static int xhci_port_psiv ( struct xhci_device
*xhci
, unsigned int port
,
843 unsigned int speed
) {
844 unsigned int supported
= xhci_supported_protocol ( xhci
, port
);
846 unsigned int mantissa
;
847 unsigned int exponent
;
853 /* Fail if there is no supported protocol */
857 /* Get protocol speed ID count */
858 ports
= readl ( xhci
->cap
+ supported
+ XHCI_SUPPORTED_PORTS
);
859 psic
= XHCI_SUPPORTED_PORTS_PSIC ( ports
);
861 /* Use the default mappings if applicable */
862 if ( ( psic
== 0 ) || ( xhci
->quirks
& XHCI_BAD_PSIV
) ) {
864 case USB_SPEED_LOW
: return XHCI_SPEED_LOW
;
865 case USB_SPEED_FULL
: return XHCI_SPEED_FULL
;
866 case USB_SPEED_HIGH
: return XHCI_SPEED_HIGH
;
867 case USB_SPEED_SUPER
: return XHCI_SPEED_SUPER
;
869 DBGC ( xhci
, "XHCI %s-%d non-standard speed %d\n",
870 xhci
->name
, port
, speed
);
875 /* Iterate over PSI dwords looking for a match */
876 for ( i
= 0 ; i
< psic
; i
++ ) {
877 psi
= readl ( xhci
->cap
+ supported
+ XHCI_SUPPORTED_PSI ( i
));
878 mantissa
= XHCI_SUPPORTED_PSI_MANTISSA ( psi
);
879 exponent
= XHCI_SUPPORTED_PSI_EXPONENT ( psi
);
880 if ( speed
== USB_SPEED ( mantissa
, exponent
) ) {
881 psiv
= XHCI_SUPPORTED_PSI_VALUE ( psi
);
886 DBGC ( xhci
, "XHCI %s-%d unrepresentable speed %#x\n",
887 xhci
->name
, port
, speed
);
891 /******************************************************************************
893 * Device context base address array
895 ******************************************************************************
899 * Allocate device context base address array
901 * @v xhci xHCI device
902 * @ret rc Return status code
904 static int xhci_dcbaa_alloc ( struct xhci_device
*xhci
) {
909 /* Allocate and initialise structure. Must be at least
910 * 64-byte aligned and must not cross a page boundary, so
911 * align on its own size (rounded up to a power of two and
912 * with a minimum of 64 bytes).
914 len
= ( ( xhci
->slots
+ 1 ) * sizeof ( xhci
->dcbaa
[0] ) );
915 xhci
->dcbaa
= malloc_dma ( len
, xhci_align ( len
) );
916 if ( ! xhci
->dcbaa
) {
917 DBGC ( xhci
, "XHCI %s could not allocate DCBAA\n", xhci
->name
);
921 memset ( xhci
->dcbaa
, 0, len
);
923 /* Program DCBAA pointer */
924 dcbaap
= virt_to_phys ( xhci
->dcbaa
);
925 if ( ( rc
= xhci_writeq ( xhci
, dcbaap
,
926 xhci
->op
+ XHCI_OP_DCBAAP
) ) != 0 )
929 DBGC2 ( xhci
, "XHCI %s DCBAA at [%08lx,%08lx)\n",
930 xhci
->name
, dcbaap
, ( dcbaap
+ len
) );
934 free_dma ( xhci
->dcbaa
, len
);
940 * Free device context base address array
942 * @v xhci xHCI device
944 static void xhci_dcbaa_free ( struct xhci_device
*xhci
) {
949 for ( i
= 0 ; i
<= xhci
->slots
; i
++ )
950 assert ( xhci
->dcbaa
[i
] == 0 );
952 /* Clear DCBAA pointer */
953 xhci_writeq ( xhci
, 0, xhci
->op
+ XHCI_OP_DCBAAP
);
956 len
= ( ( xhci
->slots
+ 1 ) * sizeof ( xhci
->dcbaa
[0] ) );
957 free_dma ( xhci
->dcbaa
, len
);
960 /******************************************************************************
964 ******************************************************************************
968 * Allocate scratchpad buffers
970 * @v xhci xHCI device
971 * @ret rc Return status code
973 static int xhci_scratchpad_alloc ( struct xhci_device
*xhci
) {
980 /* Do nothing if no scratchpad buffers are used */
981 if ( ! xhci
->scratchpads
)
984 /* Allocate scratchpads */
985 len
= ( xhci
->scratchpads
* xhci
->pagesize
);
986 xhci
->scratchpad
= umalloc ( len
);
987 if ( ! xhci
->scratchpad
) {
988 DBGC ( xhci
, "XHCI %s could not allocate scratchpad buffers\n",
993 memset_user ( xhci
->scratchpad
, 0, 0, len
);
995 /* Allocate scratchpad array */
996 array_len
= ( xhci
->scratchpads
* sizeof ( xhci
->scratchpad_array
[0] ));
997 xhci
->scratchpad_array
=
998 malloc_dma ( array_len
, xhci_align ( array_len
) );
999 if ( ! xhci
->scratchpad_array
) {
1000 DBGC ( xhci
, "XHCI %s could not allocate scratchpad buffer "
1001 "array\n", xhci
->name
);
1003 goto err_alloc_array
;
1006 /* Populate scratchpad array */
1007 for ( i
= 0 ; i
< xhci
->scratchpads
; i
++ ) {
1008 phys
= user_to_phys ( xhci
->scratchpad
, ( i
* xhci
->pagesize
));
1009 xhci
->scratchpad_array
[i
] = phys
;
1012 /* Set scratchpad array pointer */
1013 assert ( xhci
->dcbaa
!= NULL
);
1014 xhci
->dcbaa
[0] = cpu_to_le64 ( virt_to_phys ( xhci
->scratchpad_array
));
1016 DBGC2 ( xhci
, "XHCI %s scratchpad [%08lx,%08lx) array [%08lx,%08lx)\n",
1017 xhci
->name
, user_to_phys ( xhci
->scratchpad
, 0 ),
1018 user_to_phys ( xhci
->scratchpad
, len
),
1019 virt_to_phys ( xhci
->scratchpad_array
),
1020 ( virt_to_phys ( xhci
->scratchpad_array
) + array_len
) );
1023 free_dma ( xhci
->scratchpad_array
, array_len
);
1025 ufree ( xhci
->scratchpad
);
1031 * Free scratchpad buffers
1033 * @v xhci xHCI device
1035 static void xhci_scratchpad_free ( struct xhci_device
*xhci
) {
1038 /* Do nothing if no scratchpad buffers are used */
1039 if ( ! xhci
->scratchpads
)
1042 /* Clear scratchpad array pointer */
1043 assert ( xhci
->dcbaa
!= NULL
);
1046 /* Free scratchpad array */
1047 array_len
= ( xhci
->scratchpads
* sizeof ( xhci
->scratchpad_array
[0] ));
1048 free_dma ( xhci
->scratchpad_array
, array_len
);
1050 /* Free scratchpads */
1051 ufree ( xhci
->scratchpad
);
1054 /******************************************************************************
1056 * Run / stop / reset
1058 ******************************************************************************
1064 * @v xhci xHCI device
1066 static void xhci_run ( struct xhci_device
*xhci
) {
1070 /* Configure number of device slots */
1071 config
= readl ( xhci
->op
+ XHCI_OP_CONFIG
);
1072 config
&= ~XHCI_CONFIG_MAX_SLOTS_EN_MASK
;
1073 config
|= XHCI_CONFIG_MAX_SLOTS_EN ( xhci
->slots
);
1074 writel ( config
, xhci
->op
+ XHCI_OP_CONFIG
);
1076 /* Set run/stop bit */
1077 usbcmd
= readl ( xhci
->op
+ XHCI_OP_USBCMD
);
1078 usbcmd
|= XHCI_USBCMD_RUN
;
1079 writel ( usbcmd
, xhci
->op
+ XHCI_OP_USBCMD
);
1085 * @v xhci xHCI device
1086 * @ret rc Return status code
1088 static int xhci_stop ( struct xhci_device
*xhci
) {
1093 /* Clear run/stop bit */
1094 usbcmd
= readl ( xhci
->op
+ XHCI_OP_USBCMD
);
1095 usbcmd
&= ~XHCI_USBCMD_RUN
;
1096 writel ( usbcmd
, xhci
->op
+ XHCI_OP_USBCMD
);
1098 /* Wait for device to stop */
1099 for ( i
= 0 ; i
< XHCI_STOP_MAX_WAIT_MS
; i
++ ) {
1101 /* Check if device is stopped */
1102 usbsts
= readl ( xhci
->op
+ XHCI_OP_USBSTS
);
1103 if ( usbsts
& XHCI_USBSTS_HCH
)
1110 DBGC ( xhci
, "XHCI %s timed out waiting for stop\n", xhci
->name
);
1117 * @v xhci xHCI device
1118 * @ret rc Return status code
1120 static int xhci_reset ( struct xhci_device
*xhci
) {
1125 /* The xHCI specification states that resetting a running
1126 * device may result in undefined behaviour, so try stopping
1129 if ( ( rc
= xhci_stop ( xhci
) ) != 0 ) {
1130 /* Ignore errors and attempt to reset the device anyway */
1134 writel ( XHCI_USBCMD_HCRST
, xhci
->op
+ XHCI_OP_USBCMD
);
1136 /* Wait for reset to complete */
1137 for ( i
= 0 ; i
< XHCI_RESET_MAX_WAIT_MS
; i
++ ) {
1139 /* Check if reset is complete */
1140 usbcmd
= readl ( xhci
->op
+ XHCI_OP_USBCMD
);
1141 if ( ! ( usbcmd
& XHCI_USBCMD_HCRST
) )
1148 DBGC ( xhci
, "XHCI %s timed out waiting for reset\n", xhci
->name
);
1152 /******************************************************************************
1154 * Transfer request blocks
1156 ******************************************************************************
1160 * Allocate transfer request block ring
1162 * @v xhci xHCI device
1164 * @v shift Ring size (log2)
1165 * @v slot Device slot
1166 * @v target Doorbell target
1167 * @v stream Doorbell stream ID
1168 * @ret rc Return status code
1170 static int xhci_ring_alloc ( struct xhci_device
*xhci
,
1171 struct xhci_trb_ring
*ring
,
1172 unsigned int shift
, unsigned int slot
,
1173 unsigned int target
, unsigned int stream
) {
1174 struct xhci_trb_link
*link
;
1179 assert ( shift
> 0 );
1181 /* Initialise structure */
1182 memset ( ring
, 0, sizeof ( *ring
) );
1183 ring
->shift
= shift
;
1184 count
= ( 1U << shift
);
1185 ring
->mask
= ( count
- 1 );
1186 ring
->len
= ( ( count
+ 1 /* Link TRB */ ) * sizeof ( ring
->trb
[0] ) );
1187 ring
->db
= ( xhci
->db
+ ( slot
* sizeof ( ring
->dbval
) ) );
1188 ring
->dbval
= XHCI_DBVAL ( target
, stream
);
1190 /* Allocate I/O buffers */
1191 ring
->iobuf
= zalloc ( count
* sizeof ( ring
->iobuf
[0] ) );
1192 if ( ! ring
->iobuf
) {
1194 goto err_alloc_iobuf
;
1198 ring
->trb
= malloc_dma ( ring
->len
, xhci_align ( ring
->len
) );
1199 if ( ! ring
->trb
) {
1203 memset ( ring
->trb
, 0, ring
->len
);
1205 /* Initialise Link TRB */
1206 link
= &ring
->trb
[count
].link
;
1207 link
->next
= cpu_to_le64 ( virt_to_phys ( ring
->trb
) );
1208 link
->flags
= XHCI_TRB_TC
;
1209 link
->type
= XHCI_TRB_LINK
;
1214 free_dma ( ring
->trb
, ring
->len
);
1216 free ( ring
->iobuf
);
1222 * Reset transfer request block ring
1226 static void xhci_ring_reset ( struct xhci_trb_ring
*ring
) {
1227 unsigned int count
= ( 1U << ring
->shift
);
1229 /* Reset producer and consumer counters */
1233 /* Reset TRBs (except Link TRB) */
1234 memset ( ring
->trb
, 0, ( count
* sizeof ( ring
->trb
[0] ) ) );
1238 * Free transfer request block ring
1242 static void xhci_ring_free ( struct xhci_trb_ring
*ring
) {
1243 unsigned int count
= ( 1U << ring
->shift
);
1247 assert ( ring
->cons
== ring
->prod
);
1248 for ( i
= 0 ; i
< count
; i
++ )
1249 assert ( ring
->iobuf
[i
] == NULL
);
1252 free_dma ( ring
->trb
, ring
->len
);
1254 /* Free I/O buffers */
1255 free ( ring
->iobuf
);
1259 * Enqueue a transfer request block
1262 * @v iobuf I/O buffer (if any)
1263 * @v trb Transfer request block (with empty Cycle flag)
1264 * @ret rc Return status code
1266 * This operation does not implicitly ring the doorbell register.
1268 static int xhci_enqueue ( struct xhci_trb_ring
*ring
, struct io_buffer
*iobuf
,
1269 const union xhci_trb
*trb
) {
1270 union xhci_trb
*dest
;
1277 assert ( ! ( trb
->common
.flags
& XHCI_TRB_C
) );
1279 /* Fail if ring is full */
1280 if ( ! xhci_ring_remaining ( ring
) )
1283 /* Update producer counter (and link TRB, if applicable) */
1284 prod
= ring
->prod
++;
1286 cycle
= ( ( ~( prod
>> ring
->shift
) ) & XHCI_TRB_C
);
1287 index
= ( prod
& mask
);
1289 ring
->link
->flags
= ( XHCI_TRB_TC
| ( cycle
^ XHCI_TRB_C
) );
1291 /* Record I/O buffer */
1292 ring
->iobuf
[index
] = iobuf
;
1295 dest
= &ring
->trb
[index
];
1296 dest
->template.parameter
= trb
->template.parameter
;
1297 dest
->template.status
= trb
->template.status
;
1299 dest
->template.control
= ( trb
->template.control
|
1300 cpu_to_le32 ( cycle
) );
1306 * Dequeue a transfer request block
1309 * @ret iobuf I/O buffer
1311 static struct io_buffer
* xhci_dequeue ( struct xhci_trb_ring
*ring
) {
1312 struct io_buffer
*iobuf
;
1318 assert ( xhci_ring_fill ( ring
) != 0 );
1320 /* Update consumer counter */
1321 cons
= ring
->cons
++;
1323 index
= ( cons
& mask
);
1325 /* Retrieve I/O buffer */
1326 iobuf
= ring
->iobuf
[index
];
1327 ring
->iobuf
[index
] = NULL
;
1333 * Enqueue multiple transfer request blocks
1336 * @v iobuf I/O buffer
1337 * @v trbs Transfer request blocks (with empty Cycle flag)
1338 * @v count Number of transfer request blocks
1339 * @ret rc Return status code
1341 * This operation does not implicitly ring the doorbell register.
1343 static int xhci_enqueue_multi ( struct xhci_trb_ring
*ring
,
1344 struct io_buffer
*iobuf
,
1345 const union xhci_trb
*trbs
,
1346 unsigned int count
) {
1347 const union xhci_trb
*trb
= trbs
;
1351 assert ( iobuf
!= NULL
);
1353 /* Fail if ring does not have sufficient space */
1354 if ( xhci_ring_remaining ( ring
) < count
)
1357 /* Enqueue each TRB, recording the I/O buffer with the final TRB */
1359 rc
= xhci_enqueue ( ring
, ( count ? NULL
: iobuf
), trb
++ );
1360 assert ( rc
== 0 ); /* Should never be able to fail */
1367 * Dequeue multiple transfer request blocks
1370 * @ret iobuf I/O buffer
1372 static struct io_buffer
* xhci_dequeue_multi ( struct xhci_trb_ring
*ring
) {
1373 struct io_buffer
*iobuf
;
1375 /* Dequeue TRBs until we reach the final TRB for an I/O buffer */
1377 iobuf
= xhci_dequeue ( ring
);
1378 } while ( iobuf
== NULL
);
1384 * Ring doorbell register
1388 static inline __attribute__ (( always_inline
)) void
1389 xhci_doorbell ( struct xhci_trb_ring
*ring
) {
1392 writel ( ring
->dbval
, ring
->db
);
1395 /******************************************************************************
1397 * Command and event rings
1399 ******************************************************************************
1403 * Allocate command ring
1405 * @v xhci xHCI device
1406 * @ret rc Return status code
1408 static int xhci_command_alloc ( struct xhci_device
*xhci
) {
1412 /* Allocate TRB ring */
1413 if ( ( rc
= xhci_ring_alloc ( xhci
, &xhci
->command
, XHCI_CMD_TRBS_LOG2
,
1415 goto err_ring_alloc
;
1417 /* Program command ring control register */
1418 crp
= virt_to_phys ( xhci
->command
.trb
);
1419 if ( ( rc
= xhci_writeq ( xhci
, ( crp
| XHCI_CRCR_RCS
),
1420 xhci
->op
+ XHCI_OP_CRCR
) ) != 0 )
1423 DBGC2 ( xhci
, "XHCI %s CRCR at [%08lx,%08lx)\n",
1424 xhci
->name
, crp
, ( crp
+ xhci
->command
.len
) );
1428 xhci_ring_free ( &xhci
->command
);
1436 * @v xhci xHCI device
1438 static void xhci_command_free ( struct xhci_device
*xhci
) {
1441 assert ( ( readl ( xhci
->op
+ XHCI_OP_CRCR
) & XHCI_CRCR_CRR
) == 0 );
1443 /* Clear command ring control register */
1444 xhci_writeq ( xhci
, 0, xhci
->op
+ XHCI_OP_CRCR
);
1447 xhci_ring_free ( &xhci
->command
);
1451 * Allocate event ring
1453 * @v xhci xHCI device
1454 * @ret rc Return status code
1456 static int xhci_event_alloc ( struct xhci_device
*xhci
) {
1457 struct xhci_event_ring
*event
= &xhci
->event
;
1462 /* Allocate event ring */
1463 count
= ( 1 << XHCI_EVENT_TRBS_LOG2
);
1464 len
= ( count
* sizeof ( event
->trb
[0] ) );
1465 event
->trb
= malloc_dma ( len
, xhci_align ( len
) );
1466 if ( ! event
->trb
) {
1470 memset ( event
->trb
, 0, len
);
1472 /* Allocate event ring segment table */
1473 event
->segment
= malloc_dma ( sizeof ( event
->segment
[0] ),
1474 xhci_align ( sizeof (event
->segment
[0])));
1475 if ( ! event
->segment
) {
1477 goto err_alloc_segment
;
1479 memset ( event
->segment
, 0, sizeof ( event
->segment
[0] ) );
1480 event
->segment
[0].base
= cpu_to_le64 ( virt_to_phys ( event
->trb
) );
1481 event
->segment
[0].count
= cpu_to_le32 ( count
);
1483 /* Program event ring registers */
1484 writel ( 1, xhci
->run
+ XHCI_RUN_ERSTSZ ( 0 ) );
1485 if ( ( rc
= xhci_writeq ( xhci
, virt_to_phys ( event
->trb
),
1486 xhci
->run
+ XHCI_RUN_ERDP ( 0 ) ) ) != 0 )
1487 goto err_writeq_erdp
;
1488 if ( ( rc
= xhci_writeq ( xhci
, virt_to_phys ( event
->segment
),
1489 xhci
->run
+ XHCI_RUN_ERSTBA ( 0 ) ) ) != 0 )
1490 goto err_writeq_erstba
;
1492 DBGC2 ( xhci
, "XHCI %s event ring [%08lx,%08lx) table [%08lx,%08lx)\n",
1493 xhci
->name
, virt_to_phys ( event
->trb
),
1494 ( virt_to_phys ( event
->trb
) + len
),
1495 virt_to_phys ( event
->segment
),
1496 ( virt_to_phys ( event
->segment
) +
1497 sizeof (event
->segment
[0] ) ) );
1500 xhci_writeq ( xhci
, 0, xhci
->run
+ XHCI_RUN_ERSTBA ( 0 ) );
1502 xhci_writeq ( xhci
, 0, xhci
->run
+ XHCI_RUN_ERDP ( 0 ) );
1504 free_dma ( event
->trb
, len
);
1506 free_dma ( event
->segment
, sizeof ( event
->segment
[0] ) );
1514 * @v xhci xHCI device
1516 static void xhci_event_free ( struct xhci_device
*xhci
) {
1517 struct xhci_event_ring
*event
= &xhci
->event
;
1521 /* Clear event ring registers */
1522 writel ( 0, xhci
->run
+ XHCI_RUN_ERSTSZ ( 0 ) );
1523 xhci_writeq ( xhci
, 0, xhci
->run
+ XHCI_RUN_ERSTBA ( 0 ) );
1524 xhci_writeq ( xhci
, 0, xhci
->run
+ XHCI_RUN_ERDP ( 0 ) );
1526 /* Free event ring segment table */
1527 free_dma ( event
->segment
, sizeof ( event
->segment
[0] ) );
1529 /* Free event ring */
1530 count
= ( 1 << XHCI_EVENT_TRBS_LOG2
);
1531 len
= ( count
* sizeof ( event
->trb
[0] ) );
1532 free_dma ( event
->trb
, len
);
1536 * Handle transfer event
1538 * @v xhci xHCI device
1539 * @v trb Transfer event TRB
1541 static void xhci_transfer ( struct xhci_device
*xhci
,
1542 struct xhci_trb_transfer
*trb
) {
1543 struct xhci_slot
*slot
;
1544 struct xhci_endpoint
*endpoint
;
1545 struct io_buffer
*iobuf
;
1548 /* Profile transfer events */
1549 profile_start ( &xhci_transfer_profiler
);
1552 if ( ( trb
->slot
> xhci
->slots
) ||
1553 ( ( slot
= xhci
->slot
[trb
->slot
] ) == NULL
) ) {
1554 DBGC ( xhci
, "XHCI %s transfer event invalid slot %d:\n",
1555 xhci
->name
, trb
->slot
);
1556 DBGC_HDA ( xhci
, 0, trb
, sizeof ( *trb
) );
1560 /* Identify endpoint */
1561 if ( ( trb
->endpoint
> XHCI_CTX_END
) ||
1562 ( ( endpoint
= slot
->endpoint
[trb
->endpoint
] ) == NULL
) ) {
1563 DBGC ( xhci
, "XHCI %s slot %d transfer event invalid epid "
1564 "%d:\n", xhci
->name
, slot
->id
, trb
->endpoint
);
1565 DBGC_HDA ( xhci
, 0, trb
, sizeof ( *trb
) );
1569 /* Dequeue TRB(s) */
1570 iobuf
= xhci_dequeue_multi ( &endpoint
->ring
);
1571 assert ( iobuf
!= NULL
);
1573 /* Check for errors */
1574 if ( ! ( ( trb
->code
== XHCI_CMPLT_SUCCESS
) ||
1575 ( trb
->code
== XHCI_CMPLT_SHORT
) ) ) {
1577 /* Construct error */
1578 rc
= -ECODE ( trb
->code
);
1579 DBGC ( xhci
, "XHCI %s slot %d ctx %d failed (code %d): %s\n",
1580 xhci
->name
, slot
->id
, endpoint
->ctx
, trb
->code
,
1582 DBGC_HDA ( xhci
, 0, trb
, sizeof ( *trb
) );
1585 assert ( ( endpoint
->context
->state
& XHCI_ENDPOINT_STATE_MASK
)
1586 != XHCI_ENDPOINT_RUNNING
);
1588 /* Report failure to USB core */
1589 usb_complete_err ( endpoint
->ep
, iobuf
, rc
);
1593 /* Record actual transfer size */
1594 iob_unput ( iobuf
, le16_to_cpu ( trb
->residual
) );
1596 /* Sanity check (for successful completions only) */
1597 assert ( xhci_ring_consumed ( &endpoint
->ring
) ==
1598 le64_to_cpu ( trb
->transfer
) );
1600 /* Report completion to USB core */
1601 usb_complete ( endpoint
->ep
, iobuf
);
1602 profile_stop ( &xhci_transfer_profiler
);
1606 * Handle command completion event
1608 * @v xhci xHCI device
1609 * @v trb Command completion event
1611 static void xhci_complete ( struct xhci_device
*xhci
,
1612 struct xhci_trb_complete
*trb
) {
1615 /* Ignore "command ring stopped" notifications */
1616 if ( trb
->code
== XHCI_CMPLT_CMD_STOPPED
) {
1617 DBGC2 ( xhci
, "XHCI %s command ring stopped\n", xhci
->name
);
1621 /* Ignore unexpected completions */
1622 if ( ! xhci
->pending
) {
1623 rc
= -ECODE ( trb
->code
);
1624 DBGC ( xhci
, "XHCI %s unexpected completion (code %d): %s\n",
1625 xhci
->name
, trb
->code
, strerror ( rc
) );
1626 DBGC_HDA ( xhci
, 0, trb
, sizeof ( *trb
) );
1630 /* Dequeue command TRB */
1631 xhci_dequeue ( &xhci
->command
);
1634 assert ( xhci_ring_consumed ( &xhci
->command
) ==
1635 le64_to_cpu ( trb
->command
) );
1637 /* Record completion */
1638 memcpy ( xhci
->pending
, trb
, sizeof ( *xhci
->pending
) );
1639 xhci
->pending
= NULL
;
1643 * Handle port status event
1645 * @v xhci xHCI device
1646 * @v trb Port status event
1648 static void xhci_port_status ( struct xhci_device
*xhci
,
1649 struct xhci_trb_port_status
*trb
) {
1650 struct usb_port
*port
= usb_port ( xhci
->bus
->hub
, trb
->port
);
1654 assert ( ( trb
->port
> 0 ) && ( trb
->port
<= xhci
->ports
) );
1656 /* Record disconnections and clear changes */
1657 portsc
= readl ( xhci
->op
+ XHCI_OP_PORTSC ( trb
->port
) );
1658 port
->disconnected
|= ( portsc
& XHCI_PORTSC_CSC
);
1659 portsc
&= ( XHCI_PORTSC_PRESERVE
| XHCI_PORTSC_CHANGE
);
1660 writel ( portsc
, xhci
->op
+ XHCI_OP_PORTSC ( trb
->port
) );
1662 /* Report port status change */
1663 usb_port_changed ( port
);
1667 * Handle host controller event
1669 * @v xhci xHCI device
1670 * @v trb Host controller event
1672 static void xhci_host_controller ( struct xhci_device
*xhci
,
1673 struct xhci_trb_host_controller
*trb
) {
1676 /* Construct error */
1677 rc
= -ECODE ( trb
->code
);
1678 DBGC ( xhci
, "XHCI %s host controller event (code %d): %s\n",
1679 xhci
->name
, trb
->code
, strerror ( rc
) );
1685 * @v xhci xHCI device
1687 static void xhci_event_poll ( struct xhci_device
*xhci
) {
1688 struct xhci_event_ring
*event
= &xhci
->event
;
1689 union xhci_trb
*trb
;
1690 unsigned int shift
= XHCI_EVENT_TRBS_LOG2
;
1691 unsigned int count
= ( 1 << shift
);
1692 unsigned int mask
= ( count
- 1 );
1693 unsigned int consumed
;
1696 /* Poll for events */
1697 profile_start ( &xhci_event_profiler
);
1698 for ( consumed
= 0 ; ; consumed
++ ) {
1700 /* Stop if we reach an empty TRB */
1702 trb
= &event
->trb
[ event
->cons
& mask
];
1703 if ( ! ( ( trb
->common
.flags
^
1704 ( event
->cons
>> shift
) ) & XHCI_TRB_C
) )
1708 type
= ( trb
->common
.type
& XHCI_TRB_TYPE_MASK
);
1711 case XHCI_TRB_TRANSFER
:
1712 xhci_transfer ( xhci
, &trb
->transfer
);
1715 case XHCI_TRB_COMPLETE
:
1716 xhci_complete ( xhci
, &trb
->complete
);
1719 case XHCI_TRB_PORT_STATUS
:
1720 xhci_port_status ( xhci
, &trb
->port
);
1723 case XHCI_TRB_HOST_CONTROLLER
:
1724 xhci_host_controller ( xhci
, &trb
->host
);
1728 DBGC ( xhci
, "XHCI %s unrecognised event %#x\n:",
1729 xhci
->name
, event
->cons
);
1730 DBGC_HDA ( xhci
, virt_to_phys ( trb
),
1731 trb
, sizeof ( *trb
) );
1735 /* Consume this TRB */
1739 /* Update dequeue pointer if applicable */
1741 xhci_writeq ( xhci
, virt_to_phys ( trb
),
1742 xhci
->run
+ XHCI_RUN_ERDP ( 0 ) );
1743 profile_stop ( &xhci_event_profiler
);
1750 * @v xhci xHCI device
1752 static void xhci_abort ( struct xhci_device
*xhci
) {
1755 /* Abort the command */
1756 DBGC2 ( xhci
, "XHCI %s aborting command\n", xhci
->name
);
1757 xhci_writeq ( xhci
, XHCI_CRCR_CA
, xhci
->op
+ XHCI_OP_CRCR
);
1759 /* Allow time for command to abort */
1760 mdelay ( XHCI_COMMAND_ABORT_DELAY_MS
);
1763 assert ( ( readl ( xhci
->op
+ XHCI_OP_CRCR
) & XHCI_CRCR_CRR
) == 0 );
1765 /* Consume (and ignore) any final command status */
1766 xhci_event_poll ( xhci
);
1768 /* Reset the command ring control register */
1769 xhci_ring_reset ( &xhci
->command
);
1770 crp
= virt_to_phys ( xhci
->command
.trb
);
1771 xhci_writeq ( xhci
, ( crp
| XHCI_CRCR_RCS
), xhci
->op
+ XHCI_OP_CRCR
);
1775 * Issue command and wait for completion
1777 * @v xhci xHCI device
1778 * @v trb Transfer request block (with empty Cycle flag)
1779 * @ret rc Return status code
1781 * On a successful completion, the TRB will be overwritten with the
1784 static int xhci_command ( struct xhci_device
*xhci
, union xhci_trb
*trb
) {
1785 struct xhci_trb_complete
*complete
= &trb
->complete
;
1789 /* Record the pending command */
1790 xhci
->pending
= trb
;
1792 /* Enqueue the command */
1793 if ( ( rc
= xhci_enqueue ( &xhci
->command
, NULL
, trb
) ) != 0 )
1796 /* Ring the command doorbell */
1797 xhci_doorbell ( &xhci
->command
);
1799 /* Wait for the command to complete */
1800 for ( i
= 0 ; i
< XHCI_COMMAND_MAX_WAIT_MS
; i
++ ) {
1802 /* Poll event ring */
1803 xhci_event_poll ( xhci
);
1805 /* Check for completion */
1806 if ( ! xhci
->pending
) {
1807 if ( complete
->code
!= XHCI_CMPLT_SUCCESS
) {
1808 rc
= -ECODE ( complete
->code
);
1809 DBGC ( xhci
, "XHCI %s command failed (code "
1810 "%d): %s\n", xhci
->name
, complete
->code
,
1812 DBGC_HDA ( xhci
, 0, trb
, sizeof ( *trb
) );
1823 DBGC ( xhci
, "XHCI %s timed out waiting for completion\n", xhci
->name
);
1827 xhci_abort ( xhci
);
1830 xhci
->pending
= NULL
;
1835 * Issue NOP and wait for completion
1837 * @v xhci xHCI device
1838 * @ret rc Return status code
1840 static inline int xhci_nop ( struct xhci_device
*xhci
) {
1842 struct xhci_trb_common
*nop
= &trb
.common
;
1845 /* Construct command */
1846 memset ( nop
, 0, sizeof ( *nop
) );
1847 nop
->flags
= XHCI_TRB_IOC
;
1848 nop
->type
= XHCI_TRB_NOP_CMD
;
1850 /* Issue command and wait for completion */
1851 if ( ( rc
= xhci_command ( xhci
, &trb
) ) != 0 )
1860 * @v xhci xHCI device
1862 * @ret slot Device slot ID, or negative error
1864 static inline int xhci_enable_slot ( struct xhci_device
*xhci
,
1865 unsigned int type
) {
1867 struct xhci_trb_enable_slot
*enable
= &trb
.enable
;
1868 struct xhci_trb_complete
*enabled
= &trb
.complete
;
1872 /* Construct command */
1873 memset ( enable
, 0, sizeof ( *enable
) );
1874 enable
->slot
= type
;
1875 enable
->type
= XHCI_TRB_ENABLE_SLOT
;
1877 /* Issue command and wait for completion */
1878 if ( ( rc
= xhci_command ( xhci
, &trb
) ) != 0 ) {
1879 DBGC ( xhci
, "XHCI %s could not enable new slot: %s\n",
1880 xhci
->name
, strerror ( rc
) );
1884 /* Extract slot number */
1885 slot
= enabled
->slot
;
1887 DBGC2 ( xhci
, "XHCI %s slot %d enabled\n", xhci
->name
, slot
);
1894 * @v xhci xHCI device
1895 * @v slot Device slot
1896 * @ret rc Return status code
1898 static inline int xhci_disable_slot ( struct xhci_device
*xhci
,
1899 unsigned int slot
) {
1901 struct xhci_trb_disable_slot
*disable
= &trb
.disable
;
1904 /* Construct command */
1905 memset ( disable
, 0, sizeof ( *disable
) );
1906 disable
->type
= XHCI_TRB_DISABLE_SLOT
;
1907 disable
->slot
= slot
;
1909 /* Issue command and wait for completion */
1910 if ( ( rc
= xhci_command ( xhci
, &trb
) ) != 0 ) {
1911 DBGC ( xhci
, "XHCI %s could not disable slot %d: %s\n",
1912 xhci
->name
, slot
, strerror ( rc
) );
1916 DBGC2 ( xhci
, "XHCI %s slot %d disabled\n", xhci
->name
, slot
);
1921 * Issue context-based command and wait for completion
1923 * @v xhci xHCI device
1924 * @v slot Device slot
1925 * @v endpoint Endpoint
1927 * @v populate Input context populater
1928 * @ret rc Return status code
1930 static int xhci_context ( struct xhci_device
*xhci
, struct xhci_slot
*slot
,
1931 struct xhci_endpoint
*endpoint
, unsigned int type
,
1932 void ( * populate
) ( struct xhci_device
*xhci
,
1933 struct xhci_slot
*slot
,
1934 struct xhci_endpoint
*endpoint
,
1937 struct xhci_trb_context
*context
= &trb
.context
;
1942 /* Allocate an input context */
1943 len
= xhci_input_context_offset ( xhci
, XHCI_CTX_END
);
1944 input
= malloc_dma ( len
, xhci_align ( len
) );
1949 memset ( input
, 0, len
);
1951 /* Populate input context */
1952 populate ( xhci
, slot
, endpoint
, input
);
1954 /* Construct command */
1955 memset ( context
, 0, sizeof ( *context
) );
1956 context
->type
= type
;
1957 context
->input
= cpu_to_le64 ( virt_to_phys ( input
) );
1958 context
->slot
= slot
->id
;
1960 /* Issue command and wait for completion */
1961 if ( ( rc
= xhci_command ( xhci
, &trb
) ) != 0 )
1965 free_dma ( input
, len
);
1971 * Populate address device input context
1973 * @v xhci xHCI device
1974 * @v slot Device slot
1975 * @v endpoint Endpoint
1976 * @v input Input context
1978 static void xhci_address_device_input ( struct xhci_device
*xhci
,
1979 struct xhci_slot
*slot
,
1980 struct xhci_endpoint
*endpoint
,
1982 struct xhci_control_context
*control_ctx
;
1983 struct xhci_slot_context
*slot_ctx
;
1984 struct xhci_endpoint_context
*ep_ctx
;
1987 assert ( endpoint
->ctx
== XHCI_CTX_EP0
);
1989 /* Populate control context */
1990 control_ctx
= input
;
1991 control_ctx
->add
= cpu_to_le32 ( ( 1 << XHCI_CTX_SLOT
) |
1992 ( 1 << XHCI_CTX_EP0
) );
1994 /* Populate slot context */
1995 slot_ctx
= ( input
+ xhci_input_context_offset ( xhci
, XHCI_CTX_SLOT
));
1996 slot_ctx
->info
= cpu_to_le32 ( XHCI_SLOT_INFO ( 1, 0, slot
->psiv
,
1998 slot_ctx
->port
= slot
->port
;
1999 slot_ctx
->tt_id
= slot
->tt_id
;
2000 slot_ctx
->tt_port
= slot
->tt_port
;
2002 /* Populate control endpoint context */
2003 ep_ctx
= ( input
+ xhci_input_context_offset ( xhci
, XHCI_CTX_EP0
) );
2004 ep_ctx
->type
= XHCI_EP_TYPE_CONTROL
;
2005 ep_ctx
->burst
= endpoint
->ep
->burst
;
2006 ep_ctx
->mtu
= cpu_to_le16 ( endpoint
->ep
->mtu
);
2007 ep_ctx
->dequeue
= cpu_to_le64 ( virt_to_phys ( endpoint
->ring
.trb
) |
2009 ep_ctx
->trb_len
= cpu_to_le16 ( XHCI_EP0_TRB_LEN
);
2015 * @v xhci xHCI device
2016 * @v slot Device slot
2017 * @ret rc Return status code
2019 static inline int xhci_address_device ( struct xhci_device
*xhci
,
2020 struct xhci_slot
*slot
) {
2021 struct usb_device
*usb
= slot
->usb
;
2022 struct xhci_slot_context
*slot_ctx
;
2025 /* Assign device address */
2026 if ( ( rc
= xhci_context ( xhci
, slot
, slot
->endpoint
[XHCI_CTX_EP0
],
2027 XHCI_TRB_ADDRESS_DEVICE
,
2028 xhci_address_device_input
) ) != 0 )
2031 /* Get assigned address */
2032 slot_ctx
= ( slot
->context
+
2033 xhci_device_context_offset ( xhci
, XHCI_CTX_SLOT
) );
2034 usb
->address
= slot_ctx
->address
;
2035 DBGC2 ( xhci
, "XHCI %s assigned address %d to %s\n",
2036 xhci
->name
, usb
->address
, usb
->name
);
2042 * Populate configure endpoint input context
2044 * @v xhci xHCI device
2045 * @v slot Device slot
2046 * @v endpoint Endpoint
2047 * @v input Input context
2049 static void xhci_configure_endpoint_input ( struct xhci_device
*xhci
,
2050 struct xhci_slot
*slot
,
2051 struct xhci_endpoint
*endpoint
,
2053 struct xhci_control_context
*control_ctx
;
2054 struct xhci_slot_context
*slot_ctx
;
2055 struct xhci_endpoint_context
*ep_ctx
;
2057 /* Populate control context */
2058 control_ctx
= input
;
2059 control_ctx
->add
= cpu_to_le32 ( ( 1 << XHCI_CTX_SLOT
) |
2060 ( 1 << endpoint
->ctx
) );
2062 /* Populate slot context */
2063 slot_ctx
= ( input
+ xhci_input_context_offset ( xhci
, XHCI_CTX_SLOT
));
2064 slot_ctx
->info
= cpu_to_le32 ( XHCI_SLOT_INFO ( ( XHCI_CTX_END
- 1 ),
2065 ( slot
->ports ?
1 : 0 ),
2067 slot_ctx
->ports
= slot
->ports
;
2069 /* Populate endpoint context */
2070 ep_ctx
= ( input
+ xhci_input_context_offset ( xhci
, endpoint
->ctx
) );
2071 ep_ctx
->interval
= endpoint
->interval
;
2072 ep_ctx
->type
= endpoint
->type
;
2073 ep_ctx
->burst
= endpoint
->ep
->burst
;
2074 ep_ctx
->mtu
= cpu_to_le16 ( endpoint
->ep
->mtu
);
2075 ep_ctx
->dequeue
= cpu_to_le64 ( virt_to_phys ( endpoint
->ring
.trb
) |
2077 ep_ctx
->trb_len
= cpu_to_le16 ( endpoint
->ep
->mtu
); /* best guess */
2081 * Configure endpoint
2083 * @v xhci xHCI device
2084 * @v slot Device slot
2085 * @v endpoint Endpoint
2086 * @ret rc Return status code
2088 static inline int xhci_configure_endpoint ( struct xhci_device
*xhci
,
2089 struct xhci_slot
*slot
,
2090 struct xhci_endpoint
*endpoint
) {
2093 /* Configure endpoint */
2094 if ( ( rc
= xhci_context ( xhci
, slot
, endpoint
,
2095 XHCI_TRB_CONFIGURE_ENDPOINT
,
2096 xhci_configure_endpoint_input
) ) != 0 )
2099 DBGC2 ( xhci
, "XHCI %s slot %d ctx %d configured\n",
2100 xhci
->name
, slot
->id
, endpoint
->ctx
);
2105 * Populate deconfigure endpoint input context
2107 * @v xhci xHCI device
2108 * @v slot Device slot
2109 * @v endpoint Endpoint
2110 * @v input Input context
2113 xhci_deconfigure_endpoint_input ( struct xhci_device
*xhci __unused
,
2114 struct xhci_slot
*slot __unused
,
2115 struct xhci_endpoint
*endpoint
,
2117 struct xhci_control_context
*control_ctx
;
2118 struct xhci_slot_context
*slot_ctx
;
2120 /* Populate control context */
2121 control_ctx
= input
;
2122 control_ctx
->add
= cpu_to_le32 ( 1 << XHCI_CTX_SLOT
);
2123 control_ctx
->drop
= cpu_to_le32 ( 1 << endpoint
->ctx
);
2125 /* Populate slot context */
2126 slot_ctx
= ( input
+ xhci_input_context_offset ( xhci
, XHCI_CTX_SLOT
));
2127 slot_ctx
->info
= cpu_to_le32 ( XHCI_SLOT_INFO ( ( XHCI_CTX_END
- 1 ),
2132 * Deconfigure endpoint
2134 * @v xhci xHCI device
2135 * @v slot Device slot
2136 * @v endpoint Endpoint
2137 * @ret rc Return status code
2139 static inline int xhci_deconfigure_endpoint ( struct xhci_device
*xhci
,
2140 struct xhci_slot
*slot
,
2141 struct xhci_endpoint
*endpoint
) {
2144 /* Deconfigure endpoint */
2145 if ( ( rc
= xhci_context ( xhci
, slot
, endpoint
,
2146 XHCI_TRB_CONFIGURE_ENDPOINT
,
2147 xhci_deconfigure_endpoint_input
) ) != 0 )
2150 DBGC2 ( xhci
, "XHCI %s slot %d ctx %d deconfigured\n",
2151 xhci
->name
, slot
->id
, endpoint
->ctx
);
2156 * Populate evaluate context input context
2158 * @v xhci xHCI device
2159 * @v slot Device slot
2160 * @v endpoint Endpoint
2161 * @v input Input context
2163 static void xhci_evaluate_context_input ( struct xhci_device
*xhci
,
2164 struct xhci_slot
*slot __unused
,
2165 struct xhci_endpoint
*endpoint
,
2167 struct xhci_control_context
*control_ctx
;
2168 struct xhci_slot_context
*slot_ctx
;
2169 struct xhci_endpoint_context
*ep_ctx
;
2171 /* Populate control context */
2172 control_ctx
= input
;
2173 control_ctx
->add
= cpu_to_le32 ( ( 1 << XHCI_CTX_SLOT
) |
2174 ( 1 << endpoint
->ctx
) );
2176 /* Populate slot context */
2177 slot_ctx
= ( input
+ xhci_input_context_offset ( xhci
, XHCI_CTX_SLOT
));
2178 slot_ctx
->info
= cpu_to_le32 ( XHCI_SLOT_INFO ( ( XHCI_CTX_END
- 1 ),
2181 /* Populate endpoint context */
2182 ep_ctx
= ( input
+ xhci_input_context_offset ( xhci
, endpoint
->ctx
) );
2183 ep_ctx
->mtu
= cpu_to_le16 ( endpoint
->ep
->mtu
);
2189 * @v xhci xHCI device
2190 * @v slot Device slot
2191 * @v endpoint Endpoint
2192 * @ret rc Return status code
2194 static inline int xhci_evaluate_context ( struct xhci_device
*xhci
,
2195 struct xhci_slot
*slot
,
2196 struct xhci_endpoint
*endpoint
) {
2199 /* Configure endpoint */
2200 if ( ( rc
= xhci_context ( xhci
, slot
, endpoint
,
2201 XHCI_TRB_EVALUATE_CONTEXT
,
2202 xhci_evaluate_context_input
) ) != 0 )
2205 DBGC2 ( xhci
, "XHCI %s slot %d ctx %d (re-)evaluated\n",
2206 xhci
->name
, slot
->id
, endpoint
->ctx
);
2213 * @v xhci xHCI device
2214 * @v slot Device slot
2215 * @v endpoint Endpoint
2216 * @ret rc Return status code
2218 static inline int xhci_reset_endpoint ( struct xhci_device
*xhci
,
2219 struct xhci_slot
*slot
,
2220 struct xhci_endpoint
*endpoint
) {
2222 struct xhci_trb_reset_endpoint
*reset
= &trb
.reset
;
2225 /* Construct command */
2226 memset ( reset
, 0, sizeof ( *reset
) );
2227 reset
->slot
= slot
->id
;
2228 reset
->endpoint
= endpoint
->ctx
;
2229 reset
->type
= XHCI_TRB_RESET_ENDPOINT
;
2231 /* Issue command and wait for completion */
2232 if ( ( rc
= xhci_command ( xhci
, &trb
) ) != 0 ) {
2233 DBGC ( xhci
, "XHCI %s slot %d ctx %d could not reset endpoint "
2234 "in state %d: %s\n", xhci
->name
, slot
->id
, endpoint
->ctx
,
2235 endpoint
->context
->state
, strerror ( rc
) );
/**
 * Stop endpoint
 *
 * @v xhci		xHCI device
 * @v slot		Device slot
 * @v endpoint		Endpoint
 * @ret rc		Return status code
 */
static inline int xhci_stop_endpoint ( struct xhci_device *xhci,
				       struct xhci_slot *slot,
				       struct xhci_endpoint *endpoint ) {
	union xhci_trb trb;
	struct xhci_trb_stop_endpoint *stop = &trb.stop;
	int rc;

	/* Construct command */
	memset ( stop, 0, sizeof ( *stop ) );
	stop->slot = slot->id;
	stop->endpoint = endpoint->ctx;
	stop->type = XHCI_TRB_STOP_ENDPOINT;

	/* Issue command and wait for completion */
	if ( ( rc = xhci_command ( xhci, &trb ) ) != 0 ) {
		DBGC ( xhci, "XHCI %s slot %d ctx %d could not stop endpoint "
		       "in state %d: %s\n", xhci->name, slot->id, endpoint->ctx,
		       endpoint->context->state, strerror ( rc ) );
		return rc;
	}

	return 0;
}
/**
 * Set transfer ring dequeue pointer
 *
 * @v xhci		xHCI device
 * @v slot		Device slot
 * @v endpoint		Endpoint
 * @ret rc		Return status code
 */
static inline int
xhci_set_tr_dequeue_pointer ( struct xhci_device *xhci,
			      struct xhci_slot *slot,
			      struct xhci_endpoint *endpoint ) {
	union xhci_trb trb;
	struct xhci_trb_set_tr_dequeue_pointer *dequeue = &trb.dequeue;
	struct xhci_trb_ring *ring = &endpoint->ring;
	unsigned int cons;
	unsigned int mask;
	unsigned int index;
	unsigned int dcs;
	int rc;

	/* Construct command */
	memset ( dequeue, 0, sizeof ( *dequeue ) );
	cons = ring->cons;
	mask = ring->mask;
	dcs = ( ( ~( cons >> ring->shift ) ) & XHCI_EP_DCS );
	index = ( cons & mask );
	dequeue->dequeue =
		cpu_to_le64 ( virt_to_phys ( &ring->trb[index] ) | dcs );
	dequeue->slot = slot->id;
	dequeue->endpoint = endpoint->ctx;
	dequeue->type = XHCI_TRB_SET_TR_DEQUEUE_POINTER;

	/* Issue command and wait for completion */
	if ( ( rc = xhci_command ( xhci, &trb ) ) != 0 ) {
		DBGC ( xhci, "XHCI %s slot %d ctx %d could not set TR dequeue "
		       "pointer in state %d: %s\n", xhci->name, slot->id,
		       endpoint->ctx, endpoint->context->state, strerror ( rc ));
		return rc;
	}

	return 0;
}
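/* The new dequeue pointer must carry the correct dequeue cycle state
 * (DCS).  Since the consumer counter counts TRBs ever consumed, the
 * number of complete passes around the ring is ( cons >> shift ), and
 * inverting the low bit of that count recovers the cycle state expected
 * at the new position.  For example, with a 64-TRB ring ( shift = 6 )
 * and cons = 70, the ring has wrapped once, giving index 6 and DCS 0.
 */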
/******************************************************************************
 *
 * Endpoint operations
 *
 ******************************************************************************
 */
/**
 * Open endpoint
 *
 * @v ep		USB endpoint
 * @ret rc		Return status code
 */
static int xhci_endpoint_open ( struct usb_endpoint *ep ) {
	struct usb_device *usb = ep->usb;
	struct xhci_slot *slot = usb_get_hostdata ( usb );
	struct xhci_device *xhci = slot->xhci;
	struct xhci_endpoint *endpoint;
	unsigned int ctx;
	unsigned int type;
	unsigned int interval;
	int rc;

	/* Calculate context index */
	ctx = XHCI_CTX ( ep->address );
	assert ( slot->endpoint[ctx] == NULL );

	/* Calculate endpoint type */
	type = XHCI_EP_TYPE ( ep->attributes & USB_ENDPOINT_ATTR_TYPE_MASK );
	if ( type == XHCI_EP_TYPE ( USB_ENDPOINT_ATTR_CONTROL ) )
		type = XHCI_EP_TYPE_CONTROL;
	if ( ep->address & USB_DIR_IN )
		type |= XHCI_EP_TYPE_IN;

	/* Calculate interval */
	if ( type & XHCI_EP_TYPE_PERIODIC ) {
		interval = ( fls ( ep->interval ) - 1 );
	} else {
		interval = ep->interval;
	}

	/* Allocate and initialise structure */
	endpoint = zalloc ( sizeof ( *endpoint ) );
	if ( ! endpoint ) {
		rc = -ENOMEM;
		goto err_alloc;
	}
	usb_endpoint_set_hostdata ( ep, endpoint );
	slot->endpoint[ctx] = endpoint;
	endpoint->xhci = xhci;
	endpoint->slot = slot;
	endpoint->ep = ep;
	endpoint->ctx = ctx;
	endpoint->type = type;
	endpoint->interval = interval;
	endpoint->context = ( ( ( void * ) slot->context ) +
			      xhci_device_context_offset ( xhci, ctx ) );

	/* Allocate transfer ring */
	if ( ( rc = xhci_ring_alloc ( xhci, &endpoint->ring,
				      XHCI_TRANSFER_TRBS_LOG2,
				      slot->id, ctx, 0 ) ) != 0 )
		goto err_ring_alloc;

	/* Configure endpoint, if applicable */
	if ( ( ctx != XHCI_CTX_EP0 ) &&
	     ( ( rc = xhci_configure_endpoint ( xhci, slot, endpoint ) ) != 0 ))
		goto err_configure_endpoint;

	DBGC2 ( xhci, "XHCI %s slot %d ctx %d ring [%08lx,%08lx)\n",
		xhci->name, slot->id, ctx, virt_to_phys ( endpoint->ring.trb ),
		( virt_to_phys ( endpoint->ring.trb ) + endpoint->ring.len ) );
	return 0;

	xhci_deconfigure_endpoint ( xhci, slot, endpoint );
 err_configure_endpoint:
	xhci_ring_free ( &endpoint->ring );
 err_ring_alloc:
	slot->endpoint[ctx] = NULL;
	free ( endpoint );
 err_alloc:
	return rc;
}
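/* Note that for periodic endpoints the xHCI endpoint context expects
 * the interval as a power-of-two exponent in 125us microframe units,
 * hence the fls()-based log2 conversion above; for other endpoint types
 * the value is used unchanged.
 */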
/**
 * Close endpoint
 *
 * @v ep		USB endpoint
 */
static void xhci_endpoint_close ( struct usb_endpoint *ep ) {
	struct xhci_endpoint *endpoint = usb_endpoint_get_hostdata ( ep );
	struct xhci_slot *slot = endpoint->slot;
	struct xhci_device *xhci = slot->xhci;
	struct io_buffer *iobuf;
	unsigned int ctx = endpoint->ctx;

	/* Deconfigure endpoint, if applicable */
	if ( ctx != XHCI_CTX_EP0 )
		xhci_deconfigure_endpoint ( xhci, slot, endpoint );

	/* Cancel any incomplete transfers */
	while ( xhci_ring_fill ( &endpoint->ring ) ) {
		iobuf = xhci_dequeue_multi ( &endpoint->ring );
		usb_complete_err ( ep, iobuf, -ECANCELED );
	}

	/* Free endpoint */
	xhci_ring_free ( &endpoint->ring );
	slot->endpoint[ctx] = NULL;
	free ( endpoint );
}
/**
 * Reset endpoint
 *
 * @v ep		USB endpoint
 * @ret rc		Return status code
 */
static int xhci_endpoint_reset ( struct usb_endpoint *ep ) {
	struct xhci_endpoint *endpoint = usb_endpoint_get_hostdata ( ep );
	struct xhci_slot *slot = endpoint->slot;
	struct xhci_device *xhci = slot->xhci;
	int rc;

	/* Reset endpoint context */
	if ( ( rc = xhci_reset_endpoint ( xhci, slot, endpoint ) ) != 0 )
		return rc;

	/* Set transfer ring dequeue pointer */
	if ( ( rc = xhci_set_tr_dequeue_pointer ( xhci, slot, endpoint ) ) != 0)
		return rc;

	/* Ring doorbell to resume processing */
	xhci_doorbell ( &endpoint->ring );

	DBGC ( xhci, "XHCI %s slot %d ctx %d reset\n",
	       xhci->name, slot->id, endpoint->ctx );
	return 0;
}
/**
 * Update MTU
 *
 * @v ep		USB endpoint
 * @ret rc		Return status code
 */
static int xhci_endpoint_mtu ( struct usb_endpoint *ep ) {
	struct xhci_endpoint *endpoint = usb_endpoint_get_hostdata ( ep );
	struct xhci_slot *slot = endpoint->slot;
	struct xhci_device *xhci = slot->xhci;
	int rc;

	/* Evaluate context */
	if ( ( rc = xhci_evaluate_context ( xhci, slot, endpoint ) ) != 0 )
		return rc;

	return 0;
}
/**
 * Enqueue message transfer
 *
 * @v ep		USB endpoint
 * @v iobuf		I/O buffer
 * @ret rc		Return status code
 */
static int xhci_endpoint_message ( struct usb_endpoint *ep,
				   struct io_buffer *iobuf ) {
	struct xhci_endpoint *endpoint = usb_endpoint_get_hostdata ( ep );
	struct usb_setup_packet *packet;
	unsigned int input;
	size_t len;
	union xhci_trb trbs[ 1 /* setup */ + 1 /* possible data */ +
			     1 /* status */ ];
	union xhci_trb *trb = trbs;
	struct xhci_trb_setup *setup;
	struct xhci_trb_data *data;
	struct xhci_trb_status *status;
	int rc;

	/* Profile message transfers */
	profile_start ( &xhci_message_profiler );

	/* Construct setup stage TRB */
	memset ( trbs, 0, sizeof ( trbs ) );
	assert ( iob_len ( iobuf ) >= sizeof ( *packet ) );
	packet = iobuf->data;
	iob_pull ( iobuf, sizeof ( *packet ) );
	setup = &(trb++)->setup;
	memcpy ( &setup->packet, packet, sizeof ( setup->packet ) );
	setup->len = cpu_to_le32 ( sizeof ( *packet ) );
	setup->flags = XHCI_TRB_IDT;
	setup->type = XHCI_TRB_SETUP;
	len = iob_len ( iobuf );
	input = ( packet->request & cpu_to_le16 ( USB_DIR_IN ) );
	if ( len )
		setup->direction = ( input ? XHCI_SETUP_IN : XHCI_SETUP_OUT );

	/* Construct data stage TRB, if applicable */
	if ( len ) {
		data = &(trb++)->data;
		data->data = cpu_to_le64 ( virt_to_phys ( iobuf->data ) );
		data->len = cpu_to_le32 ( len );
		data->type = XHCI_TRB_DATA;
		data->direction = ( input ? XHCI_DATA_IN : XHCI_DATA_OUT );
	}

	/* Construct status stage TRB */
	status = &(trb++)->status;
	status->flags = XHCI_TRB_IOC;
	status->type = XHCI_TRB_STATUS;
	status->direction =
		( ( len && input ) ? XHCI_STATUS_OUT : XHCI_STATUS_IN );

	/* Enqueue TRBs */
	if ( ( rc = xhci_enqueue_multi ( &endpoint->ring, iobuf, trbs,
					 ( trb - trbs ) ) ) != 0 )
		return rc;

	/* Ring the doorbell */
	xhci_doorbell ( &endpoint->ring );

	profile_stop ( &xhci_message_profiler );
	return 0;
}
/**
 * Enqueue stream transfer
 *
 * @v ep		USB endpoint
 * @v iobuf		I/O buffer
 * @v terminate		Terminate using a short packet
 * @ret rc		Return status code
 */
static int xhci_endpoint_stream ( struct usb_endpoint *ep,
				  struct io_buffer *iobuf, int terminate ) {
	struct xhci_endpoint *endpoint = usb_endpoint_get_hostdata ( ep );
	union xhci_trb trbs[ 1 /* Normal */ + 1 /* Possible zero-length */ ];
	union xhci_trb *trb = trbs;
	struct xhci_trb_normal *normal;
	size_t len = iob_len ( iobuf );
	int rc;

	/* Profile stream transfers */
	profile_start ( &xhci_stream_profiler );

	/* Construct normal TRBs */
	memset ( &trbs, 0, sizeof ( trbs ) );
	normal = &(trb++)->normal;
	normal->data = cpu_to_le64 ( virt_to_phys ( iobuf->data ) );
	normal->len = cpu_to_le32 ( len );
	normal->type = XHCI_TRB_NORMAL;
	if ( terminate && ( ( len & ( ep->mtu - 1 ) ) == 0 ) ) {
		normal->flags = XHCI_TRB_CH;
		normal = &(trb++)->normal;
		normal->type = XHCI_TRB_NORMAL;
	}
	normal->flags = XHCI_TRB_IOC;

	/* Enqueue TRBs */
	if ( ( rc = xhci_enqueue_multi ( &endpoint->ring, iobuf, trbs,
					 ( trb - trbs ) ) ) != 0 )
		return rc;

	/* Ring the doorbell */
	xhci_doorbell ( &endpoint->ring );

	profile_stop ( &xhci_stream_profiler );
	return 0;
}
/******************************************************************************
 *
 * Device operations
 *
 ******************************************************************************
 */
/**
 * Open device
 *
 * @v usb		USB device
 * @ret rc		Return status code
 */
static int xhci_device_open ( struct usb_device *usb ) {
	struct xhci_device *xhci = usb_bus_get_hostdata ( usb->port->hub->bus );
	struct usb_port *tt = usb_transaction_translator ( usb );
	struct xhci_slot *slot;
	struct xhci_slot *tt_slot;
	size_t len;
	int type;
	int id;
	int rc;

	/* Determine applicable slot type */
	type = xhci_port_slot_type ( xhci, usb->port->address );
	if ( type < 0 ) {
		rc = type;
		DBGC ( xhci, "XHCI %s-%d has no slot type\n",
		       xhci->name, usb->port->address );
		goto err_type;
	}

	/* Allocate a device slot number */
	id = xhci_enable_slot ( xhci, type );
	if ( id < 0 ) {
		rc = id;
		goto err_enable_slot;
	}
	assert ( ( id > 0 ) && ( ( unsigned int ) id <= xhci->slots ) );
	assert ( xhci->slot[id] == NULL );

	/* Allocate and initialise structure */
	slot = zalloc ( sizeof ( *slot ) );
	if ( ! slot ) {
		rc = -ENOMEM;
		goto err_alloc;
	}
	usb_set_hostdata ( usb, slot );
	xhci->slot[id] = slot;
	slot->xhci = xhci;
	slot->usb = usb;
	slot->id = id;
	if ( tt ) {
		tt_slot = usb_get_hostdata ( tt->hub->usb );
		slot->tt_id = tt_slot->id;
		slot->tt_port = tt->address;
	}

	/* Allocate a device context */
	len = xhci_device_context_offset ( xhci, XHCI_CTX_END );
	slot->context = malloc_dma ( len, xhci_align ( len ) );
	if ( ! slot->context ) {
		rc = -ENOMEM;
		goto err_alloc_context;
	}
	memset ( slot->context, 0, len );

	/* Set device context base address */
	assert ( xhci->dcbaa[id] == 0 );
	xhci->dcbaa[id] = cpu_to_le64 ( virt_to_phys ( slot->context ) );

	DBGC2 ( xhci, "XHCI %s slot %d device context [%08lx,%08lx) for %s\n",
		xhci->name, slot->id, virt_to_phys ( slot->context ),
		( virt_to_phys ( slot->context ) + len ), usb->name );
	return 0;

	xhci->dcbaa[id] = 0;
	free_dma ( slot->context, len );
 err_alloc_context:
	xhci->slot[id] = NULL;
	free ( slot );
 err_alloc:
	xhci_disable_slot ( xhci, id );
 err_enable_slot:
 err_type:
	return rc;
}
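/* Each enabled slot owns one entry in the device context base address
 * array (DCBAA); the entry holds the physical address of that slot's
 * device context, which the controller reads and updates as it handles
 * the slot.  Entry zero is reserved for the scratchpad buffer array,
 * which is why the slot and DCBAA arrays are sized with ( slots + 1 )
 * entries.
 */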
/**
 * Close device
 *
 * @v usb		USB device
 */
static void xhci_device_close ( struct usb_device *usb ) {
	struct xhci_slot *slot = usb_get_hostdata ( usb );
	struct xhci_device *xhci = slot->xhci;
	size_t len = xhci_device_context_offset ( xhci, XHCI_CTX_END );
	unsigned int id = slot->id;
	int rc;

	/* Disable slot */
	if ( ( rc = xhci_disable_slot ( xhci, id ) ) != 0 ) {
		/* Slot is still enabled.  Leak the slot context,
		 * since the controller may still write to this
		 * memory, and leave the DCBAA entry intact.
		 *
		 * If the controller later reports that this same slot
		 * has been re-enabled, then some assertions will be
		 * triggered.
		 */
		DBGC ( xhci, "XHCI %s slot %d leaking context memory\n",
		       xhci->name, slot->id );
		slot->context = NULL;
	}

	/* Free slot */
	if ( slot->context ) {
		free_dma ( slot->context, len );
		xhci->dcbaa[id] = 0;
	}
	xhci->slot[id] = NULL;
	free ( slot );
}
/**
 * Assign device address
 *
 * @v usb		USB device
 * @ret rc		Return status code
 */
static int xhci_device_address ( struct usb_device *usb ) {
	struct xhci_slot *slot = usb_get_hostdata ( usb );
	struct xhci_device *xhci = slot->xhci;
	struct usb_port *port = usb->port;
	struct usb_port *root_port;
	int psiv;
	int rc;

	/* Calculate route string */
	slot->route = usb_route_string ( usb );

	/* Calculate root hub port number */
	root_port = usb_root_hub_port ( usb );
	slot->port = root_port->address;

	/* Calculate protocol speed ID */
	psiv = xhci_port_psiv ( xhci, slot->port, port->speed );
	if ( psiv < 0 ) {
		rc = psiv;
		return rc;
	}
	slot->psiv = psiv;

	/* Address device */
	if ( ( rc = xhci_address_device ( xhci, slot ) ) != 0 )
		return rc;

	return 0;
}
/******************************************************************************
 *
 * Bus operations
 *
 ******************************************************************************
 */
/**
 * Open USB bus
 *
 * @v bus		USB bus
 * @ret rc		Return status code
 */
static int xhci_bus_open ( struct usb_bus *bus ) {
	struct xhci_device *xhci = usb_bus_get_hostdata ( bus );
	int rc;

	/* Allocate device slot array */
	xhci->slot = zalloc ( ( xhci->slots + 1 ) * sizeof ( xhci->slot[0] ) );
	if ( ! xhci->slot ) {
		rc = -ENOMEM;
		goto err_slot_alloc;
	}

	/* Allocate device context base address array */
	if ( ( rc = xhci_dcbaa_alloc ( xhci ) ) != 0 )
		goto err_dcbaa_alloc;

	/* Allocate scratchpad buffers */
	if ( ( rc = xhci_scratchpad_alloc ( xhci ) ) != 0 )
		goto err_scratchpad_alloc;

	/* Allocate command ring */
	if ( ( rc = xhci_command_alloc ( xhci ) ) != 0 )
		goto err_command_alloc;

	/* Allocate event ring */
	if ( ( rc = xhci_event_alloc ( xhci ) ) != 0 )
		goto err_event_alloc;

	/* Start controller */
	xhci_run ( xhci );

	return 0;

	xhci_event_free ( xhci );
 err_event_alloc:
	xhci_command_free ( xhci );
 err_command_alloc:
	xhci_scratchpad_free ( xhci );
 err_scratchpad_alloc:
	xhci_dcbaa_free ( xhci );
 err_dcbaa_alloc:
	free ( xhci->slot );
 err_slot_alloc:
	return rc;
}
/**
 * Close USB bus
 *
 * @v bus		USB bus
 */
static void xhci_bus_close ( struct usb_bus *bus ) {
	struct xhci_device *xhci = usb_bus_get_hostdata ( bus );
	unsigned int i;

	/* Sanity checks */
	assert ( xhci->slot != NULL );
	for ( i = 0 ; i <= xhci->slots ; i++ )
		assert ( xhci->slot[i] == NULL );

	xhci_stop ( xhci );
	xhci_event_free ( xhci );
	xhci_command_free ( xhci );
	xhci_scratchpad_free ( xhci );
	xhci_dcbaa_free ( xhci );
	free ( xhci->slot );
}
/**
 * Poll USB bus
 *
 * @v bus		USB bus
 */
static void xhci_bus_poll ( struct usb_bus *bus ) {
	struct xhci_device *xhci = usb_bus_get_hostdata ( bus );

	/* Poll event ring */
	xhci_event_poll ( xhci );
}
/******************************************************************************
 *
 * Hub operations
 *
 ******************************************************************************
 */
/**
 * Open hub
 *
 * @v hub		USB hub
 * @ret rc		Return status code
 */
static int xhci_hub_open ( struct usb_hub *hub ) {
	struct xhci_slot *slot;

	/* Do nothing if this is the root hub */
	if ( ! hub->usb )
		return 0;

	/* Get device slot */
	slot = usb_get_hostdata ( hub->usb );

	/* Update device slot hub parameters.  We don't inform the
	 * hardware of this information until the hub's interrupt
	 * endpoint is opened, since the only mechanism for so doing
	 * provided by the xHCI specification is a Configure Endpoint
	 * command, and we can't issue that command until we have a
	 * non-EP0 endpoint to configure.
	 */
	slot->ports = hub->ports;

	return 0;
}
/**
 * Close hub
 *
 * @v hub		USB hub
 */
static void xhci_hub_close ( struct usb_hub *hub __unused ) {

	/* Nothing to do */
}
/******************************************************************************
 *
 * Root hub operations
 *
 ******************************************************************************
 */
/**
 * Open root hub
 *
 * @v hub		USB hub
 * @ret rc		Return status code
 */
static int xhci_root_open ( struct usb_hub *hub ) {
	struct usb_bus *bus = hub->bus;
	struct xhci_device *xhci = usb_bus_get_hostdata ( bus );
	struct usb_port *port;
	uint32_t portsc;
	unsigned int i;

	/* Enable power to all ports */
	for ( i = 1 ; i <= xhci->ports ; i++ ) {
		portsc = readl ( xhci->op + XHCI_OP_PORTSC ( i ) );
		portsc &= XHCI_PORTSC_PRESERVE;
		portsc |= XHCI_PORTSC_PP;
		writel ( portsc, xhci->op + XHCI_OP_PORTSC ( i ) );
	}

	/* xHCI spec requires us to potentially wait 20ms after
	 * enabling power to a port.
	 */
	mdelay ( XHCI_PORT_POWER_DELAY_MS );

	/* USB3 ports may power up as Disabled */
	for ( i = 1 ; i <= xhci->ports ; i++ ) {
		portsc = readl ( xhci->op + XHCI_OP_PORTSC ( i ) );
		port = usb_port ( hub, i );
		if ( ( port->protocol >= USB_PROTO_3_0 ) &&
		     ( ( portsc & XHCI_PORTSC_PLS_MASK ) ==
		       XHCI_PORTSC_PLS_DISABLED ) ) {
			/* Force link state to RxDetect */
			portsc &= XHCI_PORTSC_PRESERVE;
			portsc |= ( XHCI_PORTSC_PLS_RXDETECT | XHCI_PORTSC_LWS );
			writel ( portsc, xhci->op + XHCI_OP_PORTSC ( i ) );
		}
	}

	/* Some xHCI cards seem to require an additional delay after
	 * setting the link state to RxDetect.
	 */
	mdelay ( XHCI_LINK_STATE_DELAY_MS );

	/* Record hub driver private data */
	usb_hub_set_drvdata ( hub, xhci );

	return 0;
}
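/* PORTSC mixes writable control bits with write-1-to-clear status
 * change bits, so each read-modify-write above first masks the value
 * with XHCI_PORTSC_PRESERVE; writing back a raw readl() value would
 * inadvertently acknowledge any pending change bits.
 */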
/**
 * Close root hub
 *
 * @v hub		USB hub
 */
static void xhci_root_close ( struct usb_hub *hub ) {

	/* Clear hub driver private data */
	usb_hub_set_drvdata ( hub, NULL );
}
/**
 * Enable port
 *
 * @v hub		USB hub
 * @v port		USB port
 * @ret rc		Return status code
 */
static int xhci_root_enable ( struct usb_hub *hub, struct usb_port *port ) {
	struct xhci_device *xhci = usb_hub_get_drvdata ( hub );
	uint32_t portsc;
	unsigned int i;

	/* Reset port */
	portsc = readl ( xhci->op + XHCI_OP_PORTSC ( port->address ) );
	portsc &= XHCI_PORTSC_PRESERVE;
	portsc |= XHCI_PORTSC_PR;
	writel ( portsc, xhci->op + XHCI_OP_PORTSC ( port->address ) );

	/* Wait for port to become enabled */
	for ( i = 0 ; i < XHCI_PORT_RESET_MAX_WAIT_MS ; i++ ) {

		/* Check port status */
		portsc = readl ( xhci->op + XHCI_OP_PORTSC ( port->address ) );
		if ( portsc & XHCI_PORTSC_PED )
			return 0;

		/* Delay */
		mdelay ( 1 );
	}

	DBGC ( xhci, "XHCI %s-%d timed out waiting for port to enable\n",
	       xhci->name, port->address );
	return -ETIMEDOUT;
}
/**
 * Disable port
 *
 * @v hub		USB hub
 * @v port		USB port
 * @ret rc		Return status code
 */
static int xhci_root_disable ( struct usb_hub *hub, struct usb_port *port ) {
	struct xhci_device *xhci = usb_hub_get_drvdata ( hub );
	uint32_t portsc;

	/* Disable port */
	portsc = readl ( xhci->op + XHCI_OP_PORTSC ( port->address ) );
	portsc &= XHCI_PORTSC_PRESERVE;
	portsc |= XHCI_PORTSC_PED;
	writel ( portsc, xhci->op + XHCI_OP_PORTSC ( port->address ) );

	return 0;
}
/**
 * Update root hub port speed
 *
 * @v hub		USB hub
 * @v port		USB port
 * @ret rc		Return status code
 */
static int xhci_root_speed ( struct usb_hub *hub, struct usb_port *port ) {
	struct xhci_device *xhci = usb_hub_get_drvdata ( hub );
	uint32_t portsc;
	unsigned int psiv;
	int ccs;
	int ped;
	int csc;
	int speed;
	int rc;

	/* Read port status */
	portsc = readl ( xhci->op + XHCI_OP_PORTSC ( port->address ) );
	DBGC2 ( xhci, "XHCI %s-%d status is %08x\n",
		xhci->name, port->address, portsc );
	ccs = ( portsc & XHCI_PORTSC_CCS );
	ped = ( portsc & XHCI_PORTSC_PED );
	csc = ( portsc & XHCI_PORTSC_CSC );
	psiv = XHCI_PORTSC_PSIV ( portsc );

	/* Record disconnections and clear changes */
	port->disconnected |= csc;
	portsc &= ( XHCI_PORTSC_PRESERVE | XHCI_PORTSC_CHANGE );
	writel ( portsc, xhci->op + XHCI_OP_PORTSC ( port->address ) );

	/* Port speed is not valid unless port is connected */
	if ( ! ccs ) {
		port->speed = USB_SPEED_NONE;
		return 0;
	}

	/* For USB2 ports, the PSIV field is not valid until the port
	 * completes reset and becomes enabled.
	 */
	if ( ( port->protocol < USB_PROTO_3_0 ) && ! ped ) {
		port->speed = USB_SPEED_FULL;
		return 0;
	}

	/* Get port speed and map to generic USB speed */
	speed = xhci_port_speed ( xhci, port->address, psiv );
	if ( speed < 0 ) {
		rc = speed;
		return rc;
	}

	port->speed = speed;
	return 0;
}
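/* In the absence of vendor-defined mappings, the default protocol speed
 * ID values from the xHCI specification are 1 = full speed, 2 = low
 * speed, 3 = high speed and 4 = SuperSpeed; a port's supported protocol
 * capability may instead define its own mappings via PSI descriptors.
 */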
/**
 * Clear transaction translator buffer
 *
 * @v hub		USB hub
 * @v port		USB port
 * @v ep		USB endpoint
 * @ret rc		Return status code
 */
static int xhci_root_clear_tt ( struct usb_hub *hub, struct usb_port *port,
				struct usb_endpoint *ep ) {
	struct xhci_device *xhci = usb_hub_get_drvdata ( hub );

	/* Should never be called; this is a root hub */
	DBGC ( xhci, "XHCI %s-%d nonsensical CLEAR_TT for %s %s\n", xhci->name,
	       port->address, ep->usb->name, usb_endpoint_name ( ep ) );

	return -ENOTSUP;
}
/******************************************************************************
 *
 * PCI interface
 *
 ******************************************************************************
 */
/** USB host controller operations */
static struct usb_host_operations xhci_operations = {
	.endpoint = {
		.open = xhci_endpoint_open,
		.close = xhci_endpoint_close,
		.reset = xhci_endpoint_reset,
		.mtu = xhci_endpoint_mtu,
		.message = xhci_endpoint_message,
		.stream = xhci_endpoint_stream,
	},
	.device = {
		.open = xhci_device_open,
		.close = xhci_device_close,
		.address = xhci_device_address,
	},
	.bus = {
		.open = xhci_bus_open,
		.close = xhci_bus_close,
		.poll = xhci_bus_poll,
	},
	.hub = {
		.open = xhci_hub_open,
		.close = xhci_hub_close,
	},
	.root = {
		.open = xhci_root_open,
		.close = xhci_root_close,
		.enable = xhci_root_enable,
		.disable = xhci_root_disable,
		.speed = xhci_root_speed,
		.clear_tt = xhci_root_clear_tt,
	},
};
/**
 * Fix Intel PCH-specific quirks
 *
 * @v xhci		xHCI device
 * @v pci		PCI device
 */
static void xhci_pch_fix ( struct xhci_device *xhci, struct pci_device *pci ) {
	struct xhci_pch *pch = &xhci->pch;
	uint32_t xusb2pr;
	uint32_t xusb2prm;
	uint32_t usb3pssen;
	uint32_t usb3prm;

	/* Enable SuperSpeed capability.  Do this before rerouting
	 * USB2 ports, so that USB3 devices connect at SuperSpeed.
	 */
	pci_read_config_dword ( pci, XHCI_PCH_USB3PSSEN, &usb3pssen );
	pci_read_config_dword ( pci, XHCI_PCH_USB3PRM, &usb3prm );
	if ( usb3prm & ~usb3pssen ) {
		DBGC ( xhci, "XHCI %s enabling SuperSpeed on ports %08x\n",
		       xhci->name, ( usb3prm & ~usb3pssen ) );
	}
	pch->usb3pssen = usb3pssen;
	usb3pssen |= usb3prm;
	pci_write_config_dword ( pci, XHCI_PCH_USB3PSSEN, usb3pssen );

	/* Route USB2 ports from EHCI to xHCI */
	pci_read_config_dword ( pci, XHCI_PCH_XUSB2PR, &xusb2pr );
	pci_read_config_dword ( pci, XHCI_PCH_XUSB2PRM, &xusb2prm );
	if ( xusb2prm & ~xusb2pr ) {
		DBGC ( xhci, "XHCI %s routing ports %08x from EHCI to xHCI\n",
		       xhci->name, ( xusb2prm & ~xusb2pr ) );
	}
	pch->xusb2pr = xusb2pr;
	xusb2pr |= xusb2prm;
	pci_write_config_dword ( pci, XHCI_PCH_XUSB2PR, xusb2pr );
}
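/* USB3PRM and XUSB2PRM are mask registers indicating which ports the
 * chipset allows to be switched; OR-ing each mask into the
 * corresponding control register enables SuperSpeed on, or reroutes to
 * xHCI, every switchable port while leaving the remaining ports
 * untouched.
 */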
/**
 * Undo Intel PCH-specific quirk fixes
 *
 * @v xhci		xHCI device
 * @v pci		PCI device
 */
static void xhci_pch_undo ( struct xhci_device *xhci, struct pci_device *pci ) {
	struct xhci_pch *pch = &xhci->pch;

	/* Restore USB2 port routing to original state */
	pci_write_config_dword ( pci, XHCI_PCH_XUSB2PR, pch->xusb2pr );

	/* Restore SuperSpeed capability to original state */
	pci_write_config_dword ( pci, XHCI_PCH_USB3PSSEN, pch->usb3pssen );
}
/**
 * Probe PCI device
 *
 * @v pci		PCI device
 * @ret rc		Return status code
 */
static int xhci_probe ( struct pci_device *pci ) {
	struct xhci_device *xhci;
	struct usb_port *port;
	unsigned long bar_start;
	size_t bar_size;
	unsigned int i;
	int rc;

	/* Allocate and initialise structure */
	xhci = zalloc ( sizeof ( *xhci ) );
	if ( ! xhci ) {
		rc = -ENOMEM;
		goto err_alloc;
	}
	xhci->name = pci->dev.name;
	xhci->quirks = pci->id->driver_data;

	/* Fix up PCI device */
	adjust_pci_device ( pci );

	/* Map registers */
	bar_start = pci_bar_start ( pci, XHCI_BAR );
	bar_size = pci_bar_size ( pci, XHCI_BAR );
	xhci->regs = ioremap ( bar_start, bar_size );
	if ( ! xhci->regs ) {
		rc = -ENODEV;
		goto err_ioremap;
	}

	/* Initialise xHCI device */
	xhci_init ( xhci, xhci->regs );

	/* Initialise USB legacy support and claim ownership */
	xhci_legacy_init ( xhci );
	xhci_legacy_claim ( xhci );

	/* Fix Intel PCH-specific quirks, if applicable */
	if ( xhci->quirks & XHCI_PCH )
		xhci_pch_fix ( xhci, pci );

	/* Reset device */
	if ( ( rc = xhci_reset ( xhci ) ) != 0 )
		goto err_reset;

	/* Allocate USB bus */
	xhci->bus = alloc_usb_bus ( &pci->dev, xhci->ports, XHCI_MTU,
				    &xhci_operations );
	if ( ! xhci->bus ) {
		rc = -ENOMEM;
		goto err_alloc_bus;
	}
	usb_bus_set_hostdata ( xhci->bus, xhci );
	usb_hub_set_drvdata ( xhci->bus->hub, xhci );

	/* Set port protocols */
	for ( i = 1 ; i <= xhci->ports ; i++ ) {
		port = usb_port ( xhci->bus->hub, i );
		port->protocol = xhci_port_protocol ( xhci, i );
	}

	/* Register USB bus */
	if ( ( rc = register_usb_bus ( xhci->bus ) ) != 0 )
		goto err_register;

	pci_set_drvdata ( pci, xhci );
	return 0;

	unregister_usb_bus ( xhci->bus );
 err_register:
	free_usb_bus ( xhci->bus );
 err_alloc_bus:
	xhci_reset ( xhci );
 err_reset:
	if ( xhci->quirks & XHCI_PCH )
		xhci_pch_undo ( xhci, pci );
	xhci_legacy_release ( xhci );
	iounmap ( xhci->regs );
 err_ioremap:
	free ( xhci );
 err_alloc:
	return rc;
}
/**
 * Remove PCI device
 *
 * @v pci		PCI device
 */
static void xhci_remove ( struct pci_device *pci ) {
	struct xhci_device *xhci = pci_get_drvdata ( pci );
	struct usb_bus *bus = xhci->bus;

	unregister_usb_bus ( bus );
	free_usb_bus ( bus );
	xhci_reset ( xhci );
	if ( xhci->quirks & XHCI_PCH )
		xhci_pch_undo ( xhci, pci );
	xhci_legacy_release ( xhci );
	iounmap ( xhci->regs );
	free ( xhci );
}
/** XHCI PCI device IDs */
static struct pci_device_id xhci_ids[] = {
	PCI_ROM ( 0x8086, 0x9d2f, "xhci-skylake", "xHCI (Skylake)",
		  ( XHCI_PCH | XHCI_BAD_PSIV ) ),
	PCI_ROM ( 0x8086, 0xffff, "xhci-pch", "xHCI (Intel PCH)", XHCI_PCH ),
	PCI_ROM ( 0xffff, 0xffff, "xhci", "xHCI", 0 ),
};
/** XHCI PCI driver */
struct pci_driver xhci_driver __pci_driver = {
	.ids = xhci_ids,
	.id_count = ( sizeof ( xhci_ids ) / sizeof ( xhci_ids[0] ) ),
	.class = PCI_CLASS_ID ( PCI_CLASS_SERIAL, PCI_CLASS_SERIAL_USB,
				PCI_CLASS_SERIAL_USB_XHCI ),
	.probe = xhci_probe,
	.remove = xhci_remove,
};
/**
 * Prepare for exit
 *
 * @v booting		System is shutting down for OS boot
 */
static void xhci_shutdown ( int booting ) {
	/* If we are shutting down to boot an OS, then prevent the
	 * release of ownership back to BIOS.
	 */
	xhci_legacy_prevent_release = booting;
}

/** Startup/shutdown function */
struct startup_fn xhci_startup __startup_fn ( STARTUP_LATE ) = {
	.shutdown = xhci_shutdown,
};