[xhci] Always reset root hub ports
[ipxe.git] / src / drivers / usb / xhci.c
1 /*
2 * Copyright (C) 2014 Michael Brown <mbrown@fensystems.co.uk>.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License as
6 * published by the Free Software Foundation; either version 2 of the
7 * License, or (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
17 * 02110-1301, USA.
18 *
19 * You can also choose to distribute this program under the terms of
20 * the Unmodified Binary Distribution Licence (as given in the file
21 * COPYING.UBDL), provided that you have satisfied its requirements.
22 */
23
24 FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
25
26 #include <stdlib.h>
27 #include <stdio.h>
28 #include <unistd.h>
29 #include <string.h>
30 #include <strings.h>
31 #include <errno.h>
32 #include <byteswap.h>
33 #include <ipxe/malloc.h>
34 #include <ipxe/umalloc.h>
35 #include <ipxe/pci.h>
36 #include <ipxe/usb.h>
37 #include <ipxe/init.h>
38 #include <ipxe/profile.h>
39 #include "xhci.h"
40
41 /** @file
42 *
43 * USB eXtensible Host Controller Interface (xHCI) driver
44 *
45 */
46
/* Profilers, used to gather fine-grained timing statistics when
 * profiling support is enabled (see ipxe/profile.h).
 */

/** Message transfer profiler */
static struct profiler xhci_message_profiler __profiler =
	{ .name = "xhci.message" };

/** Stream transfer profiler */
static struct profiler xhci_stream_profiler __profiler =
	{ .name = "xhci.stream" };

/** Event ring profiler */
static struct profiler xhci_event_profiler __profiler =
	{ .name = "xhci.event" };

/** Transfer event profiler */
static struct profiler xhci_transfer_profiler __profiler =
	{ .name = "xhci.transfer" };
62
/* Disambiguate the various error causes
 *
 * Each definition below maps one hardware completion code to a
 * uniquified iPXE error number: codes 2-31 become variants of EIO,
 * codes 32-36 become variants of EPROTO.  The "( code - base )"
 * arithmetic makes the completion code visible at the definition
 * site.  ECODE() at the end of this table converts a raw completion
 * code into the corresponding error number (or EFAULT for codes with
 * no mapping).
 */
#define EIO_DATA						\
	__einfo_error ( EINFO_EIO_DATA )
#define EINFO_EIO_DATA						\
	__einfo_uniqify ( EINFO_EIO, ( 2 - 0 ),			\
			  "Data buffer error" )
#define EIO_BABBLE						\
	__einfo_error ( EINFO_EIO_BABBLE )
#define EINFO_EIO_BABBLE					\
	__einfo_uniqify ( EINFO_EIO, ( 3 - 0 ),			\
			  "Babble detected" )
#define EIO_USB							\
	__einfo_error ( EINFO_EIO_USB )
#define EINFO_EIO_USB						\
	__einfo_uniqify ( EINFO_EIO, ( 4 - 0 ),			\
			  "USB transaction error" )
#define EIO_TRB							\
	__einfo_error ( EINFO_EIO_TRB )
#define EINFO_EIO_TRB						\
	__einfo_uniqify ( EINFO_EIO, ( 5 - 0 ),			\
			  "TRB error" )
#define EIO_STALL						\
	__einfo_error ( EINFO_EIO_STALL )
#define EINFO_EIO_STALL						\
	__einfo_uniqify ( EINFO_EIO, ( 6 - 0 ),			\
			  "Stall error" )
#define EIO_RESOURCE						\
	__einfo_error ( EINFO_EIO_RESOURCE )
#define EINFO_EIO_RESOURCE					\
	__einfo_uniqify ( EINFO_EIO, ( 7 - 0 ),			\
			  "Resource error" )
#define EIO_BANDWIDTH						\
	__einfo_error ( EINFO_EIO_BANDWIDTH )
#define EINFO_EIO_BANDWIDTH					\
	__einfo_uniqify ( EINFO_EIO, ( 8 - 0 ),			\
			  "Bandwidth error" )
#define EIO_NO_SLOTS						\
	__einfo_error ( EINFO_EIO_NO_SLOTS )
#define EINFO_EIO_NO_SLOTS					\
	__einfo_uniqify ( EINFO_EIO, ( 9 - 0 ),			\
			  "No slots available" )
#define EIO_STREAM_TYPE						\
	__einfo_error ( EINFO_EIO_STREAM_TYPE )
#define EINFO_EIO_STREAM_TYPE					\
	__einfo_uniqify ( EINFO_EIO, ( 10 - 0 ),		\
			  "Invalid stream type" )
#define EIO_SLOT						\
	__einfo_error ( EINFO_EIO_SLOT )
#define EINFO_EIO_SLOT						\
	__einfo_uniqify ( EINFO_EIO, ( 11 - 0 ),		\
			  "Slot not enabled" )
#define EIO_ENDPOINT						\
	__einfo_error ( EINFO_EIO_ENDPOINT )
#define EINFO_EIO_ENDPOINT					\
	__einfo_uniqify ( EINFO_EIO, ( 12 - 0 ),		\
			  "Endpoint not enabled" )
#define EIO_SHORT						\
	__einfo_error ( EINFO_EIO_SHORT )
#define EINFO_EIO_SHORT						\
	__einfo_uniqify ( EINFO_EIO, ( 13 - 0 ),		\
			  "Short packet" )
#define EIO_UNDERRUN						\
	__einfo_error ( EINFO_EIO_UNDERRUN )
#define EINFO_EIO_UNDERRUN					\
	__einfo_uniqify ( EINFO_EIO, ( 14 - 0 ),		\
			  "Ring underrun" )
#define EIO_OVERRUN						\
	__einfo_error ( EINFO_EIO_OVERRUN )
#define EINFO_EIO_OVERRUN					\
	__einfo_uniqify ( EINFO_EIO, ( 15 - 0 ),		\
			  "Ring overrun" )
#define EIO_VF_RING_FULL					\
	__einfo_error ( EINFO_EIO_VF_RING_FULL )
#define EINFO_EIO_VF_RING_FULL					\
	__einfo_uniqify ( EINFO_EIO, ( 16 - 0 ),		\
			  "Virtual function event ring full" )
#define EIO_PARAMETER						\
	__einfo_error ( EINFO_EIO_PARAMETER )
#define EINFO_EIO_PARAMETER					\
	__einfo_uniqify ( EINFO_EIO, ( 17 - 0 ),		\
			  "Parameter error" )
#define EIO_BANDWIDTH_OVERRUN					\
	__einfo_error ( EINFO_EIO_BANDWIDTH_OVERRUN )
#define EINFO_EIO_BANDWIDTH_OVERRUN				\
	__einfo_uniqify ( EINFO_EIO, ( 18 - 0 ),		\
			  "Bandwidth overrun" )
#define EIO_CONTEXT						\
	__einfo_error ( EINFO_EIO_CONTEXT )
#define EINFO_EIO_CONTEXT					\
	__einfo_uniqify ( EINFO_EIO, ( 19 - 0 ),		\
			  "Context state error" )
#define EIO_NO_PING						\
	__einfo_error ( EINFO_EIO_NO_PING )
#define EINFO_EIO_NO_PING					\
	__einfo_uniqify ( EINFO_EIO, ( 20 - 0 ),		\
			  "No ping response" )
#define EIO_RING_FULL						\
	__einfo_error ( EINFO_EIO_RING_FULL )
#define EINFO_EIO_RING_FULL					\
	__einfo_uniqify ( EINFO_EIO, ( 21 - 0 ),		\
			  "Event ring full" )
#define EIO_INCOMPATIBLE					\
	__einfo_error ( EINFO_EIO_INCOMPATIBLE )
#define EINFO_EIO_INCOMPATIBLE					\
	__einfo_uniqify ( EINFO_EIO, ( 22 - 0 ),		\
			  "Incompatible device" )
#define EIO_MISSED						\
	__einfo_error ( EINFO_EIO_MISSED )
#define EINFO_EIO_MISSED					\
	__einfo_uniqify ( EINFO_EIO, ( 23 - 0 ),		\
			  "Missed service error" )
#define EIO_CMD_STOPPED						\
	__einfo_error ( EINFO_EIO_CMD_STOPPED )
#define EINFO_EIO_CMD_STOPPED					\
	__einfo_uniqify ( EINFO_EIO, ( 24 - 0 ),		\
			  "Command ring stopped" )
#define EIO_CMD_ABORTED						\
	__einfo_error ( EINFO_EIO_CMD_ABORTED )
#define EINFO_EIO_CMD_ABORTED					\
	__einfo_uniqify ( EINFO_EIO, ( 25 - 0 ),		\
			  "Command aborted" )
#define EIO_STOP						\
	__einfo_error ( EINFO_EIO_STOP )
#define EINFO_EIO_STOP						\
	__einfo_uniqify ( EINFO_EIO, ( 26 - 0 ),		\
			  "Stopped" )
#define EIO_STOP_LEN						\
	__einfo_error ( EINFO_EIO_STOP_LEN )
#define EINFO_EIO_STOP_LEN					\
	__einfo_uniqify ( EINFO_EIO, ( 27 - 0 ),		\
			  "Stopped - length invalid" )
#define EIO_STOP_SHORT						\
	__einfo_error ( EINFO_EIO_STOP_SHORT )
#define EINFO_EIO_STOP_SHORT					\
	__einfo_uniqify ( EINFO_EIO, ( 28 - 0 ),		\
			  "Stopped - short packet" )
#define EIO_LATENCY						\
	__einfo_error ( EINFO_EIO_LATENCY )
#define EINFO_EIO_LATENCY					\
	__einfo_uniqify ( EINFO_EIO, ( 29 - 0 ),		\
			  "Maximum exit latency too large" )
/* Note: code 30 is deliberately unmapped */
#define EIO_ISOCH						\
	__einfo_error ( EINFO_EIO_ISOCH )
#define EINFO_EIO_ISOCH						\
	__einfo_uniqify ( EINFO_EIO, ( 31 - 0 ),		\
			  "Isochronous buffer overrun" )
#define EPROTO_LOST						\
	__einfo_error ( EINFO_EPROTO_LOST )
#define EINFO_EPROTO_LOST					\
	__einfo_uniqify ( EINFO_EPROTO, ( 32 - 32 ),		\
			  "Event lost" )
#define EPROTO_UNDEFINED					\
	__einfo_error ( EINFO_EPROTO_UNDEFINED )
#define EINFO_EPROTO_UNDEFINED					\
	__einfo_uniqify ( EINFO_EPROTO, ( 33 - 32 ),		\
			  "Undefined error" )
#define EPROTO_STREAM_ID					\
	__einfo_error ( EINFO_EPROTO_STREAM_ID )
#define EINFO_EPROTO_STREAM_ID					\
	__einfo_uniqify ( EINFO_EPROTO, ( 34 - 32 ),		\
			  "Invalid stream ID" )
#define EPROTO_SECONDARY					\
	__einfo_error ( EINFO_EPROTO_SECONDARY )
#define EINFO_EPROTO_SECONDARY					\
	__einfo_uniqify ( EINFO_EPROTO, ( 35 - 32 ),		\
			  "Secondary bandwidth error" )
#define EPROTO_SPLIT						\
	__einfo_error ( EINFO_EPROTO_SPLIT )
#define EINFO_EPROTO_SPLIT					\
	__einfo_uniqify ( EINFO_EPROTO, ( 36 - 32 ),		\
			  "Split transaction error" )
/* Map a raw completion code to the disambiguated errors above:
 * 0-31 select among the EIO variants, 32-63 among the EPROTO
 * variants, and anything else falls back to EFAULT.
 */
#define ECODE(code)						\
	( ( (code) < 32 ) ?					\
	  EUNIQ ( EINFO_EIO, ( (code) & 31 ), EIO_DATA, EIO_BABBLE,	\
		  EIO_USB, EIO_TRB, EIO_STALL, EIO_RESOURCE,		\
		  EIO_BANDWIDTH, EIO_NO_SLOTS, EIO_STREAM_TYPE,		\
		  EIO_SLOT, EIO_ENDPOINT, EIO_SHORT, EIO_UNDERRUN,	\
		  EIO_OVERRUN, EIO_VF_RING_FULL, EIO_PARAMETER,		\
		  EIO_BANDWIDTH_OVERRUN, EIO_CONTEXT, EIO_NO_PING,	\
		  EIO_RING_FULL, EIO_INCOMPATIBLE, EIO_MISSED,		\
		  EIO_CMD_STOPPED, EIO_CMD_ABORTED, EIO_STOP,		\
		  EIO_STOP_LEN, EIO_STOP_SHORT, EIO_LATENCY,		\
		  EIO_ISOCH ) :						\
	  ( (code) < 64 ) ?					\
	  EUNIQ ( EINFO_EPROTO, ( (code) & 31 ), EPROTO_LOST,		\
		  EPROTO_UNDEFINED, EPROTO_STREAM_ID,			\
		  EPROTO_SECONDARY, EPROTO_SPLIT ) :			\
	  EFAULT )
251
252 /******************************************************************************
253 *
254 * Register access
255 *
256 ******************************************************************************
257 */
258
/**
 * Initialise device
 *
 * @v xhci		xHCI device
 * @v regs		MMIO registers
 *
 * Parses the read-only capability registers to locate the other
 * register banks and records the controller's structural and
 * capability parameters for later use.
 */
static void xhci_init ( struct xhci_device *xhci, void *regs ) {
	uint32_t hcsparams1;
	uint32_t hcsparams2;
	uint32_t hccparams1;
	uint32_t pagesize;
	size_t caplength;
	size_t rtsoff;
	size_t dboff;

	/* Locate capability, operational, runtime, and doorbell
	 * registers.  CAPLENGTH, RTSOFF, and DBOFF are byte offsets
	 * from the start of the capability registers.
	 */
	xhci->cap = regs;
	caplength = readb ( xhci->cap + XHCI_CAP_CAPLENGTH );
	rtsoff = readl ( xhci->cap + XHCI_CAP_RTSOFF );
	dboff = readl ( xhci->cap + XHCI_CAP_DBOFF );
	xhci->op = ( xhci->cap + caplength );
	xhci->run = ( xhci->cap + rtsoff );
	xhci->db = ( xhci->cap + dboff );
	DBGC2 ( xhci, "XHCI %p cap %08lx op %08lx run %08lx db %08lx\n",
		xhci, virt_to_phys ( xhci->cap ), virt_to_phys ( xhci->op ),
		virt_to_phys ( xhci->run ), virt_to_phys ( xhci->db ) );

	/* Read structural parameters 1 (slot, interrupter, and port counts) */
	hcsparams1 = readl ( xhci->cap + XHCI_CAP_HCSPARAMS1 );
	xhci->slots = XHCI_HCSPARAMS1_SLOTS ( hcsparams1 );
	xhci->intrs = XHCI_HCSPARAMS1_INTRS ( hcsparams1 );
	xhci->ports = XHCI_HCSPARAMS1_PORTS ( hcsparams1 );
	DBGC ( xhci, "XHCI %p has %d slots %d intrs %d ports\n",
	       xhci, xhci->slots, xhci->intrs, xhci->ports );

	/* Read structural parameters 2 (number of scratchpad buffers
	 * that must be provided to the controller).
	 */
	hcsparams2 = readl ( xhci->cap + XHCI_CAP_HCSPARAMS2 );
	xhci->scratchpads = XHCI_HCSPARAMS2_SCRATCHPADS ( hcsparams2 );
	DBGC2 ( xhci, "XHCI %p needs %d scratchpads\n",
		xhci, xhci->scratchpads );

	/* Read capability parameters 1 (64-bit addressing support,
	 * context size shift, and extended capability pointer).
	 */
	hccparams1 = readl ( xhci->cap + XHCI_CAP_HCCPARAMS1 );
	xhci->addr64 = XHCI_HCCPARAMS1_ADDR64 ( hccparams1 );
	xhci->csz_shift = XHCI_HCCPARAMS1_CSZ_SHIFT ( hccparams1 );
	xhci->xecp = XHCI_HCCPARAMS1_XECP ( hccparams1 );

	/* Read page size (asserted to be a non-zero power of two) */
	pagesize = readl ( xhci->op + XHCI_OP_PAGESIZE );
	xhci->pagesize = XHCI_PAGESIZE ( pagesize );
	assert ( xhci->pagesize != 0 );
	assert ( ( ( xhci->pagesize ) & ( xhci->pagesize - 1 ) ) == 0 );
	DBGC2 ( xhci, "XHCI %p page size %zd bytes\n",
		xhci, xhci->pagesize );
}
314
315 /**
316 * Find extended capability
317 *
318 * @v xhci xHCI device
319 * @v id Capability ID
320 * @v offset Offset to previous extended capability instance, or zero
321 * @ret offset Offset to extended capability, or zero if not found
322 */
323 static unsigned int xhci_extended_capability ( struct xhci_device *xhci,
324 unsigned int id,
325 unsigned int offset ) {
326 uint32_t xecp;
327 unsigned int next;
328
329 /* Locate the extended capability */
330 while ( 1 ) {
331
332 /* Locate first or next capability as applicable */
333 if ( offset ) {
334 xecp = readl ( xhci->cap + offset );
335 next = XHCI_XECP_NEXT ( xecp );
336 } else {
337 next = xhci->xecp;
338 }
339 if ( ! next )
340 return 0;
341 offset += next;
342
343 /* Check if this is the requested capability */
344 xecp = readl ( xhci->cap + offset );
345 if ( XHCI_XECP_ID ( xecp ) == id )
346 return offset;
347 }
348 }
349
/**
 * Write potentially 64-bit register
 *
 * @v xhci		xHCI device
 * @v value		Value
 * @v reg		Register address
 * @ret rc		Return status code
 *
 * Writes a physical address into a register pair that is 64 bits
 * wide in hardware, regardless of the build's pointer width.
 */
static inline __attribute__ (( always_inline )) int
xhci_writeq ( struct xhci_device *xhci, physaddr_t value, void *reg ) {

	/* If this is a 32-bit build, then this can never fail
	 * (allowing the compiler to optimise out the error path).
	 * The high dword is explicitly zeroed since the hardware
	 * register is 64 bits wide.
	 */
	if ( sizeof ( value ) <= sizeof ( uint32_t ) ) {
		writel ( value, reg );
		writel ( 0, ( reg + sizeof ( uint32_t ) ) );
		return 0;
	}

	/* If the device does not support 64-bit addresses and this
	 * address is outside the 32-bit address space, then fail.
	 */
	if ( ( value & ~0xffffffffULL ) && ! xhci->addr64 ) {
		DBGC ( xhci, "XHCI %p cannot access address %lx\n",
		       xhci, value );
		return -ENOTSUP;
	}

	/* If this is a 64-bit build, then writeq() is available */
	writeq ( value, reg );
	return 0;
}
383
384 /**
385 * Calculate buffer alignment
386 *
387 * @v len Length
388 * @ret align Buffer alignment
389 *
390 * Determine alignment required for a buffer which must be aligned to
391 * at least XHCI_MIN_ALIGN and which must not cross a page boundary.
392 */
393 static inline size_t xhci_align ( size_t len ) {
394 size_t align;
395
396 /* Align to own length (rounded up to a power of two) */
397 align = ( 1 << fls ( len - 1 ) );
398
399 /* Round up to XHCI_MIN_ALIGN if needed */
400 if ( align < XHCI_MIN_ALIGN )
401 align = XHCI_MIN_ALIGN;
402
403 return align;
404 }
405
406 /**
407 * Calculate device context offset
408 *
409 * @v xhci xHCI device
410 * @v ctx Context index
411 */
412 static inline size_t xhci_device_context_offset ( struct xhci_device *xhci,
413 unsigned int ctx ) {
414
415 return ( XHCI_DCI ( ctx ) << xhci->csz_shift );
416 }
417
418 /**
419 * Calculate input context offset
420 *
421 * @v xhci xHCI device
422 * @v ctx Context index
423 */
424 static inline size_t xhci_input_context_offset ( struct xhci_device *xhci,
425 unsigned int ctx ) {
426
427 return ( XHCI_ICI ( ctx ) << xhci->csz_shift );
428 }
429
430 /******************************************************************************
431 *
432 * Diagnostics
433 *
434 ******************************************************************************
435 */
436
/**
 * Dump host controller registers
 *
 * @v xhci		xHCI device
 *
 * Reads and prints the principal operational registers.  Intended
 * purely as a debugging aid; compiles to nothing when debug logging
 * is disabled.
 */
static inline void xhci_dump ( struct xhci_device *xhci ) {
	uint32_t usbcmd;
	uint32_t usbsts;
	uint32_t pagesize;
	uint32_t dnctrl;
	uint32_t config;

	/* Do nothing unless debugging is enabled */
	if ( ! DBG_LOG )
		return;

	/* Dump USBCMD (decoding the run/stop and reset bits) */
	usbcmd = readl ( xhci->op + XHCI_OP_USBCMD );
	DBGC ( xhci, "XHCI %p USBCMD %08x%s%s\n", xhci, usbcmd,
	       ( ( usbcmd & XHCI_USBCMD_RUN ) ? " run" : "" ),
	       ( ( usbcmd & XHCI_USBCMD_HCRST ) ? " hcrst" : "" ) );

	/* Dump USBSTS (decoding the host-controller-halted bit) */
	usbsts = readl ( xhci->op + XHCI_OP_USBSTS );
	DBGC ( xhci, "XHCI %p USBSTS %08x%s\n", xhci, usbsts,
	       ( ( usbsts & XHCI_USBSTS_HCH ) ? " hch" : "" ) );

	/* Dump PAGESIZE */
	pagesize = readl ( xhci->op + XHCI_OP_PAGESIZE );
	DBGC ( xhci, "XHCI %p PAGESIZE %08x\n", xhci, pagesize );

	/* Dump DNCTRL */
	dnctrl = readl ( xhci->op + XHCI_OP_DNCTRL );
	DBGC ( xhci, "XHCI %p DNCTRL %08x\n", xhci, dnctrl );

	/* Dump CONFIG */
	config = readl ( xhci->op + XHCI_OP_CONFIG );
	DBGC ( xhci, "XHCI %p CONFIG %08x\n", xhci, config );
}
476
/**
 * Dump port registers
 *
 * @v xhci		xHCI device
 * @v port		Port number
 *
 * Reads and prints the per-port register set.  Intended purely as a
 * debugging aid; compiles to nothing when debug logging is disabled.
 */
static inline void xhci_dump_port ( struct xhci_device *xhci,
				    unsigned int port ) {
	uint32_t portsc;
	uint32_t portpmsc;
	uint32_t portli;
	uint32_t porthlpmc;

	/* Do nothing unless debugging is enabled */
	if ( ! DBG_LOG )
		return;

	/* Dump PORTSC (decoding connect, enable, reset, and power
	 * bits, plus the port speed ID).
	 */
	portsc = readl ( xhci->op + XHCI_OP_PORTSC ( port ) );
	DBGC ( xhci, "XHCI %p port %d PORTSC %08x%s%s%s%s psiv=%d\n",
	       xhci, port, portsc,
	       ( ( portsc & XHCI_PORTSC_CCS ) ? " ccs" : "" ),
	       ( ( portsc & XHCI_PORTSC_PED ) ? " ped" : "" ),
	       ( ( portsc & XHCI_PORTSC_PR ) ? " pr" : "" ),
	       ( ( portsc & XHCI_PORTSC_PP ) ? " pp" : "" ),
	       XHCI_PORTSC_PSIV ( portsc ) );

	/* Dump PORTPMSC */
	portpmsc = readl ( xhci->op + XHCI_OP_PORTPMSC ( port ) );
	DBGC ( xhci, "XHCI %p port %d PORTPMSC %08x\n", xhci, port, portpmsc );

	/* Dump PORTLI */
	portli = readl ( xhci->op + XHCI_OP_PORTLI ( port ) );
	DBGC ( xhci, "XHCI %p port %d PORTLI %08x\n", xhci, port, portli );

	/* Dump PORTHLPMC */
	porthlpmc = readl ( xhci->op + XHCI_OP_PORTHLPMC ( port ) );
	DBGC ( xhci, "XHCI %p port %d PORTHLPMC %08x\n",
	       xhci, port, porthlpmc );
}
517
518 /******************************************************************************
519 *
520 * USB legacy support
521 *
522 ******************************************************************************
523 */
524
/** Prevent the release of ownership back to BIOS
 *
 * When non-zero, xhci_legacy_release() becomes a no-op.
 * NOTE(review): set elsewhere in this driver (not visible in this
 * chunk) — confirm the setter before relying on the default of zero.
 */
static int xhci_legacy_prevent_release;
527
/**
 * Initialise USB legacy support
 *
 * @v xhci		xHCI device
 *
 * Locates the USB legacy support extended capability (if any) and
 * records its offset in xhci->legacy, but only when the BIOS
 * currently claims ownership; otherwise there is nothing for the
 * driver to hand back later.
 */
static void xhci_legacy_init ( struct xhci_device *xhci ) {
	unsigned int legacy;
	uint8_t bios;

	/* Locate USB legacy support capability (if present) */
	legacy = xhci_extended_capability ( xhci, XHCI_XECP_ID_LEGACY, 0 );
	if ( ! legacy ) {
		/* Not an error; capability may not be present */
		DBGC ( xhci, "XHCI %p has no USB legacy support capability\n",
		       xhci );
		return;
	}

	/* Check if legacy USB support is enabled */
	bios = readb ( xhci->cap + legacy + XHCI_USBLEGSUP_BIOS );
	if ( ! ( bios & XHCI_USBLEGSUP_BIOS_OWNED ) ) {
		/* Not an error; already owned by OS */
		DBGC ( xhci, "XHCI %p USB legacy support already disabled\n",
		       xhci );
		return;
	}

	/* Record presence of USB legacy support capability */
	xhci->legacy = legacy;
}
558
/**
 * Claim ownership from BIOS
 *
 * @v xhci		xHCI device
 *
 * Performs the OS/BIOS ownership handshake: sets the OS-owned
 * semaphore, polls for the BIOS to drop its semaphore, and falls
 * back to forcibly disabling SMIs if the BIOS never responds.
 */
static void xhci_legacy_claim ( struct xhci_device *xhci ) {
	uint32_t ctlsts;
	uint8_t bios;
	unsigned int i;

	/* Do nothing unless legacy support capability is present */
	if ( ! xhci->legacy )
		return;

	/* Claim ownership by setting the OS-owned semaphore */
	writeb ( XHCI_USBLEGSUP_OS_OWNED,
		 xhci->cap + xhci->legacy + XHCI_USBLEGSUP_OS );

	/* Wait for BIOS to release ownership */
	for ( i = 0 ; i < XHCI_USBLEGSUP_MAX_WAIT_MS ; i++ ) {

		/* Check if BIOS has released ownership */
		bios = readb ( xhci->cap + xhci->legacy + XHCI_USBLEGSUP_BIOS );
		if ( ! ( bios & XHCI_USBLEGSUP_BIOS_OWNED ) ) {
			DBGC ( xhci, "XHCI %p claimed ownership from BIOS\n",
			       xhci );
			/* Warn if the BIOS left any SMI enables set */
			ctlsts = readl ( xhci->cap + xhci->legacy +
					 XHCI_USBLEGSUP_CTLSTS );
			if ( ctlsts ) {
				DBGC ( xhci, "XHCI %p warning: BIOS retained "
				       "SMIs: %08x\n", xhci, ctlsts );
			}
			return;
		}

		/* Delay 1ms between polls */
		mdelay ( 1 );
	}

	/* BIOS did not release ownership.  Claim it forcibly by
	 * disabling all SMIs.
	 */
	DBGC ( xhci, "XHCI %p could not claim ownership from BIOS: forcibly "
	       "disabling SMIs\n", xhci );
	writel ( 0, xhci->cap + xhci->legacy + XHCI_USBLEGSUP_CTLSTS );
}
605
/**
 * Release ownership back to BIOS
 *
 * @v xhci		xHCI device
 *
 * Clears the OS-owned semaphore, unless release has been globally
 * prevented via xhci_legacy_prevent_release.
 */
static void xhci_legacy_release ( struct xhci_device *xhci ) {

	/* Do nothing unless legacy support capability is present */
	if ( ! xhci->legacy )
		return;

	/* Do nothing if releasing ownership is prevented */
	if ( xhci_legacy_prevent_release ) {
		DBGC ( xhci, "XHCI %p not releasing ownership to BIOS\n", xhci);
		return;
	}

	/* Release ownership by clearing the OS-owned semaphore */
	writeb ( 0, xhci->cap + xhci->legacy + XHCI_USBLEGSUP_OS );
	DBGC ( xhci, "XHCI %p released ownership to BIOS\n", xhci );
}
627
628 /******************************************************************************
629 *
630 * Supported protocols
631 *
632 ******************************************************************************
633 */
634
/**
 * Transcribe port speed (for debugging)
 *
 * @v psi		Protocol speed ID
 * @ret speed		Transcribed speed
 *
 * Returns a pointer to a static buffer, overwritten on each call.
 */
static inline const char * xhci_speed_name ( uint32_t psi ) {
	static const char *exponents[4] = { "", "k", "M", "G" };
	static char buf[ 10 /* "xxxxxXbps" + NUL */ ];
	unsigned int exponent = XHCI_SUPPORTED_PSI_EXPONENT ( psi );
	unsigned int mantissa = XHCI_SUPPORTED_PSI_MANTISSA ( psi );

	/* Render as e.g. "480Mbps" using the SI prefix table */
	snprintf ( buf, sizeof ( buf ), "%d%sbps",
		   mantissa, exponents[exponent] );
	return buf;
}
656
657 /**
658 * Find supported protocol extended capability for a port
659 *
660 * @v xhci xHCI device
661 * @v port Port number
662 * @ret supported Offset to extended capability, or zero if not found
663 */
664 static unsigned int xhci_supported_protocol ( struct xhci_device *xhci,
665 unsigned int port ) {
666 unsigned int supported = 0;
667 unsigned int offset;
668 unsigned int count;
669 uint32_t ports;
670
671 /* Iterate over all supported protocol structures */
672 while ( ( supported = xhci_extended_capability ( xhci,
673 XHCI_XECP_ID_SUPPORTED,
674 supported ) ) ) {
675
676 /* Determine port range */
677 ports = readl ( xhci->cap + supported + XHCI_SUPPORTED_PORTS );
678 offset = XHCI_SUPPORTED_PORTS_OFFSET ( ports );
679 count = XHCI_SUPPORTED_PORTS_COUNT ( ports );
680
681 /* Check if port lies within this range */
682 if ( ( port - offset ) < count )
683 return supported;
684 }
685
686 DBGC ( xhci, "XHCI %p port %d has no supported protocol\n",
687 xhci, port );
688 return 0;
689 }
690
/**
 * Find port protocol
 *
 * @v xhci		xHCI device
 * @v port		Port number
 * @ret protocol	USB protocol (BCD revision), or zero if not found
 */
static unsigned int xhci_port_protocol ( struct xhci_device *xhci,
					 unsigned int port ) {
	unsigned int supported = xhci_supported_protocol ( xhci, port );
	union {
		uint32_t raw;
		char text[5];	/* four name characters plus NUL */
	} name;
	unsigned int protocol;
	unsigned int type;
	unsigned int psic;
	unsigned int psiv;
	unsigned int i;
	uint32_t revision;
	uint32_t ports;
	uint32_t slot;
	uint32_t psi;

	/* Fail if there is no supported protocol */
	if ( ! supported )
		return 0;

	/* Determine protocol version from the capability's revision field */
	revision = readl ( xhci->cap + supported + XHCI_SUPPORTED_REVISION );
	protocol = XHCI_SUPPORTED_REVISION_VER ( revision );

	/* Describe port protocol (debug builds only): protocol name
	 * string, slot type, and any explicit protocol speed IDs.
	 */
	if ( DBG_EXTRA ) {
		name.raw = cpu_to_le32 ( readl ( xhci->cap + supported +
						 XHCI_SUPPORTED_NAME ) );
		name.text[4] = '\0';
		slot = readl ( xhci->cap + supported + XHCI_SUPPORTED_SLOT );
		type = XHCI_SUPPORTED_SLOT_TYPE ( slot );
		DBGC2 ( xhci, "XHCI %p port %d %sv%04x type %d",
			xhci, port, name.text, protocol, type );
		ports = readl ( xhci->cap + supported + XHCI_SUPPORTED_PORTS );
		psic = XHCI_SUPPORTED_PORTS_PSIC ( ports );
		if ( psic ) {
			DBGC2 ( xhci, " speeds" );
			for ( i = 0 ; i < psic ; i++ ) {
				psi = readl ( xhci->cap + supported +
					      XHCI_SUPPORTED_PSI ( i ) );
				psiv = XHCI_SUPPORTED_PSI_VALUE ( psi );
				DBGC2 ( xhci, " %d:%s", psiv,
					xhci_speed_name ( psi ) );
			}
		}
		DBGC2 ( xhci, "\n" );
	}

	return protocol;
}
749
750 /**
751 * Find port slot type
752 *
753 * @v xhci xHCI device
754 * @v port Port number
755 * @ret type Slot type, or negative error
756 */
757 static int xhci_port_slot_type ( struct xhci_device *xhci, unsigned int port ) {
758 unsigned int supported = xhci_supported_protocol ( xhci, port );
759 unsigned int type;
760 uint32_t slot;
761
762 /* Fail if there is no supported protocol */
763 if ( ! supported )
764 return -ENOTSUP;
765
766 /* Get slot type */
767 slot = readl ( xhci->cap + supported + XHCI_SUPPORTED_SLOT );
768 type = XHCI_SUPPORTED_SLOT_TYPE ( slot );
769
770 return type;
771 }
772
773 /**
774 * Find port speed
775 *
776 * @v xhci xHCI device
777 * @v port Port number
778 * @v psiv Protocol speed ID value
779 * @ret speed Port speed, or negative error
780 */
781 static int xhci_port_speed ( struct xhci_device *xhci, unsigned int port,
782 unsigned int psiv ) {
783 unsigned int supported = xhci_supported_protocol ( xhci, port );
784 unsigned int psic;
785 unsigned int mantissa;
786 unsigned int exponent;
787 unsigned int speed;
788 unsigned int i;
789 uint32_t ports;
790 uint32_t psi;
791
792 /* Fail if there is no supported protocol */
793 if ( ! supported )
794 return -ENOTSUP;
795
796 /* Get protocol speed ID count */
797 ports = readl ( xhci->cap + supported + XHCI_SUPPORTED_PORTS );
798 psic = XHCI_SUPPORTED_PORTS_PSIC ( ports );
799
800 /* Use the default mappings if applicable */
801 if ( ! psic ) {
802 switch ( psiv ) {
803 case XHCI_SPEED_LOW : return USB_SPEED_LOW;
804 case XHCI_SPEED_FULL : return USB_SPEED_FULL;
805 case XHCI_SPEED_HIGH : return USB_SPEED_HIGH;
806 case XHCI_SPEED_SUPER : return USB_SPEED_SUPER;
807 default:
808 DBGC ( xhci, "XHCI %p port %d non-standard PSI value "
809 "%d\n", xhci, port, psiv );
810 return -ENOTSUP;
811 }
812 }
813
814 /* Iterate over PSI dwords looking for a match */
815 for ( i = 0 ; i < psic ; i++ ) {
816 psi = readl ( xhci->cap + supported + XHCI_SUPPORTED_PSI ( i ));
817 if ( psiv == XHCI_SUPPORTED_PSI_VALUE ( psi ) ) {
818 mantissa = XHCI_SUPPORTED_PSI_MANTISSA ( psi );
819 exponent = XHCI_SUPPORTED_PSI_EXPONENT ( psi );
820 speed = USB_SPEED ( mantissa, exponent );
821 return speed;
822 }
823 }
824
825 DBGC ( xhci, "XHCI %p port %d spurious PSI value %d\n",
826 xhci, port, psiv );
827 return -ENOENT;
828 }
829
830 /**
831 * Find protocol speed ID value
832 *
833 * @v xhci xHCI device
834 * @v port Port number
835 * @v speed USB speed
836 * @ret psiv Protocol speed ID value, or negative error
837 */
838 static int xhci_port_psiv ( struct xhci_device *xhci, unsigned int port,
839 unsigned int speed ) {
840 unsigned int supported = xhci_supported_protocol ( xhci, port );
841 unsigned int psic;
842 unsigned int mantissa;
843 unsigned int exponent;
844 unsigned int psiv;
845 unsigned int i;
846 uint32_t ports;
847 uint32_t psi;
848
849 /* Fail if there is no supported protocol */
850 if ( ! supported )
851 return -ENOTSUP;
852
853 /* Get protocol speed ID count */
854 ports = readl ( xhci->cap + supported + XHCI_SUPPORTED_PORTS );
855 psic = XHCI_SUPPORTED_PORTS_PSIC ( ports );
856
857 /* Use the default mappings if applicable */
858 if ( ! psic ) {
859 switch ( speed ) {
860 case USB_SPEED_LOW : return XHCI_SPEED_LOW;
861 case USB_SPEED_FULL : return XHCI_SPEED_FULL;
862 case USB_SPEED_HIGH : return XHCI_SPEED_HIGH;
863 case USB_SPEED_SUPER : return XHCI_SPEED_SUPER;
864 default:
865 DBGC ( xhci, "XHCI %p port %d non-standad speed %d\n",
866 xhci, port, speed );
867 return -ENOTSUP;
868 }
869 }
870
871 /* Iterate over PSI dwords looking for a match */
872 for ( i = 0 ; i < psic ; i++ ) {
873 psi = readl ( xhci->cap + supported + XHCI_SUPPORTED_PSI ( i ));
874 mantissa = XHCI_SUPPORTED_PSI_MANTISSA ( psi );
875 exponent = XHCI_SUPPORTED_PSI_EXPONENT ( psi );
876 if ( speed == USB_SPEED ( mantissa, exponent ) ) {
877 psiv = XHCI_SUPPORTED_PSI_VALUE ( psi );
878 return psiv;
879 }
880 }
881
882 DBGC ( xhci, "XHCI %p port %d unrepresentable speed %#x\n",
883 xhci, port, speed );
884 return -ENOENT;
885 }
886
887 /******************************************************************************
888 *
889 * Device context base address array
890 *
891 ******************************************************************************
892 */
893
/**
 * Allocate device context base address array
 *
 * @v xhci		xHCI device
 * @ret rc		Return status code
 *
 * Allocates the DCBAA (one 64-bit entry per slot, plus entry zero
 * for the scratchpad array) and programs its physical address into
 * the DCBAAP register.  Freed by xhci_dcbaa_free().
 */
static int xhci_dcbaa_alloc ( struct xhci_device *xhci ) {
	size_t len;
	physaddr_t dcbaap;
	int rc;

	/* Allocate and initialise structure.  Must be at least
	 * 64-byte aligned and must not cross a page boundary, so
	 * align on its own size (rounded up to a power of two and
	 * with a minimum of 64 bytes).
	 */
	len = ( ( xhci->slots + 1 ) * sizeof ( xhci->dcbaa[0] ) );
	xhci->dcbaa = malloc_dma ( len, xhci_align ( len ) );
	if ( ! xhci->dcbaa ) {
		DBGC ( xhci, "XHCI %p could not allocate DCBAA\n", xhci );
		rc = -ENOMEM;
		goto err_alloc;
	}
	memset ( xhci->dcbaa, 0, len );

	/* Program DCBAA pointer (may fail if the buffer lies above
	 * 4GB on a controller without 64-bit support).
	 */
	dcbaap = virt_to_phys ( xhci->dcbaa );
	if ( ( rc = xhci_writeq ( xhci, dcbaap,
				  xhci->op + XHCI_OP_DCBAAP ) ) != 0 )
		goto err_writeq;

	DBGC2 ( xhci, "XHCI %p DCBAA at [%08lx,%08lx)\n",
		xhci, dcbaap, ( dcbaap + len ) );
	return 0;

 err_writeq:
	free_dma ( xhci->dcbaa, len );
 err_alloc:
	return rc;
}
934
935 /**
936 * Free device context base address array
937 *
938 * @v xhci xHCI device
939 */
940 static void xhci_dcbaa_free ( struct xhci_device *xhci ) {
941 size_t len;
942 unsigned int i;
943
944 /* Sanity check */
945 for ( i = 0 ; i <= xhci->slots ; i++ )
946 assert ( xhci->dcbaa[i] == 0 );
947
948 /* Clear DCBAA pointer */
949 xhci_writeq ( xhci, 0, xhci->op + XHCI_OP_DCBAAP );
950
951 /* Free DCBAA */
952 len = ( ( xhci->slots + 1 ) * sizeof ( xhci->dcbaa[0] ) );
953 free_dma ( xhci->dcbaa, len );
954 }
955
956 /******************************************************************************
957 *
958 * Scratchpad buffers
959 *
960 ******************************************************************************
961 */
962
/**
 * Allocate scratchpad buffers
 *
 * @v xhci		xHCI device
 * @ret rc		Return status code
 *
 * Allocates the scratchpad pages demanded by the controller (one
 * page each) plus the array of their physical addresses, and records
 * the array's address in DCBAA entry zero.
 */
static int xhci_scratchpad_alloc ( struct xhci_device *xhci ) {
	size_t array_len;
	size_t len;
	physaddr_t phys;
	unsigned int i;
	int rc;

	/* Do nothing if no scratchpad buffers are used */
	if ( ! xhci->scratchpads )
		return 0;

	/* Allocate scratchpads (one controller page each, zeroed) */
	len = ( xhci->scratchpads * xhci->pagesize );
	xhci->scratchpad = umalloc ( len );
	if ( ! xhci->scratchpad ) {
		DBGC ( xhci, "XHCI %p could not allocate scratchpad buffers\n",
		       xhci );
		rc = -ENOMEM;
		goto err_alloc;
	}
	memset_user ( xhci->scratchpad, 0, 0, len );

	/* Allocate scratchpad array (aligned as per xhci_align()) */
	array_len = ( xhci->scratchpads * sizeof ( xhci->scratchpad_array[0] ));
	xhci->scratchpad_array =
		malloc_dma ( array_len, xhci_align ( array_len ) );
	if ( ! xhci->scratchpad_array ) {
		DBGC ( xhci, "XHCI %p could not allocate scratchpad buffer "
		       "array\n", xhci );
		rc = -ENOMEM;
		goto err_alloc_array;
	}

	/* Populate scratchpad array with per-page physical addresses */
	for ( i = 0 ; i < xhci->scratchpads ; i++ ) {
		phys = user_to_phys ( xhci->scratchpad, ( i * xhci->pagesize ));
		xhci->scratchpad_array[i] = phys;
	}

	/* Set scratchpad array pointer in DCBAA entry zero */
	assert ( xhci->dcbaa != NULL );
	xhci->dcbaa[0] = cpu_to_le64 ( virt_to_phys ( xhci->scratchpad_array ));

	DBGC2 ( xhci, "XHCI %p scratchpad [%08lx,%08lx) array [%08lx,%08lx)\n",
		xhci, user_to_phys ( xhci->scratchpad, 0 ),
		user_to_phys ( xhci->scratchpad, len ),
		virt_to_phys ( xhci->scratchpad_array ),
		( virt_to_phys ( xhci->scratchpad_array ) + array_len ) );
	return 0;

	/* NOTE(review): the free_dma() below is unreachable — no
	 * failure path exists after the array allocation succeeds.
	 * It appears to be kept for error-path symmetry; confirm
	 * intent before removing.
	 */
	free_dma ( xhci->scratchpad_array, array_len );
 err_alloc_array:
	ufree ( xhci->scratchpad );
 err_alloc:
	return rc;
}
1025
1026 /**
1027 * Free scratchpad buffers
1028 *
1029 * @v xhci xHCI device
1030 */
1031 static void xhci_scratchpad_free ( struct xhci_device *xhci ) {
1032 size_t array_len;
1033
1034 /* Do nothing if no scratchpad buffers are used */
1035 if ( ! xhci->scratchpads )
1036 return;
1037
1038 /* Clear scratchpad array pointer */
1039 assert ( xhci->dcbaa != NULL );
1040 xhci->dcbaa[0] = 0;
1041
1042 /* Free scratchpad array */
1043 array_len = ( xhci->scratchpads * sizeof ( xhci->scratchpad_array[0] ));
1044 free_dma ( xhci->scratchpad_array, array_len );
1045
1046 /* Free scratchpads */
1047 ufree ( xhci->scratchpad );
1048 }
1049
1050 /******************************************************************************
1051 *
1052 * Run / stop / reset
1053 *
1054 ******************************************************************************
1055 */
1056
1057 /**
1058 * Start xHCI device
1059 *
1060 * @v xhci xHCI device
1061 */
1062 static void xhci_run ( struct xhci_device *xhci ) {
1063 uint32_t config;
1064 uint32_t usbcmd;
1065
1066 /* Configure number of device slots */
1067 config = readl ( xhci->op + XHCI_OP_CONFIG );
1068 config &= ~XHCI_CONFIG_MAX_SLOTS_EN_MASK;
1069 config |= XHCI_CONFIG_MAX_SLOTS_EN ( xhci->slots );
1070 writel ( config, xhci->op + XHCI_OP_CONFIG );
1071
1072 /* Set run/stop bit */
1073 usbcmd = readl ( xhci->op + XHCI_OP_USBCMD );
1074 usbcmd |= XHCI_USBCMD_RUN;
1075 writel ( usbcmd, xhci->op + XHCI_OP_USBCMD );
1076 }
1077
1078 /**
1079 * Stop xHCI device
1080 *
1081 * @v xhci xHCI device
1082 * @ret rc Return status code
1083 */
1084 static int xhci_stop ( struct xhci_device *xhci ) {
1085 uint32_t usbcmd;
1086 uint32_t usbsts;
1087 unsigned int i;
1088
1089 /* Clear run/stop bit */
1090 usbcmd = readl ( xhci->op + XHCI_OP_USBCMD );
1091 usbcmd &= ~XHCI_USBCMD_RUN;
1092 writel ( usbcmd, xhci->op + XHCI_OP_USBCMD );
1093
1094 /* Wait for device to stop */
1095 for ( i = 0 ; i < XHCI_STOP_MAX_WAIT_MS ; i++ ) {
1096
1097 /* Check if device is stopped */
1098 usbsts = readl ( xhci->op + XHCI_OP_USBSTS );
1099 if ( usbsts & XHCI_USBSTS_HCH )
1100 return 0;
1101
1102 /* Delay */
1103 mdelay ( 1 );
1104 }
1105
1106 DBGC ( xhci, "XHCI %p timed out waiting for stop\n", xhci );
1107 return -ETIMEDOUT;
1108 }
1109
1110 /**
1111 * Reset xHCI device
1112 *
1113 * @v xhci xHCI device
1114 * @ret rc Return status code
1115 */
1116 static int xhci_reset ( struct xhci_device *xhci ) {
1117 uint32_t usbcmd;
1118 unsigned int i;
1119 int rc;
1120
1121 /* The xHCI specification states that resetting a running
1122 * device may result in undefined behaviour, so try stopping
1123 * it first.
1124 */
1125 if ( ( rc = xhci_stop ( xhci ) ) != 0 ) {
1126 /* Ignore errors and attempt to reset the device anyway */
1127 }
1128
1129 /* Reset device */
1130 writel ( XHCI_USBCMD_HCRST, xhci->op + XHCI_OP_USBCMD );
1131
1132 /* Wait for reset to complete */
1133 for ( i = 0 ; i < XHCI_RESET_MAX_WAIT_MS ; i++ ) {
1134
1135 /* Check if reset is complete */
1136 usbcmd = readl ( xhci->op + XHCI_OP_USBCMD );
1137 if ( ! ( usbcmd & XHCI_USBCMD_HCRST ) )
1138 return 0;
1139
1140 /* Delay */
1141 mdelay ( 1 );
1142 }
1143
1144 DBGC ( xhci, "XHCI %p timed out waiting for reset\n", xhci );
1145 return -ETIMEDOUT;
1146 }
1147
1148 /******************************************************************************
1149 *
1150 * Transfer request blocks
1151 *
1152 ******************************************************************************
1153 */
1154
/**
 * Allocate transfer request block ring
 *
 * @v xhci		xHCI device
 * @v ring		TRB ring
 * @v shift		Ring size (log2)
 * @v slot		Device slot
 * @v target		Doorbell target
 * @v stream		Doorbell stream ID
 * @ret rc		Return status code
 */
static int xhci_ring_alloc ( struct xhci_device *xhci,
			     struct xhci_trb_ring *ring,
			     unsigned int shift, unsigned int slot,
			     unsigned int target, unsigned int stream ) {
	struct xhci_trb_link *link;
	unsigned int count;
	int rc;

	/* Sanity check */
	assert ( shift > 0 );

	/* Initialise structure */
	memset ( ring, 0, sizeof ( *ring ) );
	ring->shift = shift;
	count = ( 1U << shift );
	ring->mask = ( count - 1 );
	/* One extra TRB is allocated to hold the Link TRB that chains
	 * the ring back to its own start.
	 */
	ring->len = ( ( count + 1 /* Link TRB */ ) * sizeof ( ring->trb[0] ) );
	/* Precompute the doorbell register address and value used to
	 * notify the controller of newly enqueued TRBs.
	 */
	ring->db = ( xhci->db + ( slot * sizeof ( ring->dbval ) ) );
	ring->dbval = XHCI_DBVAL ( target, stream );

	/* Allocate I/O buffer pointer array (one slot per TRB) */
	ring->iobuf = zalloc ( count * sizeof ( ring->iobuf[0] ) );
	if ( ! ring->iobuf ) {
		rc = -ENOMEM;
		goto err_alloc_iobuf;
	}

	/* Allocate TRBs (zeroed; the controller reads these via DMA) */
	ring->trb = malloc_dma ( ring->len, xhci_align ( ring->len ) );
	if ( ! ring->trb ) {
		rc = -ENOMEM;
		goto err_alloc_trb;
	}
	memset ( ring->trb, 0, ring->len );

	/* Initialise Link TRB, pointing back to the physical start of
	 * the ring with the Toggle Cycle flag set.
	 */
	link = &ring->trb[count].link;
	link->next = cpu_to_le64 ( virt_to_phys ( ring->trb ) );
	link->flags = XHCI_TRB_TC;
	link->type = XHCI_TRB_LINK;
	ring->link = link;

	return 0;

	/* Unreachable from the success path; retained to document the
	 * unwind order for the error labels below.
	 */
	free_dma ( ring->trb, ring->len );
 err_alloc_trb:
	free ( ring->iobuf );
 err_alloc_iobuf:
	return rc;
}
1216
1217 /**
1218 * Reset transfer request block ring
1219 *
1220 * @v ring TRB ring
1221 */
1222 static void xhci_ring_reset ( struct xhci_trb_ring *ring ) {
1223 unsigned int count = ( 1U << ring->shift );
1224
1225 /* Reset producer and consumer counters */
1226 ring->prod = 0;
1227 ring->cons = 0;
1228
1229 /* Reset TRBs (except Link TRB) */
1230 memset ( ring->trb, 0, ( count * sizeof ( ring->trb[0] ) ) );
1231 }
1232
1233 /**
1234 * Free transfer request block ring
1235 *
1236 * @v ring TRB ring
1237 */
1238 static void xhci_ring_free ( struct xhci_trb_ring *ring ) {
1239 unsigned int count = ( 1U << ring->shift );
1240 unsigned int i;
1241
1242 /* Sanity checks */
1243 assert ( ring->cons == ring->prod );
1244 for ( i = 0 ; i < count ; i++ )
1245 assert ( ring->iobuf[i] == NULL );
1246
1247 /* Free TRBs */
1248 free_dma ( ring->trb, ring->len );
1249
1250 /* Free I/O buffers */
1251 free ( ring->iobuf );
1252 }
1253
/**
 * Enqueue a transfer request block
 *
 * @v ring		TRB ring
 * @v iobuf		I/O buffer (if any)
 * @v trb		Transfer request block (with empty Cycle flag)
 * @ret rc		Return status code
 *
 * This operation does not implicitly ring the doorbell register.
 */
static int xhci_enqueue ( struct xhci_trb_ring *ring, struct io_buffer *iobuf,
			  const union xhci_trb *trb ) {
	union xhci_trb *dest;
	unsigned int prod;
	unsigned int mask;
	unsigned int index;
	unsigned int cycle;

	/* Sanity check: the caller must supply the TRB with the Cycle
	 * flag clear; it is filled in here.
	 */
	assert ( ! ( trb->common.flags & XHCI_TRB_C ) );

	/* Fail if ring is full */
	if ( ! xhci_ring_remaining ( ring ) )
		return -ENOBUFS;

	/* Update producer counter (and link TRB, if applicable).  The
	 * producer Cycle flag alternates on each complete pass around
	 * the ring (i.e. each time prod wraps past the ring size).
	 */
	prod = ring->prod++;
	mask = ring->mask;
	cycle = ( ( ~( prod >> ring->shift ) ) & XHCI_TRB_C );
	index = ( prod & mask );
	/* At the start of each pass, rewrite the Link TRB's flags:
	 * Toggle Cycle plus the inverse of the current producer Cycle
	 * flag, per the xHCI cycle-bit protocol.
	 */
	if ( index == 0 )
		ring->link->flags = ( XHCI_TRB_TC | ( cycle ^ XHCI_TRB_C ) );

	/* Record I/O buffer so completion handling can retrieve it */
	ring->iobuf[index] = iobuf;

	/* Enqueue TRB: write parameter and status first, then issue a
	 * write barrier before setting the control word (which carries
	 * the Cycle flag that makes the TRB visible to the controller).
	 */
	dest = &ring->trb[index];
	dest->template.parameter = trb->template.parameter;
	dest->template.status = trb->template.status;
	wmb();
	dest->template.control = ( trb->template.control |
				   cpu_to_le32 ( cycle ) );

	return 0;
}
1300
1301 /**
1302 * Dequeue a transfer request block
1303 *
1304 * @v ring TRB ring
1305 * @ret iobuf I/O buffer
1306 */
1307 static struct io_buffer * xhci_dequeue ( struct xhci_trb_ring *ring ) {
1308 struct io_buffer *iobuf;
1309 unsigned int cons;
1310 unsigned int mask;
1311 unsigned int index;
1312
1313 /* Sanity check */
1314 assert ( xhci_ring_fill ( ring ) != 0 );
1315
1316 /* Update consumer counter */
1317 cons = ring->cons++;
1318 mask = ring->mask;
1319 index = ( cons & mask );
1320
1321 /* Retrieve I/O buffer */
1322 iobuf = ring->iobuf[index];
1323 ring->iobuf[index] = NULL;
1324
1325 return iobuf;
1326 }
1327
1328 /**
1329 * Enqueue multiple transfer request blocks
1330 *
1331 * @v ring TRB ring
1332 * @v iobuf I/O buffer
1333 * @v trbs Transfer request blocks (with empty Cycle flag)
1334 * @v count Number of transfer request blocks
1335 * @ret rc Return status code
1336 *
1337 * This operation does not implicitly ring the doorbell register.
1338 */
1339 static int xhci_enqueue_multi ( struct xhci_trb_ring *ring,
1340 struct io_buffer *iobuf,
1341 const union xhci_trb *trbs,
1342 unsigned int count ) {
1343 const union xhci_trb *trb = trbs;
1344 int rc;
1345
1346 /* Sanity check */
1347 assert ( iobuf != NULL );
1348
1349 /* Fail if ring does not have sufficient space */
1350 if ( xhci_ring_remaining ( ring ) < count )
1351 return -ENOBUFS;
1352
1353 /* Enqueue each TRB, recording the I/O buffer with the final TRB */
1354 while ( count-- ) {
1355 rc = xhci_enqueue ( ring, ( count ? NULL : iobuf ), trb++ );
1356 assert ( rc == 0 ); /* Should never be able to fail */
1357 }
1358
1359 return 0;
1360 }
1361
/**
 * Dequeue multiple transfer request blocks
 *
 * @v ring		TRB ring
 * @ret iobuf		I/O buffer
 */
static struct io_buffer * xhci_dequeue_multi ( struct xhci_trb_ring *ring ) {
	struct io_buffer *iobuf;

	/* Only the final TRB of a transfer carries the I/O buffer;
	 * keep dequeuing until it is found.
	 */
	while ( ( iobuf = xhci_dequeue ( ring ) ) == NULL ) {
		/* Intermediate TRB; continue */
	}

	return iobuf;
}
1378
/**
 * Ring doorbell register
 *
 * @v ring		TRB ring
 *
 * Notifies the controller that new TRBs have been enqueued on this
 * ring, using the doorbell address and value precomputed at ring
 * allocation time.
 */
static inline __attribute__ (( always_inline )) void
xhci_doorbell ( struct xhci_trb_ring *ring ) {

	/* Barrier ensures all TRB writes are visible to the controller
	 * before the doorbell write triggers processing.
	 */
	wmb();
	writel ( ring->dbval, ring->db );
}
1390
1391 /******************************************************************************
1392 *
1393 * Command and event rings
1394 *
1395 ******************************************************************************
1396 */
1397
/**
 * Allocate command ring
 *
 * @v xhci		xHCI device
 * @ret rc		Return status code
 */
static int xhci_command_alloc ( struct xhci_device *xhci ) {
	physaddr_t crp;
	int rc;

	/* Allocate TRB ring (slot 0, target 0, stream 0) */
	if ( ( rc = xhci_ring_alloc ( xhci, &xhci->command, XHCI_CMD_TRBS_LOG2,
				      0, 0, 0 ) ) != 0 )
		goto err_ring_alloc;

	/* Program command ring control register with the ring's
	 * physical base address and the initial Ring Cycle State.
	 */
	crp = virt_to_phys ( xhci->command.trb );
	if ( ( rc = xhci_writeq ( xhci, ( crp | XHCI_CRCR_RCS ),
				  xhci->op + XHCI_OP_CRCR ) ) != 0 )
		goto err_writeq;

	DBGC2 ( xhci, "XHCI %p CRCR at [%08lx,%08lx)\n",
		xhci, crp, ( crp + xhci->command.len ) );
	return 0;

 err_writeq:
	xhci_ring_free ( &xhci->command );
 err_ring_alloc:
	return rc;
}
1428
1429 /**
1430 * Free command ring
1431 *
1432 * @v xhci xHCI device
1433 */
1434 static void xhci_command_free ( struct xhci_device *xhci ) {
1435
1436 /* Sanity check */
1437 assert ( ( readl ( xhci->op + XHCI_OP_CRCR ) & XHCI_CRCR_CRR ) == 0 );
1438
1439 /* Clear command ring control register */
1440 xhci_writeq ( xhci, 0, xhci->op + XHCI_OP_CRCR );
1441
1442 /* Free TRB ring */
1443 xhci_ring_free ( &xhci->command );
1444 }
1445
1446 /**
1447 * Allocate event ring
1448 *
1449 * @v xhci xHCI device
1450 * @ret rc Return status code
1451 */
1452 static int xhci_event_alloc ( struct xhci_device *xhci ) {
1453 struct xhci_event_ring *event = &xhci->event;
1454 unsigned int count;
1455 size_t len;
1456 int rc;
1457
1458 /* Allocate event ring */
1459 count = ( 1 << XHCI_EVENT_TRBS_LOG2 );
1460 len = ( count * sizeof ( event->trb[0] ) );
1461 event->trb = malloc_dma ( len, xhci_align ( len ) );
1462 if ( ! event->trb ) {
1463 rc = -ENOMEM;
1464 goto err_alloc_trb;
1465 }
1466 memset ( event->trb, 0, len );
1467
1468 /* Allocate event ring segment table */
1469 event->segment = malloc_dma ( sizeof ( event->segment[0] ),
1470 xhci_align ( sizeof (event->segment[0])));
1471 if ( ! event->segment ) {
1472 rc = -ENOMEM;
1473 goto err_alloc_segment;
1474 }
1475 memset ( event->segment, 0, sizeof ( event->segment[0] ) );
1476 event->segment[0].base = cpu_to_le64 ( virt_to_phys ( event->trb ) );
1477 event->segment[0].count = cpu_to_le32 ( count );
1478
1479 /* Program event ring registers */
1480 writel ( 1, xhci->run + XHCI_RUN_ERSTSZ ( 0 ) );
1481 if ( ( rc = xhci_writeq ( xhci, virt_to_phys ( event->trb ),
1482 xhci->run + XHCI_RUN_ERDP ( 0 ) ) ) != 0 )
1483 goto err_writeq_erdp;
1484 if ( ( rc = xhci_writeq ( xhci, virt_to_phys ( event->segment ),
1485 xhci->run + XHCI_RUN_ERSTBA ( 0 ) ) ) != 0 )
1486 goto err_writeq_erstba;
1487
1488 DBGC2 ( xhci, "XHCI %p event ring [%08lx,%08lx) table [%08lx,%08lx)\n",
1489 xhci, virt_to_phys ( event->trb ),
1490 ( virt_to_phys ( event->trb ) + len ),
1491 virt_to_phys ( event->segment ),
1492 ( virt_to_phys ( event->segment ) +
1493 sizeof (event->segment[0] ) ) );
1494 return 0;
1495
1496 xhci_writeq ( xhci, 0, xhci->run + XHCI_RUN_ERSTBA ( 0 ) );
1497 err_writeq_erstba:
1498 xhci_writeq ( xhci, 0, xhci->run + XHCI_RUN_ERDP ( 0 ) );
1499 err_writeq_erdp:
1500 free_dma ( event->trb, len );
1501 err_alloc_segment:
1502 free_dma ( event->segment, sizeof ( event->segment[0] ) );
1503 err_alloc_trb:
1504 return rc;
1505 }
1506
1507 /**
1508 * Free event ring
1509 *
1510 * @v xhci xHCI device
1511 */
1512 static void xhci_event_free ( struct xhci_device *xhci ) {
1513 struct xhci_event_ring *event = &xhci->event;
1514 unsigned int count;
1515 size_t len;
1516
1517 /* Clear event ring registers */
1518 writel ( 0, xhci->run + XHCI_RUN_ERSTSZ ( 0 ) );
1519 xhci_writeq ( xhci, 0, xhci->run + XHCI_RUN_ERSTBA ( 0 ) );
1520 xhci_writeq ( xhci, 0, xhci->run + XHCI_RUN_ERDP ( 0 ) );
1521
1522 /* Free event ring segment table */
1523 free_dma ( event->segment, sizeof ( event->segment[0] ) );
1524
1525 /* Free event ring */
1526 count = ( 1 << XHCI_EVENT_TRBS_LOG2 );
1527 len = ( count * sizeof ( event->trb[0] ) );
1528 free_dma ( event->trb, len );
1529 }
1530
/**
 * Handle transfer event
 *
 * @v xhci		xHCI device
 * @v transfer		Transfer event TRB
 *
 * Dequeues the corresponding transfer TRB(s) and reports completion
 * (or failure) of the associated I/O buffer to the USB core.
 */
static void xhci_transfer ( struct xhci_device *xhci,
			    struct xhci_trb_transfer *transfer ) {
	struct xhci_slot *slot;
	struct xhci_endpoint *endpoint;
	struct io_buffer *iobuf;
	int rc;

	/* Profile transfer events */
	profile_start ( &xhci_transfer_profiler );

	/* Identify slot; drop (with a hex dump) events referencing an
	 * out-of-range or unpopulated slot.
	 */
	if ( ( transfer->slot > xhci->slots ) ||
	     ( ( slot = xhci->slot[transfer->slot] ) == NULL ) ) {
		DBGC ( xhci, "XHCI %p transfer event invalid slot %d:\n",
		       xhci, transfer->slot );
		DBGC_HDA ( xhci, 0, transfer, sizeof ( *transfer ) );
		return;
	}

	/* Identify endpoint; drop events referencing an invalid or
	 * unopened endpoint context ID.
	 */
	if ( ( transfer->endpoint > XHCI_CTX_END ) ||
	     ( ( endpoint = slot->endpoint[transfer->endpoint] ) == NULL ) ) {
		DBGC ( xhci, "XHCI %p slot %d transfer event invalid epid "
		       "%d:\n", xhci, slot->id, transfer->endpoint );
		DBGC_HDA ( xhci, 0, transfer, sizeof ( *transfer ) );
		return;
	}

	/* Dequeue TRB(s) for this transfer */
	iobuf = xhci_dequeue_multi ( &endpoint->ring );
	assert ( iobuf != NULL );

	/* Check for errors (short packets are treated as success) */
	if ( ! ( ( transfer->code == XHCI_CMPLT_SUCCESS ) ||
		 ( transfer->code == XHCI_CMPLT_SHORT ) ) ) {

		/* Construct error from the completion code */
		rc = -ECODE ( transfer->code );
		DBGC ( xhci, "XHCI %p slot %d ctx %d failed (code %d): %s\n",
		       xhci, slot->id, endpoint->ctx, transfer->code,
		       strerror ( rc ) );
		DBGC_HDA ( xhci, 0, transfer, sizeof ( *transfer ) );

		/* Sanity check: a failed endpoint should no longer be
		 * in the Running state.
		 */
		assert ( ( endpoint->context->state & XHCI_ENDPOINT_STATE_MASK )
			 != XHCI_ENDPOINT_RUNNING );

		/* Report failure to USB core */
		usb_complete_err ( endpoint->ep, iobuf, rc );
		return;
	}

	/* Record actual transfer size by trimming off the untransferred
	 * residual length.
	 */
	iob_unput ( iobuf, le16_to_cpu ( transfer->residual ) );

	/* Sanity check (for successful completions only): the event's
	 * TRB pointer should match our consumed position.
	 */
	assert ( xhci_ring_consumed ( &endpoint->ring ) ==
		 le64_to_cpu ( transfer->transfer ) );

	/* Report completion to USB core */
	usb_complete ( endpoint->ep, iobuf );
	profile_stop ( &xhci_transfer_profiler );
}
1600
/**
 * Handle command completion event
 *
 * @v xhci		xHCI device
 * @v complete		Command completion event
 *
 * Matches the completion against the single outstanding command (if
 * any) and copies the completion TRB back to the caller's buffer.
 */
static void xhci_complete ( struct xhci_device *xhci,
			    struct xhci_trb_complete *complete ) {
	int rc;

	/* Ignore "command ring stopped" notifications (generated by
	 * command aborts).
	 */
	if ( complete->code == XHCI_CMPLT_CMD_STOPPED ) {
		DBGC2 ( xhci, "XHCI %p command ring stopped\n", xhci );
		return;
	}

	/* Ignore unexpected completions: only one command is ever
	 * outstanding (recorded in xhci->pending).
	 */
	if ( ! xhci->pending ) {
		rc = -ECODE ( complete->code );
		DBGC ( xhci, "XHCI %p unexpected completion (code %d): %s\n",
		       xhci, complete->code, strerror ( rc ) );
		DBGC_HDA ( xhci, 0, complete, sizeof ( *complete ) );
		return;
	}

	/* Dequeue command TRB */
	xhci_dequeue ( &xhci->command );

	/* Sanity check: the event's command TRB pointer should match
	 * our consumed position.
	 */
	assert ( xhci_ring_consumed ( &xhci->command ) ==
		 le64_to_cpu ( complete->command ) );

	/* Record completion by overwriting the pending command TRB,
	 * and clear the pending pointer to signal completion.
	 */
	memcpy ( xhci->pending, complete, sizeof ( *xhci->pending ) );
	xhci->pending = NULL;
}
1637
1638 /**
1639 * Handle port status event
1640 *
1641 * @v xhci xHCI device
1642 * @v port Port status event
1643 */
1644 static void xhci_port_status ( struct xhci_device *xhci,
1645 struct xhci_trb_port_status *port ) {
1646 uint32_t portsc;
1647
1648 /* Sanity check */
1649 assert ( ( port->port > 0 ) && ( port->port <= xhci->ports ) );
1650
1651 /* Clear port status change bits */
1652 portsc = readl ( xhci->op + XHCI_OP_PORTSC ( port->port ) );
1653 portsc &= ( XHCI_PORTSC_PRESERVE | XHCI_PORTSC_CHANGE );
1654 writel ( portsc, xhci->op + XHCI_OP_PORTSC ( port->port ) );
1655
1656 /* Report port status change */
1657 usb_port_changed ( usb_port ( xhci->bus->hub, port->port ) );
1658 }
1659
1660 /**
1661 * Handle host controller event
1662 *
1663 * @v xhci xHCI device
1664 * @v host Host controller event
1665 */
1666 static void xhci_host_controller ( struct xhci_device *xhci,
1667 struct xhci_trb_host_controller *host ) {
1668 int rc;
1669
1670 /* Construct error */
1671 rc = -ECODE ( host->code );
1672 DBGC ( xhci, "XHCI %p host controller event (code %d): %s\n",
1673 xhci, host->code, strerror ( rc ) );
1674 }
1675
1676 /**
1677 * Poll event ring
1678 *
1679 * @v xhci xHCI device
1680 */
1681 static void xhci_event_poll ( struct xhci_device *xhci ) {
1682 struct xhci_event_ring *event = &xhci->event;
1683 union xhci_trb *trb;
1684 unsigned int shift = XHCI_EVENT_TRBS_LOG2;
1685 unsigned int count = ( 1 << shift );
1686 unsigned int mask = ( count - 1 );
1687 unsigned int consumed;
1688 unsigned int type;
1689
1690 /* Poll for events */
1691 profile_start ( &xhci_event_profiler );
1692 for ( consumed = 0 ; ; consumed++ ) {
1693
1694 /* Stop if we reach an empty TRB */
1695 rmb();
1696 trb = &event->trb[ event->cons & mask ];
1697 if ( ! ( ( trb->common.flags ^
1698 ( event->cons >> shift ) ) & XHCI_TRB_C ) )
1699 break;
1700
1701 /* Handle TRB */
1702 type = ( trb->common.type & XHCI_TRB_TYPE_MASK );
1703 switch ( type ) {
1704
1705 case XHCI_TRB_TRANSFER :
1706 xhci_transfer ( xhci, &trb->transfer );
1707 break;
1708
1709 case XHCI_TRB_COMPLETE :
1710 xhci_complete ( xhci, &trb->complete );
1711 break;
1712
1713 case XHCI_TRB_PORT_STATUS:
1714 xhci_port_status ( xhci, &trb->port );
1715 break;
1716
1717 case XHCI_TRB_HOST_CONTROLLER:
1718 xhci_host_controller ( xhci, &trb->host );
1719 break;
1720
1721 default:
1722 DBGC ( xhci, "XHCI %p unrecognised event %#x\n:",
1723 xhci, event->cons );
1724 DBGC_HDA ( xhci, virt_to_phys ( trb ),
1725 trb, sizeof ( *trb ) );
1726 break;
1727 }
1728
1729 /* Consume this TRB */
1730 event->cons++;
1731 }
1732
1733 /* Update dequeue pointer if applicable */
1734 if ( consumed ) {
1735 xhci_writeq ( xhci, virt_to_phys ( trb ),
1736 xhci->run + XHCI_RUN_ERDP ( 0 ) );
1737 profile_stop ( &xhci_event_profiler );
1738 }
1739 }
1740
/**
 * Abort command
 *
 * @v xhci		xHCI device
 *
 * Aborts the currently executing command and restores the command
 * ring to a clean state so that further commands may be issued.
 */
static void xhci_abort ( struct xhci_device *xhci ) {
	physaddr_t crp;

	/* Abort the command by setting the Command Abort bit */
	DBGC2 ( xhci, "XHCI %p aborting command\n", xhci );
	xhci_writeq ( xhci, XHCI_CRCR_CA, xhci->op + XHCI_OP_CRCR );

	/* Allow time for command to abort */
	mdelay ( XHCI_COMMAND_ABORT_DELAY_MS );

	/* Sanity check: the command ring should no longer be running */
	assert ( ( readl ( xhci->op + XHCI_OP_CRCR ) & XHCI_CRCR_CRR ) == 0 );

	/* Consume (and ignore) any final command status */
	xhci_event_poll ( xhci );

	/* Reset the software ring state and reprogram the command ring
	 * control register with the ring base and initial cycle state.
	 */
	xhci_ring_reset ( &xhci->command );
	crp = virt_to_phys ( xhci->command.trb );
	xhci_writeq ( xhci, ( crp | XHCI_CRCR_RCS ), xhci->op + XHCI_OP_CRCR );
}
1767
/**
 * Issue command and wait for completion
 *
 * @v xhci		xHCI device
 * @v trb		Transfer request block (with empty Cycle flag)
 * @ret rc		Return status code
 *
 * On a successful completion, the TRB will be overwritten with the
 * completion.
 */
static int xhci_command ( struct xhci_device *xhci, union xhci_trb *trb ) {
	struct xhci_trb_complete *complete = &trb->complete;
	unsigned int i;
	int rc;

	/* Record the pending command; xhci_complete() copies the
	 * completion event into this TRB and clears the pointer.
	 */
	xhci->pending = trb;

	/* Enqueue the command */
	if ( ( rc = xhci_enqueue ( &xhci->command, NULL, trb ) ) != 0 )
		goto err_enqueue;

	/* Ring the command doorbell */
	xhci_doorbell ( &xhci->command );

	/* Busy-wait (polling the event ring) for the command to
	 * complete, up to XHCI_COMMAND_MAX_WAIT_MS milliseconds.
	 */
	for ( i = 0 ; i < XHCI_COMMAND_MAX_WAIT_MS ; i++ ) {

		/* Poll event ring */
		xhci_event_poll ( xhci );

		/* Check for completion (pending pointer cleared) */
		if ( ! xhci->pending ) {
			if ( complete->code != XHCI_CMPLT_SUCCESS ) {
				rc = -ECODE ( complete->code );
				DBGC ( xhci, "XHCI %p command failed (code "
				       "%d): %s\n", xhci, complete->code,
				       strerror ( rc ) );
				DBGC_HDA ( xhci, 0, trb, sizeof ( *trb ) );
				return rc;
			}
			return 0;
		}

		/* Delay */
		mdelay ( 1 );
	}

	/* Timeout */
	DBGC ( xhci, "XHCI %p timed out waiting for completion\n", xhci );
	rc = -ETIMEDOUT;

	/* Abort command so the ring is usable for future commands */
	xhci_abort ( xhci );

 err_enqueue:
	/* Ensure no stale pending pointer remains on any error path */
	xhci->pending = NULL;
	return rc;
}
1827
1828 /**
1829 * Issue NOP and wait for completion
1830 *
1831 * @v xhci xHCI device
1832 * @ret rc Return status code
1833 */
1834 static inline int xhci_nop ( struct xhci_device *xhci ) {
1835 union xhci_trb trb;
1836 struct xhci_trb_common *nop = &trb.common;
1837 int rc;
1838
1839 /* Construct command */
1840 memset ( nop, 0, sizeof ( *nop ) );
1841 nop->flags = XHCI_TRB_IOC;
1842 nop->type = XHCI_TRB_NOP_CMD;
1843
1844 /* Issue command and wait for completion */
1845 if ( ( rc = xhci_command ( xhci, &trb ) ) != 0 )
1846 return rc;
1847
1848 return 0;
1849 }
1850
1851 /**
1852 * Enable slot
1853 *
1854 * @v xhci xHCI device
1855 * @v type Slot type
1856 * @ret slot Device slot ID, or negative error
1857 */
1858 static inline int xhci_enable_slot ( struct xhci_device *xhci,
1859 unsigned int type ) {
1860 union xhci_trb trb;
1861 struct xhci_trb_enable_slot *enable = &trb.enable;
1862 struct xhci_trb_complete *enabled = &trb.complete;
1863 unsigned int slot;
1864 int rc;
1865
1866 /* Construct command */
1867 memset ( enable, 0, sizeof ( *enable ) );
1868 enable->slot = type;
1869 enable->type = XHCI_TRB_ENABLE_SLOT;
1870
1871 /* Issue command and wait for completion */
1872 if ( ( rc = xhci_command ( xhci, &trb ) ) != 0 ) {
1873 DBGC ( xhci, "XHCI %p could not enable new slot: %s\n",
1874 xhci, strerror ( rc ) );
1875 return rc;
1876 }
1877
1878 /* Extract slot number */
1879 slot = enabled->slot;
1880
1881 DBGC2 ( xhci, "XHCI %p slot %d enabled\n", xhci, slot );
1882 return slot;
1883 }
1884
1885 /**
1886 * Disable slot
1887 *
1888 * @v xhci xHCI device
1889 * @v slot Device slot
1890 * @ret rc Return status code
1891 */
1892 static inline int xhci_disable_slot ( struct xhci_device *xhci,
1893 unsigned int slot ) {
1894 union xhci_trb trb;
1895 struct xhci_trb_disable_slot *disable = &trb.disable;
1896 int rc;
1897
1898 /* Construct command */
1899 memset ( disable, 0, sizeof ( *disable ) );
1900 disable->type = XHCI_TRB_DISABLE_SLOT;
1901 disable->slot = slot;
1902
1903 /* Issue command and wait for completion */
1904 if ( ( rc = xhci_command ( xhci, &trb ) ) != 0 ) {
1905 DBGC ( xhci, "XHCI %p could not disable slot %d: %s\n",
1906 xhci, slot, strerror ( rc ) );
1907 return rc;
1908 }
1909
1910 DBGC2 ( xhci, "XHCI %p slot %d disabled\n", xhci, slot );
1911 return 0;
1912 }
1913
/**
 * Issue context-based command and wait for completion
 *
 * @v xhci		xHCI device
 * @v slot		Device slot
 * @v endpoint		Endpoint
 * @v type		TRB type
 * @v populate		Input context populater
 * @ret rc		Return status code
 *
 * Allocates a temporary input context, fills it via the supplied
 * callback, and issues the given context-based command.  The input
 * context is freed before returning, on both success and failure.
 */
static int xhci_context ( struct xhci_device *xhci, struct xhci_slot *slot,
			  struct xhci_endpoint *endpoint, unsigned int type,
			  void ( * populate ) ( struct xhci_device *xhci,
						struct xhci_slot *slot,
						struct xhci_endpoint *endpoint,
						void *input ) ) {
	union xhci_trb trb;
	struct xhci_trb_context *context = &trb.context;
	size_t len;
	void *input;
	int rc;

	/* Allocate an input context large enough for all contexts */
	len = xhci_input_context_offset ( xhci, XHCI_CTX_END );
	input = malloc_dma ( len, xhci_align ( len ) );
	if ( ! input ) {
		rc = -ENOMEM;
		goto err_alloc;
	}
	memset ( input, 0, len );

	/* Populate input context via caller-supplied callback */
	populate ( xhci, slot, endpoint, input );

	/* Construct command referencing the input context by physical
	 * address.
	 */
	memset ( context, 0, sizeof ( *context ) );
	context->type = type;
	context->input = cpu_to_le64 ( virt_to_phys ( input ) );
	context->slot = slot->id;

	/* Issue command and wait for completion.  Success and failure
	 * share the same unwind path (freeing the input context).
	 */
	if ( ( rc = xhci_command ( xhci, &trb ) ) != 0 )
		goto err_command;

 err_command:
	free_dma ( input, len );
 err_alloc:
	return rc;
}
1963
/**
 * Populate address device input context
 *
 * @v xhci		xHCI device
 * @v slot		Device slot
 * @v endpoint		Endpoint
 * @v input		Input context
 *
 * Fills in the control, slot, and control-endpoint (EP0) contexts
 * required by the Address Device command.
 */
static void xhci_address_device_input ( struct xhci_device *xhci,
					struct xhci_slot *slot,
					struct xhci_endpoint *endpoint,
					void *input ) {
	struct xhci_control_context *control_ctx;
	struct xhci_slot_context *slot_ctx;
	struct xhci_endpoint_context *ep_ctx;

	/* Sanity checks: addressing uses only the control endpoint */
	assert ( endpoint->ctx == XHCI_CTX_EP0 );

	/* Populate control context: add the slot and EP0 contexts */
	control_ctx = input;
	control_ctx->add = cpu_to_le32 ( ( 1 << XHCI_CTX_SLOT ) |
					 ( 1 << XHCI_CTX_EP0 ) );

	/* Populate slot context with routing and port information */
	slot_ctx = ( input + xhci_input_context_offset ( xhci, XHCI_CTX_SLOT ));
	slot_ctx->info = cpu_to_le32 ( XHCI_SLOT_INFO ( 1, 0, slot->psiv,
							slot->route ) );
	slot_ctx->port = slot->port;
	slot_ctx->tt_id = slot->tt_id;
	slot_ctx->tt_port = slot->tt_port;

	/* Populate control endpoint context, including the transfer
	 * ring dequeue pointer with the initial Dequeue Cycle State.
	 */
	ep_ctx = ( input + xhci_input_context_offset ( xhci, XHCI_CTX_EP0 ) );
	ep_ctx->type = XHCI_EP_TYPE_CONTROL;
	ep_ctx->burst = endpoint->ep->burst;
	ep_ctx->mtu = cpu_to_le16 ( endpoint->ep->mtu );
	ep_ctx->dequeue = cpu_to_le64 ( virt_to_phys ( endpoint->ring.trb ) |
					XHCI_EP_DCS );
	ep_ctx->trb_len = cpu_to_le16 ( XHCI_EP0_TRB_LEN );
}
2005
2006 /**
2007 * Address device
2008 *
2009 * @v xhci xHCI device
2010 * @v slot Device slot
2011 * @ret rc Return status code
2012 */
2013 static inline int xhci_address_device ( struct xhci_device *xhci,
2014 struct xhci_slot *slot ) {
2015 struct usb_device *usb = slot->usb;
2016 struct xhci_slot_context *slot_ctx;
2017 int rc;
2018
2019 /* Assign device address */
2020 if ( ( rc = xhci_context ( xhci, slot, slot->endpoint[XHCI_CTX_EP0],
2021 XHCI_TRB_ADDRESS_DEVICE,
2022 xhci_address_device_input ) ) != 0 )
2023 return rc;
2024
2025 /* Get assigned address */
2026 slot_ctx = ( slot->context +
2027 xhci_device_context_offset ( xhci, XHCI_CTX_SLOT ) );
2028 usb->address = slot_ctx->address;
2029 DBGC2 ( xhci, "XHCI %p assigned address %d to %s\n",
2030 xhci, usb->address, usb->name );
2031
2032 return 0;
2033 }
2034
/**
 * Populate configure endpoint input context
 *
 * @v xhci		xHCI device
 * @v slot		Device slot
 * @v endpoint		Endpoint
 * @v input		Input context
 *
 * Fills in the control, slot, and target endpoint contexts required
 * by the Configure Endpoint command.
 */
static void xhci_configure_endpoint_input ( struct xhci_device *xhci,
					    struct xhci_slot *slot,
					    struct xhci_endpoint *endpoint,
					    void *input ) {
	struct xhci_control_context *control_ctx;
	struct xhci_slot_context *slot_ctx;
	struct xhci_endpoint_context *ep_ctx;

	/* Populate control context: add the slot context and the
	 * context for the endpoint being configured.
	 */
	control_ctx = input;
	control_ctx->add = cpu_to_le32 ( ( 1 << XHCI_CTX_SLOT ) |
					 ( 1 << endpoint->ctx ) );

	/* Populate slot context */
	slot_ctx = ( input + xhci_input_context_offset ( xhci, XHCI_CTX_SLOT ));
	slot_ctx->info = cpu_to_le32 ( XHCI_SLOT_INFO ( ( XHCI_CTX_END - 1 ),
							( slot->ports ? 1 : 0 ),
							slot->psiv, 0 ) );
	slot_ctx->ports = slot->ports;

	/* Populate endpoint context, including the transfer ring
	 * dequeue pointer with the initial Dequeue Cycle State.
	 */
	ep_ctx = ( input + xhci_input_context_offset ( xhci, endpoint->ctx ) );
	ep_ctx->interval = endpoint->interval;
	ep_ctx->type = endpoint->type;
	ep_ctx->burst = endpoint->ep->burst;
	ep_ctx->mtu = cpu_to_le16 ( endpoint->ep->mtu );
	ep_ctx->dequeue = cpu_to_le64 ( virt_to_phys ( endpoint->ring.trb ) |
					XHCI_EP_DCS );
	ep_ctx->trb_len = cpu_to_le16 ( endpoint->ep->mtu ); /* best guess */
}
2073
2074 /**
2075 * Configure endpoint
2076 *
2077 * @v xhci xHCI device
2078 * @v slot Device slot
2079 * @v endpoint Endpoint
2080 * @ret rc Return status code
2081 */
2082 static inline int xhci_configure_endpoint ( struct xhci_device *xhci,
2083 struct xhci_slot *slot,
2084 struct xhci_endpoint *endpoint ) {
2085 int rc;
2086
2087 /* Configure endpoint */
2088 if ( ( rc = xhci_context ( xhci, slot, endpoint,
2089 XHCI_TRB_CONFIGURE_ENDPOINT,
2090 xhci_configure_endpoint_input ) ) != 0 )
2091 return rc;
2092
2093 DBGC2 ( xhci, "XHCI %p slot %d ctx %d configured\n",
2094 xhci, slot->id, endpoint->ctx );
2095 return 0;
2096 }
2097
2098 /**
2099 * Populate deconfigure endpoint input context
2100 *
2101 * @v xhci xHCI device
2102 * @v slot Device slot
2103 * @v endpoint Endpoint
2104 * @v input Input context
2105 */
2106 static void
2107 xhci_deconfigure_endpoint_input ( struct xhci_device *xhci __unused,
2108 struct xhci_slot *slot __unused,
2109 struct xhci_endpoint *endpoint,
2110 void *input ) {
2111 struct xhci_control_context *control_ctx;
2112 struct xhci_slot_context *slot_ctx;
2113
2114 /* Populate control context */
2115 control_ctx = input;
2116 control_ctx->add = cpu_to_le32 ( 1 << XHCI_CTX_SLOT );
2117 control_ctx->drop = cpu_to_le32 ( 1 << endpoint->ctx );
2118
2119 /* Populate slot context */
2120 slot_ctx = ( input + xhci_input_context_offset ( xhci, XHCI_CTX_SLOT ));
2121 slot_ctx->info = cpu_to_le32 ( XHCI_SLOT_INFO ( ( XHCI_CTX_END - 1 ),
2122 0, 0, 0 ) );
2123 }
2124
2125 /**
2126 * Deconfigure endpoint
2127 *
2128 * @v xhci xHCI device
2129 * @v slot Device slot
2130 * @v endpoint Endpoint
2131 * @ret rc Return status code
2132 */
2133 static inline int xhci_deconfigure_endpoint ( struct xhci_device *xhci,
2134 struct xhci_slot *slot,
2135 struct xhci_endpoint *endpoint ) {
2136 int rc;
2137
2138 /* Deconfigure endpoint */
2139 if ( ( rc = xhci_context ( xhci, slot, endpoint,
2140 XHCI_TRB_CONFIGURE_ENDPOINT,
2141 xhci_deconfigure_endpoint_input ) ) != 0 )
2142 return rc;
2143
2144 DBGC2 ( xhci, "XHCI %p slot %d ctx %d deconfigured\n",
2145 xhci, slot->id, endpoint->ctx );
2146 return 0;
2147 }
2148
/**
 * Populate evaluate context input context
 *
 * Input context factory used by xhci_context() when issuing an
 * Evaluate Context command (used here to update the endpoint MTU).
 *
 * @v xhci		xHCI device
 * @v slot		Device slot
 * @v endpoint		Endpoint
 * @v input		Input context
 */
static void xhci_evaluate_context_input ( struct xhci_device *xhci,
					  struct xhci_slot *slot __unused,
					  struct xhci_endpoint *endpoint,
					  void *input ) {
	struct xhci_control_context *control_ctx;
	struct xhci_slot_context *slot_ctx;
	struct xhci_endpoint_context *ep_ctx;

	/* Populate control context: add both the slot context and the
	 * target endpoint's context.
	 */
	control_ctx = input;
	control_ctx->add = cpu_to_le32 ( ( 1 << XHCI_CTX_SLOT ) |
					 ( 1 << endpoint->ctx ) );

	/* Populate slot context */
	slot_ctx = ( input + xhci_input_context_offset ( xhci, XHCI_CTX_SLOT ));
	slot_ctx->info = cpu_to_le32 ( XHCI_SLOT_INFO ( ( XHCI_CTX_END - 1 ),
							0, 0, 0 ) );

	/* Populate endpoint context with the updated MTU */
	ep_ctx = ( input + xhci_input_context_offset ( xhci, endpoint->ctx ) );
	ep_ctx->mtu = cpu_to_le16 ( endpoint->ep->mtu );
}
2179
2180 /**
2181 * Evaluate context
2182 *
2183 * @v xhci xHCI device
2184 * @v slot Device slot
2185 * @v endpoint Endpoint
2186 * @ret rc Return status code
2187 */
2188 static inline int xhci_evaluate_context ( struct xhci_device *xhci,
2189 struct xhci_slot *slot,
2190 struct xhci_endpoint *endpoint ) {
2191 int rc;
2192
2193 /* Configure endpoint */
2194 if ( ( rc = xhci_context ( xhci, slot, endpoint,
2195 XHCI_TRB_EVALUATE_CONTEXT,
2196 xhci_evaluate_context_input ) ) != 0 )
2197 return rc;
2198
2199 DBGC2 ( xhci, "XHCI %p slot %d ctx %d (re-)evaluated\n",
2200 xhci, slot->id, endpoint->ctx );
2201 return 0;
2202 }
2203
/**
 * Reset endpoint
 *
 * Issues a Reset Endpoint command to recover an endpoint from the
 * halted state.
 *
 * @v xhci		xHCI device
 * @v slot		Device slot
 * @v endpoint		Endpoint
 * @ret rc		Return status code
 */
static inline int xhci_reset_endpoint ( struct xhci_device *xhci,
					struct xhci_slot *slot,
					struct xhci_endpoint *endpoint ) {
	union xhci_trb trb;
	struct xhci_trb_reset_endpoint *reset = &trb.reset;
	int rc;

	/* Construct Reset Endpoint command TRB */
	memset ( reset, 0, sizeof ( *reset ) );
	reset->slot = slot->id;
	reset->endpoint = endpoint->ctx;
	reset->type = XHCI_TRB_RESET_ENDPOINT;

	/* Issue command and wait for completion */
	if ( ( rc = xhci_command ( xhci, &trb ) ) != 0 ) {
		DBGC ( xhci, "XHCI %p slot %d ctx %d could not reset endpoint "
		       "in state %d: %s\n", xhci, slot->id, endpoint->ctx,
		       endpoint->context->state, strerror ( rc ) );
		return rc;
	}

	return 0;
}
2235
/**
 * Stop endpoint
 *
 * Issues a Stop Endpoint command to halt command/transfer processing
 * on an endpoint's transfer ring.
 *
 * @v xhci		xHCI device
 * @v slot		Device slot
 * @v endpoint		Endpoint
 * @ret rc		Return status code
 */
static inline int xhci_stop_endpoint ( struct xhci_device *xhci,
				       struct xhci_slot *slot,
				       struct xhci_endpoint *endpoint ) {
	union xhci_trb trb;
	struct xhci_trb_stop_endpoint *stop = &trb.stop;
	int rc;

	/* Construct Stop Endpoint command TRB */
	memset ( stop, 0, sizeof ( *stop ) );
	stop->slot = slot->id;
	stop->endpoint = endpoint->ctx;
	stop->type = XHCI_TRB_STOP_ENDPOINT;

	/* Issue command and wait for completion */
	if ( ( rc = xhci_command ( xhci, &trb ) ) != 0 ) {
		DBGC ( xhci, "XHCI %p slot %d ctx %d could not stop endpoint "
		       "in state %d: %s\n", xhci, slot->id, endpoint->ctx,
		       endpoint->context->state, strerror ( rc ) );
		return rc;
	}

	return 0;
}
2267
/**
 * Set transfer ring dequeue pointer
 *
 * Resynchronises the controller's view of the transfer ring with our
 * software consumer counter (used e.g. after a Reset Endpoint
 * command; see xhci_endpoint_reset()).
 *
 * @v xhci		xHCI device
 * @v slot		Device slot
 * @v endpoint		Endpoint
 * @ret rc		Return status code
 */
static inline int
xhci_set_tr_dequeue_pointer ( struct xhci_device *xhci,
			      struct xhci_slot *slot,
			      struct xhci_endpoint *endpoint ) {
	union xhci_trb trb;
	struct xhci_trb_set_tr_dequeue_pointer *dequeue = &trb.dequeue;
	struct xhci_trb_ring *ring = &endpoint->ring;
	unsigned int cons;
	unsigned int mask;
	unsigned int index;
	unsigned int dcs;
	int rc;

	/* Construct command: the new dequeue pointer is the TRB at
	 * the current consumer index, and the dequeue cycle state
	 * (DCS) bit is derived from the number of times the consumer
	 * counter has wrapped around the ring (cons >> shift).
	 */
	memset ( dequeue, 0, sizeof ( *dequeue ) );
	cons = ring->cons;
	mask = ring->mask;
	dcs = ( ( ~( cons >> ring->shift ) ) & XHCI_EP_DCS );
	index = ( cons & mask );
	dequeue->dequeue =
		cpu_to_le64 ( virt_to_phys ( &ring->trb[index] ) | dcs );
	dequeue->slot = slot->id;
	dequeue->endpoint = endpoint->ctx;
	dequeue->type = XHCI_TRB_SET_TR_DEQUEUE_POINTER;

	/* Issue command and wait for completion */
	if ( ( rc = xhci_command ( xhci, &trb ) ) != 0 ) {
		DBGC ( xhci, "XHCI %p slot %d ctx %d could not set TR dequeue "
		       "pointer in state %d: %s\n", xhci, slot->id,
		       endpoint->ctx, endpoint->context->state, strerror ( rc));
		return rc;
	}

	return 0;
}
2311
2312 /******************************************************************************
2313 *
2314 * Endpoint operations
2315 *
2316 ******************************************************************************
2317 */
2318
/**
 * Open endpoint
 *
 * Allocates the per-endpoint state and transfer ring, and (for
 * non-EP0 endpoints) informs the controller via a Configure Endpoint
 * command.
 *
 * @v ep		USB endpoint
 * @ret rc		Return status code
 */
static int xhci_endpoint_open ( struct usb_endpoint *ep ) {
	struct usb_device *usb = ep->usb;
	struct xhci_slot *slot = usb_get_hostdata ( usb );
	struct xhci_device *xhci = slot->xhci;
	struct xhci_endpoint *endpoint;
	unsigned int ctx;
	unsigned int type;
	unsigned int interval;
	int rc;

	/* Calculate context index from the endpoint address */
	ctx = XHCI_CTX ( ep->address );
	assert ( slot->endpoint[ctx] == NULL );

	/* Calculate endpoint type (control endpoints use a single
	 * bidirectional type; others get an IN flag as applicable).
	 */
	type = XHCI_EP_TYPE ( ep->attributes & USB_ENDPOINT_ATTR_TYPE_MASK );
	if ( type == XHCI_EP_TYPE ( USB_ENDPOINT_ATTR_CONTROL ) )
		type = XHCI_EP_TYPE_CONTROL;
	if ( ep->address & USB_DIR_IN )
		type |= XHCI_EP_TYPE_IN;

	/* Calculate interval: periodic endpoints encode it as a log2
	 * value; other endpoints use the raw value.
	 */
	if ( type & XHCI_EP_TYPE_PERIODIC ) {
		interval = ( fls ( ep->interval ) - 1 );
	} else {
		interval = ep->interval;
	}

	/* Allocate and initialise structure */
	endpoint = zalloc ( sizeof ( *endpoint ) );
	if ( ! endpoint ) {
		rc = -ENOMEM;
		goto err_alloc;
	}
	usb_endpoint_set_hostdata ( ep, endpoint );
	slot->endpoint[ctx] = endpoint;
	endpoint->xhci = xhci;
	endpoint->slot = slot;
	endpoint->ep = ep;
	endpoint->ctx = ctx;
	endpoint->type = type;
	endpoint->interval = interval;
	endpoint->context = ( ( ( void * ) slot->context ) +
			      xhci_device_context_offset ( xhci, ctx ) );

	/* Allocate transfer ring */
	if ( ( rc = xhci_ring_alloc ( xhci, &endpoint->ring,
				      XHCI_TRANSFER_TRBS_LOG2,
				      slot->id, ctx, 0 ) ) != 0 )
		goto err_ring_alloc;

	/* Configure endpoint, if applicable (EP0 is handled
	 * separately; see xhci_device_address()).
	 */
	if ( ( ctx != XHCI_CTX_EP0 ) &&
	     ( ( rc = xhci_configure_endpoint ( xhci, slot, endpoint ) ) != 0 ))
		goto err_configure_endpoint;

	DBGC2 ( xhci, "XHCI %p slot %d ctx %d ring [%08lx,%08lx)\n",
		xhci, slot->id, ctx, virt_to_phys ( endpoint->ring.trb ),
		( virt_to_phys ( endpoint->ring.trb ) + endpoint->ring.len ) );
	return 0;

	/* Full unwind path: statements above each label are reached
	 * only if a later step were to fail (deliberate iPXE style).
	 */
	xhci_deconfigure_endpoint ( xhci, slot, endpoint );
 err_configure_endpoint:
	xhci_ring_free ( &endpoint->ring );
 err_ring_alloc:
	slot->endpoint[ctx] = NULL;
	free ( endpoint );
 err_alloc:
	return rc;
}
2395
/**
 * Close endpoint
 *
 * Deconfigures the endpoint (for non-EP0 endpoints), cancels any
 * outstanding transfers, and frees the per-endpoint state.
 *
 * @v ep		USB endpoint
 */
static void xhci_endpoint_close ( struct usb_endpoint *ep ) {
	struct xhci_endpoint *endpoint = usb_endpoint_get_hostdata ( ep );
	struct xhci_slot *slot = endpoint->slot;
	struct xhci_device *xhci = slot->xhci;
	struct io_buffer *iobuf;
	unsigned int ctx = endpoint->ctx;

	/* Deconfigure endpoint, if applicable */
	if ( ctx != XHCI_CTX_EP0 )
		xhci_deconfigure_endpoint ( xhci, slot, endpoint );

	/* Cancel any incomplete transfers, reporting each I/O buffer
	 * back to the USB core as cancelled.
	 */
	while ( xhci_ring_fill ( &endpoint->ring ) ) {
		iobuf = xhci_dequeue_multi ( &endpoint->ring );
		usb_complete_err ( ep, iobuf, -ECANCELED );
	}

	/* Free endpoint */
	xhci_ring_free ( &endpoint->ring );
	slot->endpoint[ctx] = NULL;
	free ( endpoint );
}
2423
/**
 * Reset endpoint
 *
 * Recovers a halted endpoint: resets the endpoint context, repoints
 * the hardware at our current transfer ring position, and rings the
 * doorbell to resume processing.
 *
 * @v ep		USB endpoint
 * @ret rc		Return status code
 */
static int xhci_endpoint_reset ( struct usb_endpoint *ep ) {
	struct xhci_endpoint *endpoint = usb_endpoint_get_hostdata ( ep );
	struct xhci_slot *slot = endpoint->slot;
	struct xhci_device *xhci = slot->xhci;
	int rc;

	/* Reset endpoint context */
	if ( ( rc = xhci_reset_endpoint ( xhci, slot, endpoint ) ) != 0 )
		return rc;

	/* Set transfer ring dequeue pointer (must follow the reset) */
	if ( ( rc = xhci_set_tr_dequeue_pointer ( xhci, slot, endpoint ) ) != 0)
		return rc;

	/* Ring doorbell to resume processing */
	xhci_doorbell ( &endpoint->ring );

	DBGC ( xhci, "XHCI %p slot %d ctx %d reset\n",
	       xhci, slot->id, endpoint->ctx );
	return 0;
}
2451
2452 /**
2453 * Update MTU
2454 *
2455 * @v ep USB endpoint
2456 * @ret rc Return status code
2457 */
2458 static int xhci_endpoint_mtu ( struct usb_endpoint *ep ) {
2459 struct xhci_endpoint *endpoint = usb_endpoint_get_hostdata ( ep );
2460 struct xhci_slot *slot = endpoint->slot;
2461 struct xhci_device *xhci = slot->xhci;
2462 int rc;
2463
2464 /* Evalulate context */
2465 if ( ( rc = xhci_evaluate_context ( xhci, slot, endpoint ) ) != 0 )
2466 return rc;
2467
2468 return 0;
2469 }
2470
/**
 * Enqueue message transfer
 *
 * Builds the setup/data/status TRB sequence for a control transfer
 * and hands it to the transfer ring.
 *
 * @v ep		USB endpoint
 * @v packet		Setup packet
 * @v iobuf		I/O buffer
 * @ret rc		Return status code
 */
static int xhci_endpoint_message ( struct usb_endpoint *ep,
				   struct usb_setup_packet *packet,
				   struct io_buffer *iobuf ) {
	struct xhci_endpoint *endpoint = usb_endpoint_get_hostdata ( ep );
	unsigned int input = ( le16_to_cpu ( packet->request ) & USB_DIR_IN );
	size_t len = iob_len ( iobuf );
	union xhci_trb trbs[ 1 /* setup */ + 1 /* possible data */ +
			     1 /* status */ ];
	union xhci_trb *trb = trbs;
	struct xhci_trb_setup *setup;
	struct xhci_trb_data *data;
	struct xhci_trb_status *status;
	int rc;

	/* Profile message transfers */
	profile_start ( &xhci_message_profiler );

	/* Construct setup stage TRB: the setup packet is carried as
	 * immediate data within the TRB itself (XHCI_TRB_IDT).
	 */
	memset ( trbs, 0, sizeof ( trbs ) );
	setup = &(trb++)->setup;
	memcpy ( &setup->packet, packet, sizeof ( setup->packet ) );
	setup->len = cpu_to_le32 ( sizeof ( *packet ) );
	setup->flags = XHCI_TRB_IDT;
	setup->type = XHCI_TRB_SETUP;
	if ( len )
		setup->direction = ( input ? XHCI_SETUP_IN : XHCI_SETUP_OUT );

	/* Construct data stage TRB, if applicable */
	if ( len ) {
		data = &(trb++)->data;
		data->data = cpu_to_le64 ( virt_to_phys ( iobuf->data ) );
		data->len = cpu_to_le32 ( len );
		data->type = XHCI_TRB_DATA;
		data->direction = ( input ? XHCI_DATA_IN : XHCI_DATA_OUT );
	}

	/* Construct status stage TRB: status direction is opposite to
	 * the data direction (and IN when there is no data stage);
	 * completion interrupt (IOC) is requested on this final TRB.
	 */
	status = &(trb++)->status;
	status->flags = XHCI_TRB_IOC;
	status->type = XHCI_TRB_STATUS;
	status->direction =
		( ( len && input ) ? XHCI_STATUS_OUT : XHCI_STATUS_IN );

	/* Enqueue TRBs */
	if ( ( rc = xhci_enqueue_multi ( &endpoint->ring, iobuf, trbs,
					 ( trb - trbs ) ) ) != 0 )
		return rc;

	/* Ring the doorbell */
	xhci_doorbell ( &endpoint->ring );

	profile_stop ( &xhci_message_profiler );
	return 0;
}
2533
/**
 * Enqueue stream transfer
 *
 * @v ep		USB endpoint
 * @v iobuf		I/O buffer
 * @v terminate		Terminate using a short packet
 * @ret rc		Return status code
 */
static int xhci_endpoint_stream ( struct usb_endpoint *ep,
				  struct io_buffer *iobuf, int terminate ) {
	struct xhci_endpoint *endpoint = usb_endpoint_get_hostdata ( ep );
	union xhci_trb trbs[ 1 /* Normal */ + 1 /* Possible zero-length */ ];
	union xhci_trb *trb = trbs;
	struct xhci_trb_normal *normal;
	size_t len = iob_len ( iobuf );
	int rc;

	/* Profile stream transfers */
	profile_start ( &xhci_stream_profiler );

	/* Construct normal TRBs.  If short-packet termination is
	 * requested and the length is an exact multiple of the
	 * endpoint MTU, chain on an extra zero-length TRB to generate
	 * the terminating short packet; the completion interrupt
	 * (IOC) flag goes on the final TRB in either case.  (The mask
	 * test assumes the MTU is a power of two — TODO confirm.)
	 */
	memset ( &trbs, 0, sizeof ( trbs ) );
	normal = &(trb++)->normal;
	normal->data = cpu_to_le64 ( virt_to_phys ( iobuf->data ) );
	normal->len = cpu_to_le32 ( len );
	normal->type = XHCI_TRB_NORMAL;
	if ( terminate && ( ( len & ( ep->mtu - 1 ) ) == 0 ) ) {
		normal->flags = XHCI_TRB_CH;
		normal = &(trb++)->normal;
		normal->type = XHCI_TRB_NORMAL;
	}
	normal->flags = XHCI_TRB_IOC;

	/* Enqueue TRBs */
	if ( ( rc = xhci_enqueue_multi ( &endpoint->ring, iobuf, trbs,
					 ( trb - trbs ) ) ) != 0 )
		return rc;

	/* Ring the doorbell */
	xhci_doorbell ( &endpoint->ring );

	profile_stop ( &xhci_stream_profiler );
	return 0;
}
2578
2579 /******************************************************************************
2580 *
2581 * Device operations
2582 *
2583 ******************************************************************************
2584 */
2585
/**
 * Open device
 *
 * Enables a device slot and allocates the DMA-visible device context,
 * recording its address in the Device Context Base Address Array.
 *
 * @v usb		USB device
 * @ret rc		Return status code
 */
static int xhci_device_open ( struct usb_device *usb ) {
	struct xhci_device *xhci = usb_bus_get_hostdata ( usb->port->hub->bus );
	struct usb_port *tt = usb_transaction_translator ( usb );
	struct xhci_slot *slot;
	struct xhci_slot *tt_slot;
	size_t len;
	int type;
	int id;
	int rc;

	/* Determine applicable slot type for the root hub port */
	type = xhci_port_slot_type ( xhci, usb->port->address );
	if ( type < 0 ) {
		rc = type;
		DBGC ( xhci, "XHCI %p port %d has no slot type\n",
		       xhci, usb->port->address );
		goto err_type;
	}

	/* Allocate a device slot number */
	id = xhci_enable_slot ( xhci, type );
	if ( id < 0 ) {
		rc = id;
		goto err_enable_slot;
	}
	assert ( xhci->slot[id] == NULL );

	/* Allocate and initialise structure */
	slot = zalloc ( sizeof ( *slot ) );
	if ( ! slot ) {
		rc = -ENOMEM;
		goto err_alloc;
	}
	usb_set_hostdata ( usb, slot );
	xhci->slot[id] = slot;
	slot->xhci = xhci;
	slot->usb = usb;
	slot->id = id;
	/* Record transaction translator details, if applicable */
	if ( tt ) {
		tt_slot = usb_get_hostdata ( tt->hub->usb );
		slot->tt_id = tt_slot->id;
		slot->tt_port = tt->address;
	}

	/* Allocate a device context (zeroed, DMA-capable, aligned as
	 * required by the controller).
	 */
	len = xhci_device_context_offset ( xhci, XHCI_CTX_END );
	slot->context = malloc_dma ( len, xhci_align ( len ) );
	if ( ! slot->context ) {
		rc = -ENOMEM;
		goto err_alloc_context;
	}
	memset ( slot->context, 0, len );

	/* Set device context base address */
	assert ( xhci->dcbaa[id] == 0 );
	xhci->dcbaa[id] = cpu_to_le64 ( virt_to_phys ( slot->context ) );

	DBGC2 ( xhci, "XHCI %p slot %d device context [%08lx,%08lx) for %s\n",
		xhci, slot->id, virt_to_phys ( slot->context ),
		( virt_to_phys ( slot->context ) + len ), usb->name );
	return 0;

	/* Full unwind path: statements above each label are reached
	 * only if a later step were to fail (deliberate iPXE style).
	 */
	xhci->dcbaa[id] = 0;
	free_dma ( slot->context, len );
 err_alloc_context:
	xhci->slot[id] = NULL;
	free ( slot );
 err_alloc:
	xhci_disable_slot ( xhci, id );
 err_enable_slot:
 err_type:
	return rc;
}
2665
/**
 * Close device
 *
 * Disables the device slot and frees associated state.  If the slot
 * cannot be disabled, the context memory is deliberately leaked (see
 * below) since the hardware may still write to it.
 *
 * @v usb		USB device
 */
static void xhci_device_close ( struct usb_device *usb ) {
	struct xhci_slot *slot = usb_get_hostdata ( usb );
	struct xhci_device *xhci = slot->xhci;
	size_t len = xhci_device_context_offset ( xhci, XHCI_CTX_END );
	unsigned int id = slot->id;
	int rc;

	/* Disable slot */
	if ( ( rc = xhci_disable_slot ( xhci, id ) ) != 0 ) {
		/* Slot is still enabled.  Leak the slot context,
		 * since the controller may still write to this
		 * memory, and leave the DCBAA entry intact.
		 *
		 * If the controller later reports that this same slot
		 * has been re-enabled, then some assertions will be
		 * triggered.
		 */
		DBGC ( xhci, "XHCI %p slot %d leaking context memory\n",
		       xhci, slot->id );
		slot->context = NULL;
	}

	/* Free slot (context memory only if it was not leaked above) */
	if ( slot->context ) {
		free_dma ( slot->context, len );
		xhci->dcbaa[id] = 0;
	}
	xhci->slot[id] = NULL;
	free ( slot );
}
2701
2702 /**
2703 * Assign device address
2704 *
2705 * @v usb USB device
2706 * @ret rc Return status code
2707 */
2708 static int xhci_device_address ( struct usb_device *usb ) {
2709 struct xhci_slot *slot = usb_get_hostdata ( usb );
2710 struct xhci_device *xhci = slot->xhci;
2711 struct usb_port *port = usb->port;
2712 struct usb_port *root_port;
2713 int psiv;
2714 int rc;
2715
2716 /* Calculate route string */
2717 slot->route = usb_route_string ( usb );
2718
2719 /* Calculate root hub port number */
2720 root_port = usb_root_hub_port ( usb );
2721 slot->port = root_port->address;
2722
2723 /* Calculate protocol speed ID */
2724 psiv = xhci_port_psiv ( xhci, slot->port, port->speed );
2725 if ( psiv < 0 ) {
2726 rc = psiv;
2727 return rc;
2728 }
2729 slot->psiv = psiv;
2730
2731 /* Address device */
2732 if ( ( rc = xhci_address_device ( xhci, slot ) ) != 0 )
2733 return rc;
2734
2735 return 0;
2736 }
2737
2738 /******************************************************************************
2739 *
2740 * Bus operations
2741 *
2742 ******************************************************************************
2743 */
2744
/**
 * Open USB bus
 *
 * Allocates all controller data structures (slot array, DCBAA,
 * scratchpads, command and event rings) and starts the controller.
 *
 * @v bus		USB bus
 * @ret rc		Return status code
 */
static int xhci_bus_open ( struct usb_bus *bus ) {
	struct xhci_device *xhci = usb_bus_get_hostdata ( bus );
	int rc;

	/* Allocate device slot array */
	xhci->slot = zalloc ( xhci->slots * sizeof ( xhci->slot[0] ) );
	if ( ! xhci->slot ) {
		rc = -ENOMEM;
		goto err_slot_alloc;
	}

	/* Allocate device context base address array */
	if ( ( rc = xhci_dcbaa_alloc ( xhci ) ) != 0 )
		goto err_dcbaa_alloc;

	/* Allocate scratchpad buffers */
	if ( ( rc = xhci_scratchpad_alloc ( xhci ) ) != 0 )
		goto err_scratchpad_alloc;

	/* Allocate command ring */
	if ( ( rc = xhci_command_alloc ( xhci ) ) != 0 )
		goto err_command_alloc;

	/* Allocate event ring */
	if ( ( rc = xhci_event_alloc ( xhci ) ) != 0 )
		goto err_event_alloc;

	/* Start controller */
	xhci_run ( xhci );

	return 0;

	/* Full unwind path: statements above each label are reached
	 * only if a later step were to fail (deliberate iPXE style).
	 */
	xhci_stop ( xhci );
	xhci_event_free ( xhci );
 err_event_alloc:
	xhci_command_free ( xhci );
 err_command_alloc:
	xhci_scratchpad_free ( xhci );
 err_scratchpad_alloc:
	xhci_dcbaa_free ( xhci );
 err_dcbaa_alloc:
	free ( xhci->slot );
 err_slot_alloc:
	return rc;
}
2796
/**
 * Close USB bus
 *
 * Stops the controller and frees all resources allocated by
 * xhci_bus_open(), in reverse order of allocation.
 *
 * @v bus		USB bus
 */
static void xhci_bus_close ( struct usb_bus *bus ) {
	struct xhci_device *xhci = usb_bus_get_hostdata ( bus );
	unsigned int i;

	/* Sanity checks: all device slots must already be closed */
	assert ( xhci->slot != NULL );
	for ( i = 0 ; i < xhci->slots ; i++ )
		assert ( xhci->slot[i] == NULL );

	xhci_stop ( xhci );
	xhci_event_free ( xhci );
	xhci_command_free ( xhci );
	xhci_scratchpad_free ( xhci );
	xhci_dcbaa_free ( xhci );
	free ( xhci->slot );
}
2818
/**
 * Poll USB bus
 *
 * @v bus		USB bus
 */
static void xhci_bus_poll ( struct usb_bus *bus ) {
	struct xhci_device *xhci;

	/* All completions are reported via the event ring; polling it
	 * is the only work required here.
	 */
	xhci = usb_bus_get_hostdata ( bus );
	xhci_event_poll ( xhci );
}
2830
2831 /******************************************************************************
2832 *
2833 * Hub operations
2834 *
2835 ******************************************************************************
2836 */
2837
2838 /**
2839 * Open hub
2840 *
2841 * @v hub USB hub
2842 * @ret rc Return status code
2843 */
2844 static int xhci_hub_open ( struct usb_hub *hub ) {
2845 struct xhci_slot *slot;
2846
2847 /* Do nothing if this is the root hub */
2848 if ( ! hub->usb )
2849 return 0;
2850
2851 /* Get device slot */
2852 slot = usb_get_hostdata ( hub->usb );
2853
2854 /* Update device slot hub parameters. We don't inform the
2855 * hardware of this information until the hub's interrupt
2856 * endpoint is opened, since the only mechanism for so doing
2857 * provided by the xHCI specification is a Configure Endpoint
2858 * command, and we can't issue that command until we have a
2859 * non-EP0 endpoint to configure.
2860 */
2861 slot->ports = hub->ports;
2862
2863 return 0;
2864 }
2865
2866 /**
2867 * Close hub
2868 *
2869 * @v hub USB hub
2870 */
2871 static void xhci_hub_close ( struct usb_hub *hub __unused ) {
2872
2873 /* Nothing to do */
2874 }
2875
2876 /******************************************************************************
2877 *
2878 * Root hub operations
2879 *
2880 ******************************************************************************
2881 */
2882
/**
 * Open root hub
 *
 * Powers up all root hub ports and nudges any USB3 ports that power
 * up in the Disabled link state into RxDetect.
 *
 * @v hub		USB hub
 * @ret rc		Return status code
 */
static int xhci_root_open ( struct usb_hub *hub ) {
	struct usb_bus *bus = hub->bus;
	struct xhci_device *xhci = usb_bus_get_hostdata ( bus );
	struct usb_port *port;
	uint32_t portsc;
	unsigned int i;

	/* Enable power to all ports (masking with
	 * XHCI_PORTSC_PRESERVE to avoid inadvertently writing any
	 * status bits).
	 */
	for ( i = 1 ; i <= xhci->ports ; i++ ) {
		portsc = readl ( xhci->op + XHCI_OP_PORTSC ( i ) );
		portsc &= XHCI_PORTSC_PRESERVE;
		portsc |= XHCI_PORTSC_PP;
		writel ( portsc, xhci->op + XHCI_OP_PORTSC ( i ) );
	}

	/* xHCI spec requires us to potentially wait 20ms after
	 * enabling power to a port.
	 */
	mdelay ( XHCI_PORT_POWER_DELAY_MS );

	/* USB3 ports may power up as Disabled */
	for ( i = 1 ; i <= xhci->ports ; i++ ) {
		portsc = readl ( xhci->op + XHCI_OP_PORTSC ( i ) );
		port = usb_port ( hub, i );
		if ( ( port->protocol >= USB_PROTO_3_0 ) &&
		     ( ( portsc & XHCI_PORTSC_PLS_MASK ) ==
		       XHCI_PORTSC_PLS_DISABLED ) ) {
			/* Force link state to RxDetect */
			portsc &= XHCI_PORTSC_PRESERVE;
			portsc |= ( XHCI_PORTSC_PLS_RXDETECT | XHCI_PORTSC_LWS);
			writel ( portsc, xhci->op + XHCI_OP_PORTSC ( i ) );
		}
	}

	/* Some xHCI cards seem to require an additional delay after
	 * setting the link state to RxDetect.
	 */
	mdelay ( XHCI_LINK_STATE_DELAY_MS );

	/* Record hub driver private data */
	usb_hub_set_drvdata ( hub, xhci );

	return 0;
}
2933
2934 /**
2935 * Close root hub
2936 *
2937 * @v hub USB hub
2938 */
2939 static void xhci_root_close ( struct usb_hub *hub ) {
2940
2941 /* Clear hub driver private data */
2942 usb_hub_set_drvdata ( hub, NULL );
2943 }
2944
/**
 * Enable port
 *
 * Initiates a port reset and polls until the port reports enabled or
 * a timeout expires.
 *
 * @v hub		USB hub
 * @v port		USB port
 * @ret rc		Return status code
 */
static int xhci_root_enable ( struct usb_hub *hub, struct usb_port *port ) {
	struct xhci_device *xhci = usb_hub_get_drvdata ( hub );
	uint32_t portsc;
	unsigned int i;

	/* Reset port by setting the Port Reset (PR) bit */
	portsc = readl ( xhci->op + XHCI_OP_PORTSC ( port->address ) );
	portsc &= XHCI_PORTSC_PRESERVE;
	portsc |= XHCI_PORTSC_PR;
	writel ( portsc, xhci->op + XHCI_OP_PORTSC ( port->address ) );

	/* Wait for port to become enabled, polling at 1ms intervals */
	for ( i = 0 ; i < XHCI_PORT_RESET_MAX_WAIT_MS ; i++ ) {

		/* Check port status */
		portsc = readl ( xhci->op + XHCI_OP_PORTSC ( port->address ) );
		if ( portsc & XHCI_PORTSC_PED )
			return 0;

		/* Delay */
		mdelay ( 1 );
	}

	DBGC ( xhci, "XHCI %p timed out waiting for port %d to enable\n",
	       xhci, port->address );
	return -ETIMEDOUT;
}
2979
2980 /**
2981 * Disable port
2982 *
2983 * @v hub USB hub
2984 * @v port USB port
2985 * @ret rc Return status code
2986 */
2987 static int xhci_root_disable ( struct usb_hub *hub, struct usb_port *port ) {
2988 struct xhci_device *xhci = usb_hub_get_drvdata ( hub );
2989 uint32_t portsc;
2990
2991 /* Disable port */
2992 portsc = readl ( xhci->op + XHCI_OP_PORTSC ( port->address ) );
2993 portsc &= XHCI_PORTSC_PRESERVE;
2994 portsc |= XHCI_PORTSC_PED;
2995 writel ( portsc, xhci->op + XHCI_OP_PORTSC ( port->address ) );
2996
2997 return 0;
2998 }
2999
/**
 * Update root hub port speed
 *
 * Reads PORTSC and translates the protocol speed ID into a generic
 * USB speed, recorded in the port structure.
 *
 * @v hub		USB hub
 * @v port		USB port
 * @ret rc		Return status code
 */
static int xhci_root_speed ( struct usb_hub *hub, struct usb_port *port ) {
	struct xhci_device *xhci = usb_hub_get_drvdata ( hub );
	uint32_t portsc;
	unsigned int psiv;
	int ccs;
	int ped;
	int speed;
	int rc;

	/* Read port status */
	portsc = readl ( xhci->op + XHCI_OP_PORTSC ( port->address ) );
	DBGC2 ( xhci, "XHCI %p port %d status is %08x\n",
		xhci, port->address, portsc );

	/* Check whether or not port is connected */
	ccs = ( portsc & XHCI_PORTSC_CCS );
	if ( ! ccs ) {
		port->speed = USB_SPEED_NONE;
		return 0;
	}

	/* For USB2 ports, the PSIV field is not valid until the port
	 * completes reset and becomes enabled; report full speed as a
	 * provisional value until then.
	 */
	ped = ( portsc & XHCI_PORTSC_PED );
	if ( ( port->protocol < USB_PROTO_3_0 ) && ! ped ) {
		port->speed = USB_SPEED_FULL;
		return 0;
	}

	/* Get port speed and map to generic USB speed */
	psiv = XHCI_PORTSC_PSIV ( portsc );
	speed = xhci_port_speed ( xhci, port->address, psiv );
	if ( speed < 0 ) {
		rc = speed;
		return rc;
	}

	port->speed = speed;
	return 0;
}
3048
3049 /**
3050 * Clear transaction translator buffer
3051 *
3052 * @v hub USB hub
3053 * @v port USB port
3054 * @v ep USB endpoint
3055 * @ret rc Return status code
3056 */
3057 static int xhci_root_clear_tt ( struct usb_hub *hub, struct usb_port *port,
3058 struct usb_endpoint *ep ) {
3059 struct ehci_device *ehci = usb_hub_get_drvdata ( hub );
3060
3061 /* Should never be called; this is a root hub */
3062 DBGC ( ehci, "XHCI %p port %d nonsensical CLEAR_TT for %s endpoint "
3063 "%02x\n", ehci, port->address, ep->usb->name, ep->address );
3064
3065 return -ENOTSUP;
3066 }
3067
3068 /******************************************************************************
3069 *
3070 * PCI interface
3071 *
3072 ******************************************************************************
3073 */
3074
/** USB host controller operations */
static struct usb_host_operations xhci_operations = {
	/* Endpoint operations */
	.endpoint = {
		.open = xhci_endpoint_open,
		.close = xhci_endpoint_close,
		.reset = xhci_endpoint_reset,
		.mtu = xhci_endpoint_mtu,
		.message = xhci_endpoint_message,
		.stream = xhci_endpoint_stream,
	},
	/* Device operations */
	.device = {
		.open = xhci_device_open,
		.close = xhci_device_close,
		.address = xhci_device_address,
	},
	/* Bus operations */
	.bus = {
		.open = xhci_bus_open,
		.close = xhci_bus_close,
		.poll = xhci_bus_poll,
	},
	/* Hub operations (non-root hubs) */
	.hub = {
		.open = xhci_hub_open,
		.close = xhci_hub_close,
	},
	/* Root hub operations */
	.root = {
		.open = xhci_root_open,
		.close = xhci_root_close,
		.enable = xhci_root_enable,
		.disable = xhci_root_disable,
		.speed = xhci_root_speed,
		.clear_tt = xhci_root_clear_tt,
	},
};
3108
/**
 * Fix Intel PCH-specific quirks
 *
 * Original register values are saved in xhci->pch so that
 * xhci_pch_undo() can restore them on removal.  (The *PRM registers
 * appear to be masks of the ports eligible for rerouting/SuperSpeed
 * — confirm against the Intel PCH datasheet.)
 *
 * @v xhci		xHCI device
 * @v pci		PCI device
 */
static void xhci_pch_fix ( struct xhci_device *xhci, struct pci_device *pci ) {
	struct xhci_pch *pch = &xhci->pch;
	uint32_t xusb2pr;
	uint32_t xusb2prm;
	uint32_t usb3pssen;
	uint32_t usb3prm;

	/* Enable SuperSpeed capability.  Do this before rerouting
	 * USB2 ports, so that USB3 devices connect at SuperSpeed.
	 */
	pci_read_config_dword ( pci, XHCI_PCH_USB3PSSEN, &usb3pssen );
	pci_read_config_dword ( pci, XHCI_PCH_USB3PRM, &usb3prm );
	if ( usb3prm & ~usb3pssen ) {
		DBGC ( xhci, "XHCI %p enabling SuperSpeed on ports %08x\n",
		       xhci, ( usb3prm & ~usb3pssen ) );
	}
	pch->usb3pssen = usb3pssen;
	usb3pssen |= usb3prm;
	pci_write_config_dword ( pci, XHCI_PCH_USB3PSSEN, usb3pssen );

	/* Route USB2 ports from EHCI to xHCI */
	pci_read_config_dword ( pci, XHCI_PCH_XUSB2PR, &xusb2pr );
	pci_read_config_dword ( pci, XHCI_PCH_XUSB2PRM, &xusb2prm );
	if ( xusb2prm & ~xusb2pr ) {
		DBGC ( xhci, "XHCI %p routing ports %08x from EHCI to xHCI\n",
		       xhci, ( xusb2prm & ~xusb2pr ) );
	}
	pch->xusb2pr = xusb2pr;
	xusb2pr |= xusb2prm;
	pci_write_config_dword ( pci, XHCI_PCH_XUSB2PR, xusb2pr );
}
3146
3147 /**
3148 * Undo Intel PCH-specific quirk fixes
3149 *
3150 * @v xhci xHCI device
3151 * @v pci PCI device
3152 */
3153 static void xhci_pch_undo ( struct xhci_device *xhci, struct pci_device *pci ) {
3154 struct xhci_pch *pch = &xhci->pch;
3155
3156 /* Restore USB2 port routing to original state */
3157 pci_write_config_dword ( pci, XHCI_PCH_XUSB2PR, pch->xusb2pr );
3158
3159 /* Restore SuperSpeed capability to original state */
3160 pci_write_config_dword ( pci, XHCI_PCH_USB3PSSEN, pch->usb3pssen );
3161 }
3162
/**
 * Probe PCI device
 *
 * Maps the controller's registers, claims ownership from BIOS legacy
 * support, resets the controller, and registers the USB bus.
 *
 * @v pci		PCI device
 * @ret rc		Return status code
 */
static int xhci_probe ( struct pci_device *pci ) {
	struct xhci_device *xhci;
	struct usb_port *port;
	unsigned long bar_start;
	size_t bar_size;
	unsigned int i;
	int rc;

	/* Allocate and initialise structure */
	xhci = zalloc ( sizeof ( *xhci ) );
	if ( ! xhci ) {
		rc = -ENOMEM;
		goto err_alloc;
	}

	/* Fix up PCI device */
	adjust_pci_device ( pci );

	/* Map memory BAR containing the register set */
	bar_start = pci_bar_start ( pci, XHCI_BAR );
	bar_size = pci_bar_size ( pci, XHCI_BAR );
	xhci->regs = ioremap ( bar_start, bar_size );
	if ( ! xhci->regs ) {
		rc = -ENODEV;
		goto err_ioremap;
	}

	/* Initialise xHCI device */
	xhci_init ( xhci, xhci->regs );

	/* Initialise USB legacy support and claim ownership */
	xhci_legacy_init ( xhci );
	xhci_legacy_claim ( xhci );

	/* Fix Intel PCH-specific quirks, if applicable */
	if ( pci->id->driver_data & XHCI_PCH )
		xhci_pch_fix ( xhci, pci );

	/* Reset device */
	if ( ( rc = xhci_reset ( xhci ) ) != 0 )
		goto err_reset;

	/* Allocate USB bus */
	xhci->bus = alloc_usb_bus ( &pci->dev, xhci->ports, XHCI_MTU,
				    &xhci_operations );
	if ( ! xhci->bus ) {
		rc = -ENOMEM;
		goto err_alloc_bus;
	}
	usb_bus_set_hostdata ( xhci->bus, xhci );
	usb_hub_set_drvdata ( xhci->bus->hub, xhci );

	/* Set port protocols */
	for ( i = 1 ; i <= xhci->ports ; i++ ) {
		port = usb_port ( xhci->bus->hub, i );
		port->protocol = xhci_port_protocol ( xhci, i );
	}

	/* Register USB bus */
	if ( ( rc = register_usb_bus ( xhci->bus ) ) != 0 )
		goto err_register;

	pci_set_drvdata ( pci, xhci );
	return 0;

	/* Full unwind path: statements above each label are reached
	 * only if a later step were to fail (deliberate iPXE style).
	 */
	unregister_usb_bus ( xhci->bus );
 err_register:
	free_usb_bus ( xhci->bus );
 err_alloc_bus:
	xhci_reset ( xhci );
 err_reset:
	if ( pci->id->driver_data & XHCI_PCH )
		xhci_pch_undo ( xhci, pci );
	xhci_legacy_release ( xhci );
	iounmap ( xhci->regs );
 err_ioremap:
	free ( xhci );
 err_alloc:
	return rc;
}
3249
/**
 * Remove PCI device
 *
 * Tears down in the reverse order of xhci_probe().
 *
 * @v pci		PCI device
 */
static void xhci_remove ( struct pci_device *pci ) {
	struct xhci_device *xhci = pci_get_drvdata ( pci );
	struct usb_bus *bus = xhci->bus;

	unregister_usb_bus ( bus );
	free_usb_bus ( bus );
	xhci_reset ( xhci );
	if ( pci->id->driver_data & XHCI_PCH )
		xhci_pch_undo ( xhci, pci );
	xhci_legacy_release ( xhci );
	iounmap ( xhci->regs );
	free ( xhci );
}
3268
/** XHCI PCI device IDs */
static struct pci_device_id xhci_ids[] = {
	/* Intel controllers get the PCH quirk fixes; presumably the
	 * 0xffff entries act as wildcards in conjunction with the PCI
	 * class filter in xhci_driver below — verify against iPXE's
	 * PCI driver matching rules.
	 */
	PCI_ROM ( 0x8086, 0xffff, "xhci-pch", "xHCI (Intel PCH)", XHCI_PCH ),
	PCI_ROM ( 0xffff, 0xffff, "xhci", "xHCI", 0 ),
};
3274
/** XHCI PCI driver */
struct pci_driver xhci_driver __pci_driver = {
	.ids = xhci_ids,
	.id_count = ( sizeof ( xhci_ids ) / sizeof ( xhci_ids[0] ) ),
	/* Match on PCI class: serial bus / USB / xHCI */
	.class = PCI_CLASS ( PCI_CLASS_SERIAL, PCI_CLASS_SERIAL_USB,
			     PCI_CLASS_SERIAL_USB_XHCI ),
	.probe = xhci_probe,
	.remove = xhci_remove,
};
3284
/**
 * Prepare for exit
 *
 * @v booting		System is shutting down for OS boot
 */
static void xhci_shutdown ( int booting ) {
	/* If we are shutting down to boot an OS, then prevent the
	 * release of ownership back to BIOS.  (The flag is consumed
	 * by the legacy-support code; see xhci_legacy_release().)
	 */
	xhci_legacy_prevent_release = booting;
}
3296
/** Startup/shutdown function */
struct startup_fn xhci_startup __startup_fn ( STARTUP_LATE ) = {
	/* Only a shutdown hook is required; there is no startup action */
	.shutdown = xhci_shutdown,
};