/* [ipxe.git] src/drivers/usb/xhci.c */
1 /*
2 * Copyright (C) 2014 Michael Brown <mbrown@fensystems.co.uk>.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License as
6 * published by the Free Software Foundation; either version 2 of the
7 * License, or (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
17 * 02110-1301, USA.
18 *
19 * You can also choose to distribute this program under the terms of
20 * the Unmodified Binary Distribution Licence (as given in the file
21 * COPYING.UBDL), provided that you have satisfied its requirements.
22 */
23
24 FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
25
26 #include <stdlib.h>
27 #include <stdio.h>
28 #include <unistd.h>
29 #include <string.h>
30 #include <strings.h>
31 #include <errno.h>
32 #include <byteswap.h>
33 #include <ipxe/malloc.h>
34 #include <ipxe/pci.h>
35 #include <ipxe/usb.h>
36 #include <ipxe/init.h>
37 #include <ipxe/profile.h>
38 #include "xhci.h"
39
40 /** @file
41 *
42 * USB eXtensible Host Controller Interface (xHCI) driver
43 *
44 */
45
/* Fine-grained profilers covering the driver's transfer and event
 * handling paths (see ipxe/profile.h).
 */

/** Message transfer profiler */
static struct profiler xhci_message_profiler __profiler =
	{ .name = "xhci.message" };

/** Stream transfer profiler */
static struct profiler xhci_stream_profiler __profiler =
	{ .name = "xhci.stream" };

/** Event ring profiler */
static struct profiler xhci_event_profiler __profiler =
	{ .name = "xhci.event" };

/** Transfer event profiler */
static struct profiler xhci_transfer_profiler __profiler =
	{ .name = "xhci.transfer" };
61
62 /* Disambiguate the various error causes */
63 #define EIO_DATA \
64 __einfo_error ( EINFO_EIO_DATA )
65 #define EINFO_EIO_DATA \
66 __einfo_uniqify ( EINFO_EIO, ( 2 - 0 ), \
67 "Data buffer error" )
68 #define EIO_BABBLE \
69 __einfo_error ( EINFO_EIO_BABBLE )
70 #define EINFO_EIO_BABBLE \
71 __einfo_uniqify ( EINFO_EIO, ( 3 - 0 ), \
72 "Babble detected" )
73 #define EIO_USB \
74 __einfo_error ( EINFO_EIO_USB )
75 #define EINFO_EIO_USB \
76 __einfo_uniqify ( EINFO_EIO, ( 4 - 0 ), \
77 "USB transaction error" )
78 #define EIO_TRB \
79 __einfo_error ( EINFO_EIO_TRB )
80 #define EINFO_EIO_TRB \
81 __einfo_uniqify ( EINFO_EIO, ( 5 - 0 ), \
82 "TRB error" )
83 #define EIO_STALL \
84 __einfo_error ( EINFO_EIO_STALL )
85 #define EINFO_EIO_STALL \
86 __einfo_uniqify ( EINFO_EIO, ( 6 - 0 ), \
87 "Stall error" )
88 #define EIO_RESOURCE \
89 __einfo_error ( EINFO_EIO_RESOURCE )
90 #define EINFO_EIO_RESOURCE \
91 __einfo_uniqify ( EINFO_EIO, ( 7 - 0 ), \
92 "Resource error" )
93 #define EIO_BANDWIDTH \
94 __einfo_error ( EINFO_EIO_BANDWIDTH )
95 #define EINFO_EIO_BANDWIDTH \
96 __einfo_uniqify ( EINFO_EIO, ( 8 - 0 ), \
97 "Bandwidth error" )
98 #define EIO_NO_SLOTS \
99 __einfo_error ( EINFO_EIO_NO_SLOTS )
100 #define EINFO_EIO_NO_SLOTS \
101 __einfo_uniqify ( EINFO_EIO, ( 9 - 0 ), \
102 "No slots available" )
103 #define EIO_STREAM_TYPE \
104 __einfo_error ( EINFO_EIO_STREAM_TYPE )
105 #define EINFO_EIO_STREAM_TYPE \
106 __einfo_uniqify ( EINFO_EIO, ( 10 - 0 ), \
107 "Invalid stream type" )
108 #define EIO_SLOT \
109 __einfo_error ( EINFO_EIO_SLOT )
110 #define EINFO_EIO_SLOT \
111 __einfo_uniqify ( EINFO_EIO, ( 11 - 0 ), \
112 "Slot not enabled" )
113 #define EIO_ENDPOINT \
114 __einfo_error ( EINFO_EIO_ENDPOINT )
115 #define EINFO_EIO_ENDPOINT \
116 __einfo_uniqify ( EINFO_EIO, ( 12 - 0 ), \
117 "Endpoint not enabled" )
118 #define EIO_SHORT \
119 __einfo_error ( EINFO_EIO_SHORT )
120 #define EINFO_EIO_SHORT \
121 __einfo_uniqify ( EINFO_EIO, ( 13 - 0 ), \
122 "Short packet" )
123 #define EIO_UNDERRUN \
124 __einfo_error ( EINFO_EIO_UNDERRUN )
125 #define EINFO_EIO_UNDERRUN \
126 __einfo_uniqify ( EINFO_EIO, ( 14 - 0 ), \
127 "Ring underrun" )
128 #define EIO_OVERRUN \
129 __einfo_error ( EINFO_EIO_OVERRUN )
130 #define EINFO_EIO_OVERRUN \
131 __einfo_uniqify ( EINFO_EIO, ( 15 - 0 ), \
132 "Ring overrun" )
133 #define EIO_VF_RING_FULL \
134 __einfo_error ( EINFO_EIO_VF_RING_FULL )
135 #define EINFO_EIO_VF_RING_FULL \
136 __einfo_uniqify ( EINFO_EIO, ( 16 - 0 ), \
137 "Virtual function event ring full" )
138 #define EIO_PARAMETER \
139 __einfo_error ( EINFO_EIO_PARAMETER )
140 #define EINFO_EIO_PARAMETER \
141 __einfo_uniqify ( EINFO_EIO, ( 17 - 0 ), \
142 "Parameter error" )
143 #define EIO_BANDWIDTH_OVERRUN \
144 __einfo_error ( EINFO_EIO_BANDWIDTH_OVERRUN )
145 #define EINFO_EIO_BANDWIDTH_OVERRUN \
146 __einfo_uniqify ( EINFO_EIO, ( 18 - 0 ), \
147 "Bandwidth overrun" )
148 #define EIO_CONTEXT \
149 __einfo_error ( EINFO_EIO_CONTEXT )
150 #define EINFO_EIO_CONTEXT \
151 __einfo_uniqify ( EINFO_EIO, ( 19 - 0 ), \
152 "Context state error" )
153 #define EIO_NO_PING \
154 __einfo_error ( EINFO_EIO_NO_PING )
155 #define EINFO_EIO_NO_PING \
156 __einfo_uniqify ( EINFO_EIO, ( 20 - 0 ), \
157 "No ping response" )
158 #define EIO_RING_FULL \
159 __einfo_error ( EINFO_EIO_RING_FULL )
160 #define EINFO_EIO_RING_FULL \
161 __einfo_uniqify ( EINFO_EIO, ( 21 - 0 ), \
162 "Event ring full" )
163 #define EIO_INCOMPATIBLE \
164 __einfo_error ( EINFO_EIO_INCOMPATIBLE )
165 #define EINFO_EIO_INCOMPATIBLE \
166 __einfo_uniqify ( EINFO_EIO, ( 22 - 0 ), \
167 "Incompatible device" )
168 #define EIO_MISSED \
169 __einfo_error ( EINFO_EIO_MISSED )
170 #define EINFO_EIO_MISSED \
171 __einfo_uniqify ( EINFO_EIO, ( 23 - 0 ), \
172 "Missed service error" )
173 #define EIO_CMD_STOPPED \
174 __einfo_error ( EINFO_EIO_CMD_STOPPED )
175 #define EINFO_EIO_CMD_STOPPED \
176 __einfo_uniqify ( EINFO_EIO, ( 24 - 0 ), \
177 "Command ring stopped" )
178 #define EIO_CMD_ABORTED \
179 __einfo_error ( EINFO_EIO_CMD_ABORTED )
180 #define EINFO_EIO_CMD_ABORTED \
181 __einfo_uniqify ( EINFO_EIO, ( 25 - 0 ), \
182 "Command aborted" )
183 #define EIO_STOP \
184 __einfo_error ( EINFO_EIO_STOP )
185 #define EINFO_EIO_STOP \
186 __einfo_uniqify ( EINFO_EIO, ( 26 - 0 ), \
187 "Stopped" )
188 #define EIO_STOP_LEN \
189 __einfo_error ( EINFO_EIO_STOP_LEN )
190 #define EINFO_EIO_STOP_LEN \
191 __einfo_uniqify ( EINFO_EIO, ( 27 - 0 ), \
192 "Stopped - length invalid" )
193 #define EIO_STOP_SHORT \
194 __einfo_error ( EINFO_EIO_STOP_SHORT )
195 #define EINFO_EIO_STOP_SHORT \
196 __einfo_uniqify ( EINFO_EIO, ( 28 - 0 ), \
197 "Stopped - short packet" )
198 #define EIO_LATENCY \
199 __einfo_error ( EINFO_EIO_LATENCY )
200 #define EINFO_EIO_LATENCY \
201 __einfo_uniqify ( EINFO_EIO, ( 29 - 0 ), \
202 "Maximum exit latency too large" )
203 #define EIO_ISOCH \
204 __einfo_error ( EINFO_EIO_ISOCH )
205 #define EINFO_EIO_ISOCH \
206 __einfo_uniqify ( EINFO_EIO, ( 31 - 0 ), \
207 "Isochronous buffer overrun" )
208 #define EPROTO_LOST \
209 __einfo_error ( EINFO_EPROTO_LOST )
210 #define EINFO_EPROTO_LOST \
211 __einfo_uniqify ( EINFO_EPROTO, ( 32 - 32 ), \
212 "Event lost" )
213 #define EPROTO_UNDEFINED \
214 __einfo_error ( EINFO_EPROTO_UNDEFINED )
215 #define EINFO_EPROTO_UNDEFINED \
216 __einfo_uniqify ( EINFO_EPROTO, ( 33 - 32 ), \
217 "Undefined error" )
218 #define EPROTO_STREAM_ID \
219 __einfo_error ( EINFO_EPROTO_STREAM_ID )
220 #define EINFO_EPROTO_STREAM_ID \
221 __einfo_uniqify ( EINFO_EPROTO, ( 34 - 32 ), \
222 "Invalid stream ID" )
223 #define EPROTO_SECONDARY \
224 __einfo_error ( EINFO_EPROTO_SECONDARY )
225 #define EINFO_EPROTO_SECONDARY \
226 __einfo_uniqify ( EINFO_EPROTO, ( 35 - 32 ), \
227 "Secondary bandwidth error" )
228 #define EPROTO_SPLIT \
229 __einfo_error ( EINFO_EPROTO_SPLIT )
230 #define EINFO_EPROTO_SPLIT \
231 __einfo_uniqify ( EINFO_EPROTO, ( 36 - 32 ), \
232 "Split transaction error" )
/* Map an xHCI completion code to an iPXE error number.
 *
 * Codes below 32 map to disambiguated EIO errors, codes 32-63 map to
 * disambiguated EPROTO errors, and anything else maps to EFAULT.  The
 * EUNIQ() disambiguator encodes the original completion code (mod 32)
 * within the returned error number.
 */
#define ECODE(code)							\
	( ( (code) < 32 ) ?						\
	  EUNIQ ( EINFO_EIO, ( (code) & 31 ), EIO_DATA, EIO_BABBLE,	\
		  EIO_USB, EIO_TRB, EIO_STALL, EIO_RESOURCE,		\
		  EIO_BANDWIDTH, EIO_NO_SLOTS, EIO_STREAM_TYPE,		\
		  EIO_SLOT, EIO_ENDPOINT, EIO_SHORT, EIO_UNDERRUN,	\
		  EIO_OVERRUN, EIO_VF_RING_FULL, EIO_PARAMETER,		\
		  EIO_BANDWIDTH_OVERRUN, EIO_CONTEXT, EIO_NO_PING,	\
		  EIO_RING_FULL, EIO_INCOMPATIBLE, EIO_MISSED,		\
		  EIO_CMD_STOPPED, EIO_CMD_ABORTED, EIO_STOP,		\
		  EIO_STOP_LEN, EIO_STOP_SHORT, EIO_LATENCY,		\
		  EIO_ISOCH ) :						\
	  ( (code) < 64 ) ?						\
	  EUNIQ ( EINFO_EPROTO, ( (code) & 31 ), EPROTO_LOST,		\
		  EPROTO_UNDEFINED, EPROTO_STREAM_ID,			\
		  EPROTO_SECONDARY, EPROTO_SPLIT ) :			\
	  EFAULT )
250
251 /******************************************************************************
252 *
253 * Register access
254 *
255 ******************************************************************************
256 */
257
/**
 * Initialise device
 *
 * Locates the operational, runtime, and doorbell register banks from
 * the capability registers, and caches the structural and capability
 * parameters used throughout the driver.
 *
 * @v xhci		xHCI device
 * @v regs		MMIO registers
 */
static void xhci_init ( struct xhci_device *xhci, void *regs ) {
	uint32_t hcsparams1;
	uint32_t hcsparams2;
	uint32_t hccparams1;
	uint32_t pagesize;
	size_t caplength;
	size_t rtsoff;
	size_t dboff;

	/* Locate capability, operational, runtime, and doorbell
	 * registers.  The operational, runtime, and doorbell banks
	 * are located at offsets (CAPLENGTH, RTSOFF, DBOFF
	 * respectively) from the capability registers.
	 */
	xhci->cap = regs;
	caplength = readb ( xhci->cap + XHCI_CAP_CAPLENGTH );
	rtsoff = readl ( xhci->cap + XHCI_CAP_RTSOFF );
	dboff = readl ( xhci->cap + XHCI_CAP_DBOFF );
	xhci->op = ( xhci->cap + caplength );
	xhci->run = ( xhci->cap + rtsoff );
	xhci->db = ( xhci->cap + dboff );
	DBGC2 ( xhci, "XHCI %s cap %08lx op %08lx run %08lx db %08lx\n",
		xhci->name, virt_to_phys ( xhci->cap ),
		virt_to_phys ( xhci->op ), virt_to_phys ( xhci->run ),
		virt_to_phys ( xhci->db ) );

	/* Read structural parameters 1 (slot, interrupter, and port counts) */
	hcsparams1 = readl ( xhci->cap + XHCI_CAP_HCSPARAMS1 );
	xhci->slots = XHCI_HCSPARAMS1_SLOTS ( hcsparams1 );
	xhci->intrs = XHCI_HCSPARAMS1_INTRS ( hcsparams1 );
	xhci->ports = XHCI_HCSPARAMS1_PORTS ( hcsparams1 );
	DBGC ( xhci, "XHCI %s has %d slots %d intrs %d ports\n",
	       xhci->name, xhci->slots, xhci->intrs, xhci->ports );

	/* Read structural parameters 2 (number of scratchpad buffers
	 * the controller requires us to allocate)
	 */
	hcsparams2 = readl ( xhci->cap + XHCI_CAP_HCSPARAMS2 );
	xhci->scratch.count = XHCI_HCSPARAMS2_SCRATCHPADS ( hcsparams2 );
	DBGC2 ( xhci, "XHCI %s needs %d scratchpads\n",
		xhci->name, xhci->scratch.count );

	/* Read capability parameters 1 (64-bit addressing support,
	 * context size, and extended capability pointer)
	 */
	hccparams1 = readl ( xhci->cap + XHCI_CAP_HCCPARAMS1 );
	xhci->addr64 = XHCI_HCCPARAMS1_ADDR64 ( hccparams1 );
	xhci->csz_shift = XHCI_HCCPARAMS1_CSZ_SHIFT ( hccparams1 );
	xhci->xecp = XHCI_HCCPARAMS1_XECP ( hccparams1 );

	/* Read page size (must be a non-zero power of two) */
	pagesize = readl ( xhci->op + XHCI_OP_PAGESIZE );
	xhci->pagesize = XHCI_PAGESIZE ( pagesize );
	assert ( xhci->pagesize != 0 );
	assert ( ( ( xhci->pagesize ) & ( xhci->pagesize - 1 ) ) == 0 );
	DBGC2 ( xhci, "XHCI %s page size %zd bytes\n",
		xhci->name, xhci->pagesize );
}
314
315 /**
316 * Find extended capability
317 *
318 * @v xhci xHCI device
319 * @v id Capability ID
320 * @v offset Offset to previous extended capability instance, or zero
321 * @ret offset Offset to extended capability, or zero if not found
322 */
323 static unsigned int xhci_extended_capability ( struct xhci_device *xhci,
324 unsigned int id,
325 unsigned int offset ) {
326 uint32_t xecp;
327 unsigned int next;
328
329 /* Locate the extended capability */
330 while ( 1 ) {
331
332 /* Locate first or next capability as applicable */
333 if ( offset ) {
334 xecp = readl ( xhci->cap + offset );
335 next = XHCI_XECP_NEXT ( xecp );
336 } else {
337 next = xhci->xecp;
338 }
339 if ( ! next )
340 return 0;
341 offset += next;
342
343 /* Check if this is the requested capability */
344 xecp = readl ( xhci->cap + offset );
345 if ( XHCI_XECP_ID ( xecp ) == id )
346 return offset;
347 }
348 }
349
/**
 * Write potentially 64-bit register
 *
 * @v xhci		xHCI device
 * @v value		Value
 * @v reg		Register address
 * @ret rc		Return status code
 *
 * Marked always_inline so that the sizeof() check below becomes a
 * compile-time constant and the unused branch is eliminated.
 */
static inline __attribute__ (( always_inline )) int
xhci_writeq ( struct xhci_device *xhci, physaddr_t value, void *reg ) {

	/* If this is a 32-bit build, then this can never fail
	 * (allowing the compiler to optimise out the error path).
	 * The high dword is explicitly zeroed since physaddr_t cannot
	 * hold an address above 4GB on such builds.
	 */
	if ( sizeof ( value ) <= sizeof ( uint32_t ) ) {
		writel ( value, reg );
		writel ( 0, ( reg + sizeof ( uint32_t ) ) );
		return 0;
	}

	/* If the device does not support 64-bit addresses and this
	 * address is outside the 32-bit address space, then fail.
	 */
	if ( ( value & ~0xffffffffULL ) && ! xhci->addr64 ) {
		DBGC ( xhci, "XHCI %s cannot access address %lx\n",
		       xhci->name, value );
		return -ENOTSUP;
	}

	/* If this is a 64-bit build, then writeq() is available */
	writeq ( value, reg );
	return 0;
}
383
384 /**
385 * Calculate buffer alignment
386 *
387 * @v len Length
388 * @ret align Buffer alignment
389 *
390 * Determine alignment required for a buffer which must be aligned to
391 * at least XHCI_MIN_ALIGN and which must not cross a page boundary.
392 */
393 static inline size_t xhci_align ( size_t len ) {
394 size_t align;
395
396 /* Align to own length (rounded up to a power of two) */
397 align = ( 1 << fls ( len - 1 ) );
398
399 /* Round up to XHCI_MIN_ALIGN if needed */
400 if ( align < XHCI_MIN_ALIGN )
401 align = XHCI_MIN_ALIGN;
402
403 return align;
404 }
405
406 /**
407 * Calculate device context offset
408 *
409 * @v xhci xHCI device
410 * @v ctx Context index
411 */
412 static inline size_t xhci_device_context_offset ( struct xhci_device *xhci,
413 unsigned int ctx ) {
414
415 return ( XHCI_DCI ( ctx ) << xhci->csz_shift );
416 }
417
418 /**
419 * Calculate input context offset
420 *
421 * @v xhci xHCI device
422 * @v ctx Context index
423 */
424 static inline size_t xhci_input_context_offset ( struct xhci_device *xhci,
425 unsigned int ctx ) {
426
427 return ( XHCI_ICI ( ctx ) << xhci->csz_shift );
428 }
429
430 /******************************************************************************
431 *
432 * Diagnostics
433 *
434 ******************************************************************************
435 */
436
437 /**
438 * Dump host controller registers
439 *
440 * @v xhci xHCI device
441 */
442 static inline void xhci_dump ( struct xhci_device *xhci ) {
443 uint32_t usbcmd;
444 uint32_t usbsts;
445 uint32_t pagesize;
446 uint32_t dnctrl;
447 uint32_t config;
448
449 /* Do nothing unless debugging is enabled */
450 if ( ! DBG_LOG )
451 return;
452
453 /* Dump USBCMD */
454 usbcmd = readl ( xhci->op + XHCI_OP_USBCMD );
455 DBGC ( xhci, "XHCI %s USBCMD %08x%s%s\n", xhci->name, usbcmd,
456 ( ( usbcmd & XHCI_USBCMD_RUN ) ? " run" : "" ),
457 ( ( usbcmd & XHCI_USBCMD_HCRST ) ? " hcrst" : "" ) );
458
459 /* Dump USBSTS */
460 usbsts = readl ( xhci->op + XHCI_OP_USBSTS );
461 DBGC ( xhci, "XHCI %s USBSTS %08x%s\n", xhci->name, usbsts,
462 ( ( usbsts & XHCI_USBSTS_HCH ) ? " hch" : "" ) );
463
464 /* Dump PAGESIZE */
465 pagesize = readl ( xhci->op + XHCI_OP_PAGESIZE );
466 DBGC ( xhci, "XHCI %s PAGESIZE %08x\n", xhci->name, pagesize );
467
468 /* Dump DNCTRL */
469 dnctrl = readl ( xhci->op + XHCI_OP_DNCTRL );
470 DBGC ( xhci, "XHCI %s DNCTRL %08x\n", xhci->name, dnctrl );
471
472 /* Dump CONFIG */
473 config = readl ( xhci->op + XHCI_OP_CONFIG );
474 DBGC ( xhci, "XHCI %s CONFIG %08x\n", xhci->name, config );
475 }
476
477 /**
478 * Dump port registers
479 *
480 * @v xhci xHCI device
481 * @v port Port number
482 */
483 static inline void xhci_dump_port ( struct xhci_device *xhci,
484 unsigned int port ) {
485 uint32_t portsc;
486 uint32_t portpmsc;
487 uint32_t portli;
488 uint32_t porthlpmc;
489
490 /* Do nothing unless debugging is enabled */
491 if ( ! DBG_LOG )
492 return;
493
494 /* Dump PORTSC */
495 portsc = readl ( xhci->op + XHCI_OP_PORTSC ( port ) );
496 DBGC ( xhci, "XHCI %s-%d PORTSC %08x%s%s%s%s psiv=%d\n",
497 xhci->name, port, portsc,
498 ( ( portsc & XHCI_PORTSC_CCS ) ? " ccs" : "" ),
499 ( ( portsc & XHCI_PORTSC_PED ) ? " ped" : "" ),
500 ( ( portsc & XHCI_PORTSC_PR ) ? " pr" : "" ),
501 ( ( portsc & XHCI_PORTSC_PP ) ? " pp" : "" ),
502 XHCI_PORTSC_PSIV ( portsc ) );
503
504 /* Dump PORTPMSC */
505 portpmsc = readl ( xhci->op + XHCI_OP_PORTPMSC ( port ) );
506 DBGC ( xhci, "XHCI %s-%d PORTPMSC %08x\n", xhci->name, port, portpmsc );
507
508 /* Dump PORTLI */
509 portli = readl ( xhci->op + XHCI_OP_PORTLI ( port ) );
510 DBGC ( xhci, "XHCI %s-%d PORTLI %08x\n", xhci->name, port, portli );
511
512 /* Dump PORTHLPMC */
513 porthlpmc = readl ( xhci->op + XHCI_OP_PORTHLPMC ( port ) );
514 DBGC ( xhci, "XHCI %s-%d PORTHLPMC %08x\n",
515 xhci->name, port, porthlpmc );
516 }
517
518 /******************************************************************************
519 *
520 * USB legacy support
521 *
522 ******************************************************************************
523 */
524
/** Prevent the release of ownership back to BIOS
 *
 * If non-zero, xhci_legacy_release() will leave the controller owned
 * by the OS rather than handing it back to the BIOS.
 */
static int xhci_legacy_prevent_release;
527
528 /**
529 * Initialise USB legacy support
530 *
531 * @v xhci xHCI device
532 */
533 static void xhci_legacy_init ( struct xhci_device *xhci ) {
534 unsigned int legacy;
535 uint8_t bios;
536
537 /* Locate USB legacy support capability (if present) */
538 legacy = xhci_extended_capability ( xhci, XHCI_XECP_ID_LEGACY, 0 );
539 if ( ! legacy ) {
540 /* Not an error; capability may not be present */
541 DBGC ( xhci, "XHCI %s has no USB legacy support capability\n",
542 xhci->name );
543 return;
544 }
545
546 /* Check if legacy USB support is enabled */
547 bios = readb ( xhci->cap + legacy + XHCI_USBLEGSUP_BIOS );
548 if ( ! ( bios & XHCI_USBLEGSUP_BIOS_OWNED ) ) {
549 /* Not an error; already owned by OS */
550 DBGC ( xhci, "XHCI %s USB legacy support already disabled\n",
551 xhci->name );
552 return;
553 }
554
555 /* Record presence of USB legacy support capability */
556 xhci->legacy = legacy;
557 }
558
/**
 * Claim ownership from BIOS
 *
 * Requests the BIOS-to-OS ownership handoff via the USB legacy
 * support capability, waiting up to XHCI_USBLEGSUP_MAX_WAIT_MS for
 * the BIOS to respond before forcibly disabling its SMIs.
 *
 * @v xhci		xHCI device
 */
static void xhci_legacy_claim ( struct xhci_device *xhci ) {
	uint32_t ctlsts;
	uint8_t bios;
	unsigned int i;

	/* Do nothing unless legacy support capability is present */
	if ( ! xhci->legacy )
		return;

	/* Claim ownership by setting the OS-owned semaphore byte */
	writeb ( XHCI_USBLEGSUP_OS_OWNED,
		 xhci->cap + xhci->legacy + XHCI_USBLEGSUP_OS );

	/* Wait for BIOS to release ownership (polling once per millisecond) */
	for ( i = 0 ; i < XHCI_USBLEGSUP_MAX_WAIT_MS ; i++ ) {

		/* Check if BIOS has released ownership */
		bios = readb ( xhci->cap + xhci->legacy + XHCI_USBLEGSUP_BIOS );
		if ( ! ( bios & XHCI_USBLEGSUP_BIOS_OWNED ) ) {
			DBGC ( xhci, "XHCI %s claimed ownership from BIOS\n",
			       xhci->name );
			/* Warn if the BIOS left any SMI enables set */
			ctlsts = readl ( xhci->cap + xhci->legacy +
					 XHCI_USBLEGSUP_CTLSTS );
			if ( ctlsts ) {
				DBGC ( xhci, "XHCI %s warning: BIOS retained "
				       "SMIs: %08x\n", xhci->name, ctlsts );
			}
			return;
		}

		/* Delay */
		mdelay ( 1 );
	}

	/* BIOS did not release ownership.  Claim it forcibly by
	 * disabling all SMIs.
	 */
	DBGC ( xhci, "XHCI %s could not claim ownership from BIOS: forcibly "
	       "disabling SMIs\n", xhci->name );
	writel ( 0, xhci->cap + xhci->legacy + XHCI_USBLEGSUP_CTLSTS );
}
605
606 /**
607 * Release ownership back to BIOS
608 *
609 * @v xhci xHCI device
610 */
611 static void xhci_legacy_release ( struct xhci_device *xhci ) {
612
613 /* Do nothing unless legacy support capability is present */
614 if ( ! xhci->legacy )
615 return;
616
617 /* Do nothing if releasing ownership is prevented */
618 if ( xhci_legacy_prevent_release ) {
619 DBGC ( xhci, "XHCI %s not releasing ownership to BIOS\n",
620 xhci->name );
621 return;
622 }
623
624 /* Release ownership */
625 writeb ( 0, xhci->cap + xhci->legacy + XHCI_USBLEGSUP_OS );
626 DBGC ( xhci, "XHCI %s released ownership to BIOS\n", xhci->name );
627 }
628
629 /******************************************************************************
630 *
631 * Supported protocols
632 *
633 ******************************************************************************
634 */
635
/**
 * Transcribe port speed (for debugging)
 *
 * @v psi		Protocol speed ID
 * @ret speed		Transcribed speed
 *
 * Returns a pointer to a static buffer, overwritten on each call.
 */
static inline const char * xhci_speed_name ( uint32_t psi ) {
	static const char *exponents[4] = { "", "k", "M", "G" };
	static char buf[ 10 /* "xxxxxXbps" + NUL */ ];
	unsigned int man = XHCI_SUPPORTED_PSI_MANTISSA ( psi );
	unsigned int exp = XHCI_SUPPORTED_PSI_EXPONENT ( psi );

	/* Render as e.g. "480Mbps" using the SI prefix for the exponent */
	snprintf ( buf, sizeof ( buf ), "%d%sbps", man, exponents[exp] );
	return buf;
}
657
/**
 * Find supported protocol extended capability for a port
 *
 * @v xhci		xHCI device
 * @v port		Port number
 * @ret supported	Offset to extended capability, or zero if not found
 */
static unsigned int xhci_supported_protocol ( struct xhci_device *xhci,
					      unsigned int port ) {
	unsigned int supported = 0;
	unsigned int offset;
	unsigned int count;
	uint32_t ports;

	/* Iterate over all supported protocol structures */
	while ( ( supported = xhci_extended_capability ( xhci,
							 XHCI_XECP_ID_SUPPORTED,
							 supported ) ) ) {

		/* Determine port range covered by this structure */
		ports = readl ( xhci->cap + supported + XHCI_SUPPORTED_PORTS );
		offset = XHCI_SUPPORTED_PORTS_OFFSET ( ports );
		count = XHCI_SUPPORTED_PORTS_COUNT ( ports );

		/* Check if port lies within this range.  The unsigned
		 * subtraction also rejects ports below the range
		 * start, since the result wraps to a large value.
		 */
		if ( ( port - offset ) < count )
			return supported;
	}

	DBGC ( xhci, "XHCI %s-%d has no supported protocol\n",
	       xhci->name, port );
	return 0;
}
691
/**
 * Find port protocol
 *
 * @v xhci		xHCI device
 * @v port		Port number
 * @ret protocol	USB protocol (BCD revision), or zero if not found
 */
static unsigned int xhci_port_protocol ( struct xhci_device *xhci,
					 unsigned int port ) {
	unsigned int supported = xhci_supported_protocol ( xhci, port );
	/* Union used to reinterpret the 32-bit name register as a
	 * four-character string (plus NUL terminator).
	 */
	union {
		uint32_t raw;
		char text[5];
	} name;
	unsigned int protocol;
	unsigned int type;
	unsigned int psic;
	unsigned int psiv;
	unsigned int i;
	uint32_t revision;
	uint32_t ports;
	uint32_t slot;
	uint32_t psi;

	/* Fail if there is no supported protocol */
	if ( ! supported )
		return 0;

	/* Determine protocol version from the revision register */
	revision = readl ( xhci->cap + supported + XHCI_SUPPORTED_REVISION );
	protocol = XHCI_SUPPORTED_REVISION_VER ( revision );

	/* Describe port protocol (debug builds only; all reads below
	 * are purely informational)
	 */
	if ( DBG_EXTRA ) {
		name.raw = cpu_to_le32 ( readl ( xhci->cap + supported +
						 XHCI_SUPPORTED_NAME ) );
		name.text[4] = '\0';
		slot = readl ( xhci->cap + supported + XHCI_SUPPORTED_SLOT );
		type = XHCI_SUPPORTED_SLOT_TYPE ( slot );
		DBGC2 ( xhci, "XHCI %s-%d %sv%04x type %d",
			xhci->name, port, name.text, protocol, type );
		/* List the protocol speed IDs, if any are defined */
		ports = readl ( xhci->cap + supported + XHCI_SUPPORTED_PORTS );
		psic = XHCI_SUPPORTED_PORTS_PSIC ( ports );
		if ( psic ) {
			DBGC2 ( xhci, " speeds" );
			for ( i = 0 ; i < psic ; i++ ) {
				psi = readl ( xhci->cap + supported +
					      XHCI_SUPPORTED_PSI ( i ) );
				psiv = XHCI_SUPPORTED_PSI_VALUE ( psi );
				DBGC2 ( xhci, " %d:%s", psiv,
					xhci_speed_name ( psi ) );
			}
		}
		/* Note when the PSI table is being ignored due to a quirk */
		if ( xhci->quirks & XHCI_BAD_PSIV )
			DBGC2 ( xhci, " (ignored)" );
		DBGC2 ( xhci, "\n" );
	}

	return protocol;
}
752
753 /**
754 * Find port slot type
755 *
756 * @v xhci xHCI device
757 * @v port Port number
758 * @ret type Slot type, or negative error
759 */
760 static int xhci_port_slot_type ( struct xhci_device *xhci, unsigned int port ) {
761 unsigned int supported = xhci_supported_protocol ( xhci, port );
762 unsigned int type;
763 uint32_t slot;
764
765 /* Fail if there is no supported protocol */
766 if ( ! supported )
767 return -ENOTSUP;
768
769 /* Get slot type */
770 slot = readl ( xhci->cap + supported + XHCI_SUPPORTED_SLOT );
771 type = XHCI_SUPPORTED_SLOT_TYPE ( slot );
772
773 return type;
774 }
775
/**
 * Find port speed
 *
 * @v xhci		xHCI device
 * @v port		Port number
 * @v psiv		Protocol speed ID value
 * @ret speed		Port speed, or negative error
 */
static int xhci_port_speed ( struct xhci_device *xhci, unsigned int port,
			     unsigned int psiv ) {
	unsigned int supported = xhci_supported_protocol ( xhci, port );
	unsigned int psic;
	unsigned int mantissa;
	unsigned int exponent;
	unsigned int speed;
	unsigned int i;
	uint32_t ports;
	uint32_t psi;

	/* Fail if there is no supported protocol */
	if ( ! supported )
		return -ENOTSUP;

	/* Get protocol speed ID count */
	ports = readl ( xhci->cap + supported + XHCI_SUPPORTED_PORTS );
	psic = XHCI_SUPPORTED_PORTS_PSIC ( ports );

	/* Use protocol speed ID table unless device is known to be faulty */
	if ( ! ( xhci->quirks & XHCI_BAD_PSIV ) ) {

		/* Iterate over PSI dwords looking for a match */
		for ( i = 0 ; i < psic ; i++ ) {
			psi = readl ( xhci->cap + supported +
				      XHCI_SUPPORTED_PSI ( i ) );
			if ( psiv == XHCI_SUPPORTED_PSI_VALUE ( psi ) ) {
				mantissa = XHCI_SUPPORTED_PSI_MANTISSA ( psi );
				exponent = XHCI_SUPPORTED_PSI_EXPONENT ( psi );
				speed = USB_SPEED ( mantissa, exponent );
				return speed;
			}
		}

		/* Record device as faulty if no match is found: the
		 * quirk flag makes subsequent lookups skip the
		 * controller's PSI table and use the default mappings
		 * below instead.
		 */
		if ( psic != 0 ) {
			DBGC ( xhci, "XHCI %s-%d spurious PSI value %d: "
			       "assuming PSI table is invalid\n",
			       xhci->name, port, psiv );
			xhci->quirks |= XHCI_BAD_PSIV;
		}
	}

	/* Use the default mappings (empty or untrusted PSI table) */
	switch ( psiv ) {
	case XHCI_SPEED_LOW :	return USB_SPEED_LOW;
	case XHCI_SPEED_FULL :	return USB_SPEED_FULL;
	case XHCI_SPEED_HIGH :	return USB_SPEED_HIGH;
	case XHCI_SPEED_SUPER :	return USB_SPEED_SUPER;
	default:
		DBGC ( xhci, "XHCI %s-%d unrecognised PSI value %d\n",
		       xhci->name, port, psiv );
		return -ENOTSUP;
	}
}
839
/**
 * Find protocol speed ID value
 *
 * Performs the inverse lookup of xhci_port_speed(): maps a USB speed
 * back to the controller's protocol speed ID value.
 *
 * @v xhci		xHCI device
 * @v port		Port number
 * @v speed		USB speed
 * @ret psiv		Protocol speed ID value, or negative error
 */
static int xhci_port_psiv ( struct xhci_device *xhci, unsigned int port,
			    unsigned int speed ) {
	unsigned int supported = xhci_supported_protocol ( xhci, port );
	unsigned int psic;
	unsigned int mantissa;
	unsigned int exponent;
	unsigned int psiv;
	unsigned int i;
	uint32_t ports;
	uint32_t psi;

	/* Fail if there is no supported protocol */
	if ( ! supported )
		return -ENOTSUP;

	/* Get protocol speed ID count */
	ports = readl ( xhci->cap + supported + XHCI_SUPPORTED_PORTS );
	psic = XHCI_SUPPORTED_PORTS_PSIC ( ports );

	/* Use the default mappings if the PSI table is empty or the
	 * device has been marked as having an invalid PSI table.
	 */
	if ( ( psic == 0 ) || ( xhci->quirks & XHCI_BAD_PSIV ) ) {
		switch ( speed ) {
		case USB_SPEED_LOW :	return XHCI_SPEED_LOW;
		case USB_SPEED_FULL :	return XHCI_SPEED_FULL;
		case USB_SPEED_HIGH :	return XHCI_SPEED_HIGH;
		case USB_SPEED_SUPER :	return XHCI_SPEED_SUPER;
		default:
			DBGC ( xhci, "XHCI %s-%d non-standard speed %d\n",
			       xhci->name, port, speed );
			return -ENOTSUP;
		}
	}

	/* Iterate over PSI dwords looking for a matching speed */
	for ( i = 0 ; i < psic ; i++ ) {
		psi = readl ( xhci->cap + supported + XHCI_SUPPORTED_PSI ( i ));
		mantissa = XHCI_SUPPORTED_PSI_MANTISSA ( psi );
		exponent = XHCI_SUPPORTED_PSI_EXPONENT ( psi );
		if ( speed == USB_SPEED ( mantissa, exponent ) ) {
			psiv = XHCI_SUPPORTED_PSI_VALUE ( psi );
			return psiv;
		}
	}

	DBGC ( xhci, "XHCI %s-%d unrepresentable speed %#x\n",
	       xhci->name, port, speed );
	return -ENOENT;
}
896
897 /******************************************************************************
898 *
899 * Device context base address array
900 *
901 ******************************************************************************
902 */
903
/**
 * Allocate device context base address array
 *
 * Allocates the DCBAA, zeroes it, and programs its DMA address into
 * the DCBAAP operational register.
 *
 * @v xhci		xHCI device
 * @ret rc		Return status code
 */
static int xhci_dcbaa_alloc ( struct xhci_device *xhci ) {
	size_t len;
	physaddr_t dcbaap;
	int rc;

	/* Allocate and initialise structure.  Must be at least
	 * 64-byte aligned and must not cross a page boundary, so
	 * align on its own size (rounded up to a power of two and
	 * with a minimum of 64 bytes).
	 */
	len = ( ( xhci->slots + 1 ) * sizeof ( xhci->dcbaa.context[0] ) );
	xhci->dcbaa.context = dma_alloc ( xhci->dma, &xhci->dcbaa.map, len,
					  xhci_align ( len ) );
	if ( ! xhci->dcbaa.context ) {
		DBGC ( xhci, "XHCI %s could not allocate DCBAA\n", xhci->name );
		rc = -ENOMEM;
		goto err_alloc;
	}
	memset ( xhci->dcbaa.context, 0, len );

	/* Program DCBAA pointer (may fail if the DMA address lies
	 * above 4GB on a controller without 64-bit support)
	 */
	dcbaap = dma ( &xhci->dcbaa.map, xhci->dcbaa.context );
	if ( ( rc = xhci_writeq ( xhci, dcbaap,
				  xhci->op + XHCI_OP_DCBAAP ) ) != 0 )
		goto err_writeq;

	DBGC2 ( xhci, "XHCI %s DCBAA at [%08lx,%08lx)\n", xhci->name,
		virt_to_phys ( xhci->dcbaa.context ),
		( virt_to_phys ( xhci->dcbaa.context ) + len ) );
	return 0;

 err_writeq:
	dma_free ( &xhci->dcbaa.map, xhci->dcbaa.context, len );
 err_alloc:
	return rc;
}
946
947 /**
948 * Free device context base address array
949 *
950 * @v xhci xHCI device
951 */
952 static void xhci_dcbaa_free ( struct xhci_device *xhci ) {
953 size_t len;
954 unsigned int i;
955
956 /* Sanity check */
957 for ( i = 0 ; i <= xhci->slots ; i++ )
958 assert ( xhci->dcbaa.context[i] == 0 );
959
960 /* Clear DCBAA pointer */
961 xhci_writeq ( xhci, 0, xhci->op + XHCI_OP_DCBAAP );
962
963 /* Free DCBAA */
964 len = ( ( xhci->slots + 1 ) * sizeof ( xhci->dcbaa.context[0] ) );
965 dma_free ( &xhci->dcbaa.map, xhci->dcbaa.context, len );
966 }
967
968 /******************************************************************************
969 *
970 * Scratchpad buffers
971 *
972 ******************************************************************************
973 */
974
/**
 * Allocate scratchpad buffers
 *
 * Allocates the controller-private scratchpad pages and the array of
 * their DMA addresses, and records the array address in DCBAA entry 0.
 *
 * @v xhci		xHCI device
 * @ret rc		Return status code
 */
static int xhci_scratchpad_alloc ( struct xhci_device *xhci ) {
	struct xhci_scratchpad *scratch = &xhci->scratch;
	size_t buffer_len;
	size_t array_len;
	physaddr_t addr;
	unsigned int i;
	int rc;

	/* Do nothing if no scratchpad buffers are used */
	if ( ! scratch->count )
		return 0;

	/* Allocate scratchpad buffers (one controller page each,
	 * page-aligned)
	 */
	buffer_len = ( scratch->count * xhci->pagesize );
	scratch->buffer = dma_umalloc ( xhci->dma, &scratch->buffer_map,
					buffer_len, xhci->pagesize );
	if ( ! scratch->buffer ) {
		DBGC ( xhci, "XHCI %s could not allocate scratchpad buffers\n",
		       xhci->name );
		rc = -ENOMEM;
		goto err_alloc;
	}
	memset_user ( scratch->buffer, 0, 0, buffer_len );

	/* Allocate scratchpad array */
	array_len = ( scratch->count * sizeof ( scratch->array[0] ) );
	scratch->array = dma_alloc ( xhci->dma, &scratch->array_map,
				     array_len, xhci_align ( array_len ) );
	if ( ! scratch->array ) {
		DBGC ( xhci, "XHCI %s could not allocate scratchpad buffer "
		       "array\n", xhci->name );
		rc = -ENOMEM;
		goto err_alloc_array;
	}

	/* Populate scratchpad array with the DMA address of each page */
	addr = dma_phys ( &scratch->buffer_map,
			  user_to_phys ( scratch->buffer, 0 ) );
	for ( i = 0 ; i < scratch->count ; i++ ) {
		scratch->array[i] = cpu_to_le64 ( addr );
		addr += xhci->pagesize;
	}

	/* Set scratchpad array pointer in DCBAA entry 0 */
	assert ( xhci->dcbaa.context != NULL );
	xhci->dcbaa.context[0] = cpu_to_le64 ( dma ( &scratch->array_map,
						     scratch->array ) );

	DBGC2 ( xhci, "XHCI %s scratchpad [%08lx,%08lx) array [%08lx,%08lx)\n",
		xhci->name, user_to_phys ( scratch->buffer, 0 ),
		user_to_phys ( scratch->buffer, buffer_len ),
		virt_to_phys ( scratch->array ),
		( virt_to_phys ( scratch->array ) + array_len ) );
	return 0;

	/* Not currently reached (no failure paths exist after the
	 * array allocation); apparently retained so that the error
	 * path stays complete if later steps are added.
	 */
	dma_free ( &scratch->array_map, scratch->array, array_len );
 err_alloc_array:
	dma_ufree ( &scratch->buffer_map, scratch->buffer, buffer_len );
 err_alloc:
	return rc;
}
1042
1043 /**
1044 * Free scratchpad buffers
1045 *
1046 * @v xhci xHCI device
1047 */
1048 static void xhci_scratchpad_free ( struct xhci_device *xhci ) {
1049 struct xhci_scratchpad *scratch = &xhci->scratch;
1050 size_t array_len;
1051 size_t buffer_len;
1052
1053 /* Do nothing if no scratchpad buffers are used */
1054 if ( ! scratch->count )
1055 return;
1056
1057 /* Clear scratchpad array pointer */
1058 assert ( xhci->dcbaa.context != NULL );
1059 xhci->dcbaa.context[0] = 0;
1060
1061 /* Free scratchpad array */
1062 array_len = ( scratch->count * sizeof ( scratch->array[0] ) );
1063 dma_free ( &scratch->array_map, scratch->array, array_len );
1064
1065 /* Free scratchpad buffers */
1066 buffer_len = ( scratch->count * xhci->pagesize );
1067 dma_ufree ( &scratch->buffer_map, scratch->buffer, buffer_len );
1068 }
1069
1070 /******************************************************************************
1071 *
1072 * Run / stop / reset
1073 *
1074 ******************************************************************************
1075 */
1076
1077 /**
1078 * Start xHCI device
1079 *
1080 * @v xhci xHCI device
1081 */
1082 static void xhci_run ( struct xhci_device *xhci ) {
1083 uint32_t config;
1084 uint32_t usbcmd;
1085
1086 /* Configure number of device slots */
1087 config = readl ( xhci->op + XHCI_OP_CONFIG );
1088 config &= ~XHCI_CONFIG_MAX_SLOTS_EN_MASK;
1089 config |= XHCI_CONFIG_MAX_SLOTS_EN ( xhci->slots );
1090 writel ( config, xhci->op + XHCI_OP_CONFIG );
1091
1092 /* Set run/stop bit */
1093 usbcmd = readl ( xhci->op + XHCI_OP_USBCMD );
1094 usbcmd |= XHCI_USBCMD_RUN;
1095 writel ( usbcmd, xhci->op + XHCI_OP_USBCMD );
1096 }
1097
1098 /**
1099 * Stop xHCI device
1100 *
1101 * @v xhci xHCI device
1102 * @ret rc Return status code
1103 */
1104 static int xhci_stop ( struct xhci_device *xhci ) {
1105 uint32_t usbcmd;
1106 uint32_t usbsts;
1107 unsigned int i;
1108
1109 /* Clear run/stop bit */
1110 usbcmd = readl ( xhci->op + XHCI_OP_USBCMD );
1111 usbcmd &= ~XHCI_USBCMD_RUN;
1112 writel ( usbcmd, xhci->op + XHCI_OP_USBCMD );
1113
1114 /* Wait for device to stop */
1115 for ( i = 0 ; i < XHCI_STOP_MAX_WAIT_MS ; i++ ) {
1116
1117 /* Check if device is stopped */
1118 usbsts = readl ( xhci->op + XHCI_OP_USBSTS );
1119 if ( usbsts & XHCI_USBSTS_HCH )
1120 return 0;
1121
1122 /* Delay */
1123 mdelay ( 1 );
1124 }
1125
1126 DBGC ( xhci, "XHCI %s timed out waiting for stop\n", xhci->name );
1127 return -ETIMEDOUT;
1128 }
1129
1130 /**
1131 * Reset xHCI device
1132 *
1133 * @v xhci xHCI device
1134 * @ret rc Return status code
1135 */
1136 static int xhci_reset ( struct xhci_device *xhci ) {
1137 uint32_t usbcmd;
1138 unsigned int i;
1139 int rc;
1140
1141 /* The xHCI specification states that resetting a running
1142 * device may result in undefined behaviour, so try stopping
1143 * it first.
1144 */
1145 if ( ( rc = xhci_stop ( xhci ) ) != 0 ) {
1146 /* Ignore errors and attempt to reset the device anyway */
1147 }
1148
1149 /* Reset device */
1150 writel ( XHCI_USBCMD_HCRST, xhci->op + XHCI_OP_USBCMD );
1151
1152 /* Wait for reset to complete */
1153 for ( i = 0 ; i < XHCI_RESET_MAX_WAIT_MS ; i++ ) {
1154
1155 /* Check if reset is complete */
1156 usbcmd = readl ( xhci->op + XHCI_OP_USBCMD );
1157 if ( ! ( usbcmd & XHCI_USBCMD_HCRST ) )
1158 return 0;
1159
1160 /* Delay */
1161 mdelay ( 1 );
1162 }
1163
1164 DBGC ( xhci, "XHCI %s timed out waiting for reset\n", xhci->name );
1165 return -ETIMEDOUT;
1166 }
1167
1168 /**
1169 * Mark xHCI device as permanently failed
1170 *
1171 * @v xhci xHCI device
1172 * @ret rc Return status code
1173 */
1174 static int xhci_fail ( struct xhci_device *xhci ) {
1175 size_t len;
1176 int rc;
1177
1178 /* Mark command mechanism as permanently failed */
1179 xhci->failed = 1;
1180
1181 /* Reset device */
1182 if ( ( rc = xhci_reset ( xhci ) ) != 0 )
1183 return rc;
1184
1185 /* Discard DCBAA entries since DCBAAP has been cleared */
1186 assert ( xhci->dcbaa.context != NULL );
1187 len = ( ( xhci->slots + 1 ) * sizeof ( xhci->dcbaa.context[0] ) );
1188 memset ( xhci->dcbaa.context, 0, len );
1189
1190 return 0;
1191 }
1192
1193 /******************************************************************************
1194 *
1195 * Transfer request blocks
1196 *
1197 ******************************************************************************
1198 */
1199
1200 /**
1201 * Allocate transfer request block ring
1202 *
1203 * @v xhci xHCI device
1204 * @v ring TRB ring
1205 * @v shift Ring size (log2)
1206 * @v slot Device slot
1207 * @v target Doorbell target
1208 * @v stream Doorbell stream ID
1209 * @ret rc Return status code
1210 */
static int xhci_ring_alloc ( struct xhci_device *xhci,
			     struct xhci_trb_ring *ring,
			     unsigned int shift, unsigned int slot,
			     unsigned int target, unsigned int stream ) {
	struct xhci_trb_link *link;
	unsigned int count;
	int rc;

	/* Sanity check: a ring must hold at least two TRBs */
	assert ( shift > 0 );

	/* Initialise structure: ring holds 2^shift usable TRBs plus
	 * one trailing Link TRB, and records its doorbell register
	 * address and doorbell value for later use
	 */
	memset ( ring, 0, sizeof ( *ring ) );
	ring->shift = shift;
	count = ( 1U << shift );
	ring->mask = ( count - 1 );
	ring->len = ( ( count + 1 /* Link TRB */ ) * sizeof ( ring->trb[0] ) );
	ring->db = ( xhci->db + ( slot * sizeof ( ring->dbval ) ) );
	ring->dbval = XHCI_DBVAL ( target, stream );

	/* Allocate I/O buffer pointer array (one slot per usable TRB,
	 * zero-initialised to NULL)
	 */
	ring->iobuf = zalloc ( count * sizeof ( ring->iobuf[0] ) );
	if ( ! ring->iobuf ) {
		rc = -ENOMEM;
		goto err_alloc_iobuf;
	}

	/* Allocate TRBs in DMA-capable memory */
	ring->trb = dma_alloc ( xhci->dma, &ring->map, ring->len,
				xhci_align ( ring->len ) );
	if ( ! ring->trb ) {
		rc = -ENOMEM;
		goto err_alloc_trb;
	}
	memset ( ring->trb, 0, ring->len );

	/* Initialise Link TRB: points back to the start of the ring,
	 * with the Toggle Cycle flag set so the cycle state flips on
	 * each wraparound
	 */
	link = &ring->trb[count].link;
	link->next = cpu_to_le64 ( dma ( &ring->map, ring->trb ) );
	link->flags = XHCI_TRB_TC;
	link->type = XHCI_TRB_LINK;
	ring->link = link;

	return 0;

	/* Error unwind (first line unreachable after the "return 0"
	 * above, per the file's convention of spelling out the full
	 * unwind order)
	 */
	dma_free ( &ring->map, ring->trb, ring->len );
 err_alloc_trb:
	free ( ring->iobuf );
 err_alloc_iobuf:
	return rc;
}
1262
1263 /**
1264 * Reset transfer request block ring
1265 *
1266 * @v ring TRB ring
1267 */
1268 static void xhci_ring_reset ( struct xhci_trb_ring *ring ) {
1269 unsigned int count = ( 1U << ring->shift );
1270
1271 /* Reset producer and consumer counters */
1272 ring->prod = 0;
1273 ring->cons = 0;
1274
1275 /* Reset TRBs (except Link TRB) */
1276 memset ( ring->trb, 0, ( count * sizeof ( ring->trb[0] ) ) );
1277 }
1278
1279 /**
1280 * Free transfer request block ring
1281 *
1282 * @v ring TRB ring
1283 */
1284 static void xhci_ring_free ( struct xhci_trb_ring *ring ) {
1285 unsigned int count = ( 1U << ring->shift );
1286 unsigned int i;
1287
1288 /* Sanity checks */
1289 assert ( ring->cons == ring->prod );
1290 for ( i = 0 ; i < count ; i++ )
1291 assert ( ring->iobuf[i] == NULL );
1292
1293 /* Free TRBs */
1294 dma_free ( &ring->map, ring->trb, ring->len );
1295
1296 /* Free I/O buffers */
1297 free ( ring->iobuf );
1298 }
1299
1300 /**
1301 * Enqueue a transfer request block
1302 *
1303 * @v ring TRB ring
1304 * @v iobuf I/O buffer (if any)
1305 * @v trb Transfer request block (with empty Cycle flag)
1306 * @ret rc Return status code
1307 *
1308 * This operation does not implicitly ring the doorbell register.
1309 */
static int xhci_enqueue ( struct xhci_trb_ring *ring, struct io_buffer *iobuf,
			  const union xhci_trb *trb ) {
	union xhci_trb *dest;
	unsigned int prod;
	unsigned int mask;
	unsigned int index;
	unsigned int cycle;

	/* Sanity check: caller must leave the Cycle flag clear; it is
	 * filled in here from the ring's current cycle state
	 */
	assert ( ! ( trb->common.flags & XHCI_TRB_C ) );

	/* Fail if ring is full */
	if ( ! xhci_ring_remaining ( ring ) )
		return -ENOBUFS;

	/* Update producer counter (and link TRB, if applicable).  The
	 * producer cycle state is bit "shift" of the counter, inverted
	 * so that the first pass uses cycle=1; whenever the counter
	 * wraps to index 0, the Link TRB is given the *previous*
	 * pass's cycle value so the controller follows it correctly.
	 */
	prod = ring->prod++;
	mask = ring->mask;
	cycle = ( ( ~( prod >> ring->shift ) ) & XHCI_TRB_C );
	index = ( prod & mask );
	if ( index == 0 )
		ring->link->flags = ( XHCI_TRB_TC | ( cycle ^ XHCI_TRB_C ) );

	/* Record I/O buffer (may be NULL for all but the final TRB of
	 * a multi-TRB transfer)
	 */
	ring->iobuf[index] = iobuf;

	/* Enqueue TRB: write parameter and status first, then issue a
	 * write barrier before setting the control word (whose cycle
	 * bit hands ownership of the TRB to the controller)
	 */
	dest = &ring->trb[index];
	dest->template.parameter = trb->template.parameter;
	dest->template.status = trb->template.status;
	wmb();
	dest->template.control = ( trb->template.control |
				   cpu_to_le32 ( cycle ) );

	return 0;
}
1346
1347 /**
1348 * Dequeue a transfer request block
1349 *
1350 * @v ring TRB ring
1351 * @ret iobuf I/O buffer
1352 */
1353 static struct io_buffer * xhci_dequeue ( struct xhci_trb_ring *ring ) {
1354 struct io_buffer *iobuf;
1355 unsigned int cons;
1356 unsigned int mask;
1357 unsigned int index;
1358
1359 /* Sanity check */
1360 assert ( xhci_ring_fill ( ring ) != 0 );
1361
1362 /* Update consumer counter */
1363 cons = ring->cons++;
1364 mask = ring->mask;
1365 index = ( cons & mask );
1366
1367 /* Retrieve I/O buffer */
1368 iobuf = ring->iobuf[index];
1369 ring->iobuf[index] = NULL;
1370
1371 return iobuf;
1372 }
1373
1374 /**
1375 * Enqueue multiple transfer request blocks
1376 *
1377 * @v ring TRB ring
1378 * @v iobuf I/O buffer
1379 * @v trbs Transfer request blocks (with empty Cycle flag)
1380 * @v count Number of transfer request blocks
1381 * @ret rc Return status code
1382 *
1383 * This operation does not implicitly ring the doorbell register.
1384 */
1385 static int xhci_enqueue_multi ( struct xhci_trb_ring *ring,
1386 struct io_buffer *iobuf,
1387 const union xhci_trb *trbs,
1388 unsigned int count ) {
1389 const union xhci_trb *trb = trbs;
1390 int rc;
1391
1392 /* Sanity check */
1393 assert ( iobuf != NULL );
1394
1395 /* Fail if ring does not have sufficient space */
1396 if ( xhci_ring_remaining ( ring ) < count )
1397 return -ENOBUFS;
1398
1399 /* Enqueue each TRB, recording the I/O buffer with the final TRB */
1400 while ( count-- ) {
1401 rc = xhci_enqueue ( ring, ( count ? NULL : iobuf ), trb++ );
1402 assert ( rc == 0 ); /* Should never be able to fail */
1403 }
1404
1405 return 0;
1406 }
1407
1408 /**
1409 * Dequeue multiple transfer request blocks
1410 *
1411 * @v ring TRB ring
1412 * @ret iobuf I/O buffer
1413 */
1414 static struct io_buffer * xhci_dequeue_multi ( struct xhci_trb_ring *ring ) {
1415 struct io_buffer *iobuf;
1416
1417 /* Dequeue TRBs until we reach the final TRB for an I/O buffer */
1418 do {
1419 iobuf = xhci_dequeue ( ring );
1420 } while ( iobuf == NULL );
1421
1422 return iobuf;
1423 }
1424
1425 /**
1426 * Ring doorbell register
1427 *
1428 * @v ring TRB ring
1429 */
static inline __attribute__ (( always_inline )) void
xhci_doorbell ( struct xhci_trb_ring *ring ) {

	/* Write barrier ensures all prior TRB writes are visible to
	 * the controller before the doorbell write that notifies it
	 */
	wmb();
	writel ( ring->dbval, ring->db );
}
1436
1437 /******************************************************************************
1438 *
1439 * Command and event rings
1440 *
1441 ******************************************************************************
1442 */
1443
1444 /**
1445 * Allocate command ring
1446 *
1447 * @v xhci xHCI device
1448 * @ret rc Return status code
1449 */
1450 static int xhci_command_alloc ( struct xhci_device *xhci ) {
1451 physaddr_t crp;
1452 int rc;
1453
1454 /* Allocate TRB ring */
1455 if ( ( rc = xhci_ring_alloc ( xhci, &xhci->command, XHCI_CMD_TRBS_LOG2,
1456 0, 0, 0 ) ) != 0 )
1457 goto err_ring_alloc;
1458
1459 /* Program command ring control register */
1460 crp = dma ( &xhci->command.map, xhci->command.trb );
1461 if ( ( rc = xhci_writeq ( xhci, ( crp | XHCI_CRCR_RCS ),
1462 xhci->op + XHCI_OP_CRCR ) ) != 0 )
1463 goto err_writeq;
1464
1465 DBGC2 ( xhci, "XHCI %s CRCR at [%08lx,%08lx)\n", xhci->name,
1466 virt_to_phys ( xhci->command.trb ),
1467 ( virt_to_phys ( xhci->command.trb ) + xhci->command.len ) );
1468 return 0;
1469
1470 err_writeq:
1471 xhci_ring_free ( &xhci->command );
1472 err_ring_alloc:
1473 return rc;
1474 }
1475
1476 /**
1477 * Free command ring
1478 *
1479 * @v xhci xHCI device
1480 */
static void xhci_command_free ( struct xhci_device *xhci ) {

	/* Sanity check: the command ring must no longer be running */
	assert ( ( readl ( xhci->op + XHCI_OP_CRCR ) & XHCI_CRCR_CRR ) == 0 );

	/* Clear command ring control register before freeing the ring
	 * memory it points to
	 */
	xhci_writeq ( xhci, 0, xhci->op + XHCI_OP_CRCR );

	/* Free TRB ring */
	xhci_ring_free ( &xhci->command );
}
1492
1493 /**
1494 * Allocate event ring
1495 *
1496 * @v xhci xHCI device
1497 * @ret rc Return status code
1498 */
static int xhci_event_alloc ( struct xhci_device *xhci ) {
	struct xhci_event_ring *event = &xhci->event;
	unsigned int count;
	size_t len;
	int rc;

	/* Allocate event ring (zeroed, so the controller owns every
	 * TRB initially: consumer cycle state starts at 1)
	 */
	count = ( 1 << XHCI_EVENT_TRBS_LOG2 );
	len = ( count * sizeof ( event->trb[0] ) );
	event->trb = dma_alloc ( xhci->dma, &event->trb_map, len,
				 xhci_align ( len ) );
	if ( ! event->trb ) {
		rc = -ENOMEM;
		goto err_alloc_trb;
	}
	memset ( event->trb, 0, len );

	/* Allocate event ring segment table (a single segment
	 * describing the whole ring)
	 */
	event->segment = dma_alloc ( xhci->dma, &event->segment_map,
				     sizeof ( event->segment[0] ),
				     xhci_align ( sizeof (event->segment[0])));
	if ( ! event->segment ) {
		rc = -ENOMEM;
		goto err_alloc_segment;
	}
	memset ( event->segment, 0, sizeof ( event->segment[0] ) );
	event->segment[0].base = cpu_to_le64 ( dma ( &event->trb_map,
						     event->trb ) );
	event->segment[0].count = cpu_to_le32 ( count );

	/* Program event ring registers for interrupter 0: segment
	 * table size, dequeue pointer, then segment table base (which
	 * is written last)
	 */
	writel ( 1, xhci->run + XHCI_RUN_ERSTSZ ( 0 ) );
	if ( ( rc = xhci_writeq ( xhci, dma ( &event->trb_map, event->trb ),
				  xhci->run + XHCI_RUN_ERDP ( 0 ) ) ) != 0 )
		goto err_writeq_erdp;
	if ( ( rc = xhci_writeq ( xhci,
				  dma ( &event->segment_map, event->segment ),
				  xhci->run + XHCI_RUN_ERSTBA ( 0 ) ) ) != 0 )
		goto err_writeq_erstba;

	DBGC2 ( xhci, "XHCI %s event ring [%08lx,%08lx) table [%08lx,%08lx)\n",
		xhci->name, virt_to_phys ( event->trb ),
		( virt_to_phys ( event->trb ) + len ),
		virt_to_phys ( event->segment ),
		( virt_to_phys ( event->segment ) +
		  sizeof ( event->segment[0] ) ) );
	return 0;

	/* Error unwind (first line unreachable after the "return 0"
	 * above, per the file's convention)
	 */
	xhci_writeq ( xhci, 0, xhci->run + XHCI_RUN_ERSTBA ( 0 ) );
 err_writeq_erstba:
	xhci_writeq ( xhci, 0, xhci->run + XHCI_RUN_ERDP ( 0 ) );
 err_writeq_erdp:
	dma_free ( &event->segment_map, event->segment,
		   sizeof ( event->segment[0] ) );
 err_alloc_segment:
	dma_free ( &event->trb_map, event->trb, len );
 err_alloc_trb:
	return rc;
}
1558
1559 /**
1560 * Free event ring
1561 *
1562 * @v xhci xHCI device
1563 */
1564 static void xhci_event_free ( struct xhci_device *xhci ) {
1565 struct xhci_event_ring *event = &xhci->event;
1566 unsigned int count;
1567 size_t len;
1568
1569 /* Clear event ring registers */
1570 writel ( 0, xhci->run + XHCI_RUN_ERSTSZ ( 0 ) );
1571 xhci_writeq ( xhci, 0, xhci->run + XHCI_RUN_ERSTBA ( 0 ) );
1572 xhci_writeq ( xhci, 0, xhci->run + XHCI_RUN_ERDP ( 0 ) );
1573
1574 /* Free event ring segment table */
1575 dma_free ( &event->segment_map, event->segment,
1576 sizeof ( event->segment[0] ) );
1577
1578 /* Free event ring */
1579 count = ( 1 << XHCI_EVENT_TRBS_LOG2 );
1580 len = ( count * sizeof ( event->trb[0] ) );
1581 dma_free ( &event->trb_map, event->trb, len );
1582 }
1583
1584 /**
1585 * Handle transfer event
1586 *
1587 * @v xhci xHCI device
1588 * @v trb Transfer event TRB
1589 */
static void xhci_transfer ( struct xhci_device *xhci,
			    struct xhci_trb_transfer *trb ) {
	struct xhci_slot *slot;
	struct xhci_endpoint *endpoint;
	struct io_buffer *iobuf;
	int rc;

	/* Profile transfer events */
	profile_start ( &xhci_transfer_profiler );

	/* Identify slot; drop (with a hex dump) events for slot IDs
	 * that are out of range or not currently open
	 */
	if ( ( trb->slot > xhci->slots ) ||
	     ( ( slot = xhci->slot[trb->slot] ) == NULL ) ) {
		DBGC ( xhci, "XHCI %s transfer event invalid slot %d:\n",
		       xhci->name, trb->slot );
		DBGC_HDA ( xhci, 0, trb, sizeof ( *trb ) );
		return;
	}

	/* Identify endpoint; drop events for context IDs that are out
	 * of range or have no open endpoint
	 */
	if ( ( trb->endpoint >= XHCI_CTX_END ) ||
	     ( ( endpoint = slot->endpoint[trb->endpoint] ) == NULL ) ) {
		DBGC ( xhci, "XHCI %s slot %d transfer event invalid epid "
		       "%d:\n", xhci->name, slot->id, trb->endpoint );
		DBGC_HDA ( xhci, 0, trb, sizeof ( *trb ) );
		return;
	}

	/* Dequeue TRB(s) for the completed transfer and retrieve its
	 * I/O buffer
	 */
	iobuf = xhci_dequeue_multi ( &endpoint->ring );
	assert ( iobuf != NULL );

	/* Unmap I/O buffer */
	iob_unmap ( iobuf );

	/* Check for errors (anything other than Success or Short
	 * Packet is treated as a failure)
	 */
	if ( ! ( ( trb->code == XHCI_CMPLT_SUCCESS ) ||
		 ( trb->code == XHCI_CMPLT_SHORT ) ) ) {

		/* Construct error from the completion code */
		rc = -ECODE ( trb->code );
		DBGC ( xhci, "XHCI %s slot %d ctx %d failed (code %d): %s\n",
		       xhci->name, slot->id, endpoint->ctx, trb->code,
		       strerror ( rc ) );
		DBGC_HDA ( xhci, 0, trb, sizeof ( *trb ) );

		/* Sanity check: a failed endpoint should no longer be
		 * in the Running state
		 */
		assert ( ( endpoint->context->state & XHCI_ENDPOINT_STATE_MASK )
			 != XHCI_ENDPOINT_RUNNING );

		/* Report failure to USB core */
		usb_complete_err ( endpoint->ep, iobuf, rc );
		return;
	}

	/* Record actual transfer size by trimming off the residual
	 * (untransferred) byte count
	 */
	iob_unput ( iobuf, le16_to_cpu ( trb->residual ) );

	/* Sanity check (for successful completions only): the event's
	 * TRB pointer should match our consumer position
	 */
	assert ( xhci_ring_consumed ( &endpoint->ring ) ==
		 le64_to_cpu ( trb->transfer ) );

	/* Report completion to USB core */
	usb_complete ( endpoint->ep, iobuf );
	profile_stop ( &xhci_transfer_profiler );
}
1656
1657 /**
1658 * Handle command completion event
1659 *
1660 * @v xhci xHCI device
1661 * @v trb Command completion event
1662 */
static void xhci_complete ( struct xhci_device *xhci,
			    struct xhci_trb_complete *trb ) {
	int rc;

	/* Ignore "command ring stopped" notifications (generated by
	 * command aborts; the abort path handles recovery itself)
	 */
	if ( trb->code == XHCI_CMPLT_CMD_STOPPED ) {
		DBGC2 ( xhci, "XHCI %s command ring stopped\n", xhci->name );
		return;
	}

	/* Ignore unexpected completions (no command is pending, e.g.
	 * a late completion after a command already timed out)
	 */
	if ( ! xhci->pending ) {
		rc = -ECODE ( trb->code );
		DBGC ( xhci, "XHCI %s unexpected completion (code %d): %s\n",
		       xhci->name, trb->code, strerror ( rc ) );
		DBGC_HDA ( xhci, 0, trb, sizeof ( *trb ) );
		return;
	}

	/* Dequeue command TRB (no I/O buffer is associated) */
	xhci_dequeue ( &xhci->command );

	/* Sanity check: event's command TRB pointer should match our
	 * consumer position
	 */
	assert ( xhci_ring_consumed ( &xhci->command ) ==
		 le64_to_cpu ( trb->command ) );

	/* Record completion by overwriting the caller's TRB with the
	 * completion event, then clear the pending pointer (which is
	 * what xhci_command() polls for)
	 */
	memcpy ( xhci->pending, trb, sizeof ( *xhci->pending ) );
	xhci->pending = NULL;
}
1693
1694 /**
1695 * Handle port status event
1696 *
1697 * @v xhci xHCI device
1698 * @v trb Port status event
1699 */
static void xhci_port_status ( struct xhci_device *xhci,
			       struct xhci_trb_port_status *trb ) {
	struct usb_port *port = usb_port ( xhci->bus->hub, trb->port );
	uint32_t portsc;

	/* Sanity check: port numbers are 1-based */
	assert ( ( trb->port > 0 ) && ( trb->port <= xhci->ports ) );

	/* Record disconnections and clear changes.  Writing back only
	 * the preserve and change bits clears the change bits (which
	 * are write-1-to-clear per the xHCI specification) without
	 * disturbing other control bits.
	 */
	portsc = readl ( xhci->op + XHCI_OP_PORTSC ( trb->port ) );
	port->disconnected |= ( portsc & XHCI_PORTSC_CSC );
	portsc &= ( XHCI_PORTSC_PRESERVE | XHCI_PORTSC_CHANGE );
	writel ( portsc, xhci->op + XHCI_OP_PORTSC ( trb->port ) );

	/* Report port status change to the USB core */
	usb_port_changed ( port );
}
1717
1718 /**
1719 * Handle host controller event
1720 *
1721 * @v xhci xHCI device
1722 * @v trb Host controller event
1723 */
static void xhci_host_controller ( struct xhci_device *xhci,
				   struct xhci_trb_host_controller *trb ) {
	int rc;

	/* Construct error from the completion code and log it; there
	 * is no further recovery action taken for these events
	 */
	rc = -ECODE ( trb->code );
	DBGC ( xhci, "XHCI %s host controller event (code %d): %s\n",
	       xhci->name, trb->code, strerror ( rc ) );
}
1733
1734 /**
1735 * Poll event ring
1736 *
1737 * @v xhci xHCI device
1738 */
1739 static void xhci_event_poll ( struct xhci_device *xhci ) {
1740 struct xhci_event_ring *event = &xhci->event;
1741 union xhci_trb *trb;
1742 unsigned int shift = XHCI_EVENT_TRBS_LOG2;
1743 unsigned int count = ( 1 << shift );
1744 unsigned int mask = ( count - 1 );
1745 unsigned int consumed;
1746 unsigned int type;
1747
1748 /* Do nothing if device has permanently failed */
1749 if ( xhci->failed )
1750 return;
1751
1752 /* Poll for events */
1753 profile_start ( &xhci_event_profiler );
1754 for ( consumed = 0 ; ; consumed++ ) {
1755
1756 /* Stop if we reach an empty TRB */
1757 rmb();
1758 trb = &event->trb[ event->cons & mask ];
1759 if ( ! ( ( trb->common.flags ^
1760 ( event->cons >> shift ) ) & XHCI_TRB_C ) )
1761 break;
1762
1763 /* Consume this TRB */
1764 event->cons++;
1765
1766 /* Handle TRB */
1767 type = ( trb->common.type & XHCI_TRB_TYPE_MASK );
1768 switch ( type ) {
1769
1770 case XHCI_TRB_TRANSFER :
1771 xhci_transfer ( xhci, &trb->transfer );
1772 break;
1773
1774 case XHCI_TRB_COMPLETE :
1775 xhci_complete ( xhci, &trb->complete );
1776 break;
1777
1778 case XHCI_TRB_PORT_STATUS:
1779 xhci_port_status ( xhci, &trb->port );
1780 break;
1781
1782 case XHCI_TRB_HOST_CONTROLLER:
1783 xhci_host_controller ( xhci, &trb->host );
1784 break;
1785
1786 default:
1787 DBGC ( xhci, "XHCI %s unrecognised event %#x\n:",
1788 xhci->name, ( event->cons - 1 ) );
1789 DBGC_HDA ( xhci, virt_to_phys ( trb ),
1790 trb, sizeof ( *trb ) );
1791 break;
1792 }
1793 }
1794
1795 /* Update dequeue pointer if applicable */
1796 if ( consumed ) {
1797 xhci_writeq ( xhci, dma ( &event->trb_map, trb ),
1798 xhci->run + XHCI_RUN_ERDP ( 0 ) );
1799 profile_stop ( &xhci_event_profiler );
1800 }
1801 }
1802
1803 /**
1804 * Abort command
1805 *
1806 * @v xhci xHCI device
1807 */
static void xhci_abort ( struct xhci_device *xhci ) {
	physaddr_t crp;
	uint32_t crcr;

	/* Abort the command by setting the Command Abort bit */
	DBGC2 ( xhci, "XHCI %s aborting command\n", xhci->name );
	xhci_writeq ( xhci, XHCI_CRCR_CA, xhci->op + XHCI_OP_CRCR );

	/* Allow time for command to abort */
	mdelay ( XHCI_COMMAND_ABORT_DELAY_MS );

	/* Check for failure to abort (Command Ring Running still set) */
	crcr = readl ( xhci->op + XHCI_OP_CRCR );
	if ( crcr & XHCI_CRCR_CRR ) {

		/* Device has failed to abort a command and is almost
		 * certainly beyond repair.  Reset device, abandoning
		 * all state, and mark device as failed to avoid
		 * delays on any future command attempts.
		 */
		DBGC ( xhci, "XHCI %s failed to abort command\n", xhci->name );
		xhci_fail ( xhci );
	}

	/* Consume (and ignore) any final command status */
	xhci_event_poll ( xhci );

	/* Reset the command ring control register, restarting from the
	 * ring base with an initial cycle state of 1
	 */
	xhci_ring_reset ( &xhci->command );
	crp = dma ( &xhci->command.map, xhci->command.trb );
	xhci_writeq ( xhci, ( crp | XHCI_CRCR_RCS ), xhci->op + XHCI_OP_CRCR );
}
1840
1841 /**
1842 * Issue command and wait for completion
1843 *
1844 * @v xhci xHCI device
1845 * @v trb Transfer request block (with empty Cycle flag)
1846 * @ret rc Return status code
1847 *
1848 * On a successful completion, the TRB will be overwritten with the
1849 * completion.
1850 */
static int xhci_command ( struct xhci_device *xhci, union xhci_trb *trb ) {
	struct xhci_trb_complete *complete = &trb->complete;
	unsigned int i;
	int rc;

	/* Immediately fail all commands if command mechanism has failed */
	if ( xhci->failed ) {
		rc = -EPIPE;
		goto err_failed;
	}

	/* Sanity check: only one command may be in flight at a time */
	if ( xhci->pending ) {
		DBGC ( xhci, "XHCI %s command ring busy\n", xhci->name );
		rc = -EBUSY;
		goto err_pending;
	}

	/* Record the pending command; xhci_complete() will copy the
	 * completion event into this TRB and clear the pointer
	 */
	xhci->pending = trb;

	/* Enqueue the command */
	if ( ( rc = xhci_enqueue ( &xhci->command, NULL, trb ) ) != 0 )
		goto err_enqueue;

	/* Ring the command doorbell */
	xhci_doorbell ( &xhci->command );

	/* Busy-wait (polling the event ring) for the command to
	 * complete, in 1ms steps
	 */
	for ( i = 0 ; i < XHCI_COMMAND_MAX_WAIT_MS ; i++ ) {

		/* Poll event ring */
		xhci_event_poll ( xhci );

		/* Check for completion (pending pointer cleared by
		 * xhci_complete(); trb now holds the completion event)
		 */
		if ( ! xhci->pending ) {
			if ( complete->code != XHCI_CMPLT_SUCCESS ) {
				rc = -ECODE ( complete->code );
				DBGC ( xhci, "XHCI %s command failed (code "
				       "%d): %s\n", xhci->name, complete->code,
				       strerror ( rc ) );
				DBGC_HDA ( xhci, 0, trb, sizeof ( *trb ) );
				return rc;
			}
			return 0;
		}

		/* Delay */
		mdelay ( 1 );
	}

	/* Timeout */
	DBGC ( xhci, "XHCI %s timed out waiting for completion\n", xhci->name );
	rc = -ETIMEDOUT;

	/* Abort command (which resets the command ring) */
	xhci_abort ( xhci );

 err_enqueue:
	xhci->pending = NULL;
 err_pending:
 err_failed:
	return rc;
}
1915
1916 /**
1917 * Issue NOP and wait for completion
1918 *
1919 * @v xhci xHCI device
1920 * @ret rc Return status code
1921 */
1922 static inline int xhci_nop ( struct xhci_device *xhci ) {
1923 union xhci_trb trb;
1924 struct xhci_trb_common *nop = &trb.common;
1925 int rc;
1926
1927 /* Construct command */
1928 memset ( nop, 0, sizeof ( *nop ) );
1929 nop->flags = XHCI_TRB_IOC;
1930 nop->type = XHCI_TRB_NOP_CMD;
1931
1932 /* Issue command and wait for completion */
1933 if ( ( rc = xhci_command ( xhci, &trb ) ) != 0 ) {
1934 DBGC ( xhci, "XHCI %s NOP failed: %s\n",
1935 xhci->name, strerror ( rc ) );
1936 return rc;
1937 }
1938
1939 DBGC2 ( xhci, "XHCI %s NOP completed successfully\n", xhci->name );
1940 return 0;
1941 }
1942
1943 /**
1944 * Enable slot
1945 *
1946 * @v xhci xHCI device
1947 * @v type Slot type
1948 * @ret slot Device slot ID, or negative error
1949 */
1950 static inline int xhci_enable_slot ( struct xhci_device *xhci,
1951 unsigned int type ) {
1952 union xhci_trb trb;
1953 struct xhci_trb_enable_slot *enable = &trb.enable;
1954 struct xhci_trb_complete *enabled = &trb.complete;
1955 unsigned int slot;
1956 int rc;
1957
1958 /* Construct command */
1959 memset ( enable, 0, sizeof ( *enable ) );
1960 enable->slot = type;
1961 enable->type = XHCI_TRB_ENABLE_SLOT;
1962
1963 /* Issue command and wait for completion */
1964 if ( ( rc = xhci_command ( xhci, &trb ) ) != 0 ) {
1965 DBGC ( xhci, "XHCI %s could not enable new slot: %s\n",
1966 xhci->name, strerror ( rc ) );
1967 return rc;
1968 }
1969
1970 /* Extract slot number */
1971 slot = enabled->slot;
1972
1973 DBGC2 ( xhci, "XHCI %s slot %d enabled\n", xhci->name, slot );
1974 return slot;
1975 }
1976
1977 /**
1978 * Disable slot
1979 *
1980 * @v xhci xHCI device
1981 * @v slot Device slot
1982 * @ret rc Return status code
1983 */
1984 static inline int xhci_disable_slot ( struct xhci_device *xhci,
1985 unsigned int slot ) {
1986 union xhci_trb trb;
1987 struct xhci_trb_disable_slot *disable = &trb.disable;
1988 int rc;
1989
1990 /* Construct command */
1991 memset ( disable, 0, sizeof ( *disable ) );
1992 disable->type = XHCI_TRB_DISABLE_SLOT;
1993 disable->slot = slot;
1994
1995 /* Issue command and wait for completion */
1996 if ( ( rc = xhci_command ( xhci, &trb ) ) != 0 ) {
1997 DBGC ( xhci, "XHCI %s could not disable slot %d: %s\n",
1998 xhci->name, slot, strerror ( rc ) );
1999 return rc;
2000 }
2001
2002 DBGC2 ( xhci, "XHCI %s slot %d disabled\n", xhci->name, slot );
2003 return 0;
2004 }
2005
2006 /**
2007 * Issue context-based command and wait for completion
2008 *
2009 * @v xhci xHCI device
2010 * @v slot Device slot
2011 * @v endpoint Endpoint
2012 * @v type TRB type
2013 * @v populate Input context populater
2014 * @ret rc Return status code
2015 */
static int xhci_context ( struct xhci_device *xhci, struct xhci_slot *slot,
			  struct xhci_endpoint *endpoint, unsigned int type,
			  void ( * populate ) ( struct xhci_device *xhci,
						struct xhci_slot *slot,
						struct xhci_endpoint *endpoint,
						void *input ) ) {
	union xhci_trb trb;
	struct xhci_trb_context *context = &trb.context;
	struct dma_mapping map;
	size_t len;
	void *input;
	int rc;

	/* Allocate a zeroed input context (covering all endpoint
	 * contexts) in DMA-capable memory
	 */
	memset ( &map, 0, sizeof ( map ) );
	len = xhci_input_context_offset ( xhci, XHCI_CTX_END );
	input = dma_alloc ( xhci->dma, &map, len, xhci_align ( len ) );
	if ( ! input ) {
		rc = -ENOMEM;
		goto err_alloc;
	}
	memset ( input, 0, len );

	/* Populate input context via the caller-supplied callback */
	populate ( xhci, slot, endpoint, input );

	/* Construct command referencing the input context by its DMA
	 * address
	 */
	memset ( context, 0, sizeof ( *context ) );
	context->type = type;
	context->input = cpu_to_le64 ( dma ( &map, input ) );
	context->slot = slot->id;

	/* Issue command and wait for completion.  Success and failure
	 * share the same cleanup: the input context is needed only
	 * while the command executes.
	 */
	if ( ( rc = xhci_command ( xhci, &trb ) ) != 0 )
		goto err_command;

 err_command:
	dma_free ( &map, input, len );
 err_alloc:
	return rc;
}
2057
2058 /**
2059 * Populate address device input context
2060 *
2061 * @v xhci xHCI device
2062 * @v slot Device slot
2063 * @v endpoint Endpoint
2064 * @v input Input context
2065 */
2066 static void xhci_address_device_input ( struct xhci_device *xhci,
2067 struct xhci_slot *slot,
2068 struct xhci_endpoint *endpoint,
2069 void *input ) {
2070 struct xhci_trb_ring *ring = &endpoint->ring;
2071 struct xhci_control_context *control_ctx;
2072 struct xhci_slot_context *slot_ctx;
2073 struct xhci_endpoint_context *ep_ctx;
2074
2075 /* Sanity checks */
2076 assert ( endpoint->ctx == XHCI_CTX_EP0 );
2077
2078 /* Populate control context */
2079 control_ctx = input;
2080 control_ctx->add = cpu_to_le32 ( ( 1 << XHCI_CTX_SLOT ) |
2081 ( 1 << XHCI_CTX_EP0 ) );
2082
2083 /* Populate slot context */
2084 slot_ctx = ( input + xhci_input_context_offset ( xhci, XHCI_CTX_SLOT ));
2085 slot_ctx->info = cpu_to_le32 ( XHCI_SLOT_INFO ( 1, 0, slot->psiv,
2086 slot->route ) );
2087 slot_ctx->port = slot->port;
2088 slot_ctx->tt_id = slot->tt_id;
2089 slot_ctx->tt_port = slot->tt_port;
2090
2091 /* Populate control endpoint context */
2092 ep_ctx = ( input + xhci_input_context_offset ( xhci, XHCI_CTX_EP0 ) );
2093 ep_ctx->type = XHCI_EP_TYPE_CONTROL;
2094 ep_ctx->burst = endpoint->ep->burst;
2095 ep_ctx->mtu = cpu_to_le16 ( endpoint->ep->mtu );
2096 ep_ctx->dequeue = cpu_to_le64 ( dma ( &ring->map, ring->trb ) |
2097 XHCI_EP_DCS );
2098 ep_ctx->trb_len = cpu_to_le16 ( XHCI_EP0_TRB_LEN );
2099 }
2100
2101 /**
2102 * Address device
2103 *
2104 * @v xhci xHCI device
2105 * @v slot Device slot
2106 * @ret rc Return status code
2107 */
2108 static inline int xhci_address_device ( struct xhci_device *xhci,
2109 struct xhci_slot *slot ) {
2110 struct usb_device *usb = slot->usb;
2111 struct xhci_slot_context *slot_ctx;
2112 int rc;
2113
2114 /* Assign device address */
2115 if ( ( rc = xhci_context ( xhci, slot, slot->endpoint[XHCI_CTX_EP0],
2116 XHCI_TRB_ADDRESS_DEVICE,
2117 xhci_address_device_input ) ) != 0 ) {
2118 DBGC ( xhci, "XHCI %s slot %d could not assign address: %s\n",
2119 xhci->name, slot->id, strerror ( rc ) );
2120 return rc;
2121 }
2122
2123 /* Get assigned address */
2124 slot_ctx = ( slot->context +
2125 xhci_device_context_offset ( xhci, XHCI_CTX_SLOT ) );
2126 usb->address = slot_ctx->address;
2127 DBGC2 ( xhci, "XHCI %s slot %d assigned address %d to %s\n",
2128 xhci->name, slot->id, usb->address, usb->name );
2129
2130 return 0;
2131 }
2132
2133 /**
2134 * Populate configure endpoint input context
2135 *
2136 * @v xhci xHCI device
2137 * @v slot Device slot
2138 * @v endpoint Endpoint
2139 * @v input Input context
2140 */
2141 static void xhci_configure_endpoint_input ( struct xhci_device *xhci,
2142 struct xhci_slot *slot,
2143 struct xhci_endpoint *endpoint,
2144 void *input ) {
2145 struct xhci_trb_ring *ring = &endpoint->ring;
2146 struct xhci_control_context *control_ctx;
2147 struct xhci_slot_context *slot_ctx;
2148 struct xhci_endpoint_context *ep_ctx;
2149
2150 /* Populate control context */
2151 control_ctx = input;
2152 control_ctx->add = cpu_to_le32 ( ( 1 << XHCI_CTX_SLOT ) |
2153 ( 1 << endpoint->ctx ) );
2154
2155 /* Populate slot context */
2156 slot_ctx = ( input + xhci_input_context_offset ( xhci, XHCI_CTX_SLOT ));
2157 slot_ctx->info = cpu_to_le32 ( XHCI_SLOT_INFO ( ( XHCI_CTX_END - 1 ),
2158 ( slot->ports ? 1 : 0 ),
2159 slot->psiv, 0 ) );
2160 slot_ctx->ports = slot->ports;
2161
2162 /* Populate endpoint context */
2163 ep_ctx = ( input + xhci_input_context_offset ( xhci, endpoint->ctx ) );
2164 ep_ctx->interval = endpoint->interval;
2165 ep_ctx->type = endpoint->type;
2166 ep_ctx->burst = endpoint->ep->burst;
2167 ep_ctx->mtu = cpu_to_le16 ( endpoint->ep->mtu );
2168 ep_ctx->dequeue = cpu_to_le64 ( dma ( &ring->map, ring->trb ) |
2169 XHCI_EP_DCS );
2170 ep_ctx->trb_len = cpu_to_le16 ( endpoint->ep->mtu ); /* best guess */
2171 }
2172
2173 /**
2174 * Configure endpoint
2175 *
2176 * @v xhci xHCI device
2177 * @v slot Device slot
2178 * @v endpoint Endpoint
2179 * @ret rc Return status code
2180 */
2181 static inline int xhci_configure_endpoint ( struct xhci_device *xhci,
2182 struct xhci_slot *slot,
2183 struct xhci_endpoint *endpoint ) {
2184 int rc;
2185
2186 /* Configure endpoint */
2187 if ( ( rc = xhci_context ( xhci, slot, endpoint,
2188 XHCI_TRB_CONFIGURE_ENDPOINT,
2189 xhci_configure_endpoint_input ) ) != 0 ) {
2190 DBGC ( xhci, "XHCI %s slot %d ctx %d could not configure: %s\n",
2191 xhci->name, slot->id, endpoint->ctx, strerror ( rc ) );
2192 return rc;
2193 }
2194
2195 DBGC2 ( xhci, "XHCI %s slot %d ctx %d configured\n",
2196 xhci->name, slot->id, endpoint->ctx );
2197 return 0;
2198 }
2199
2200 /**
2201 * Populate deconfigure endpoint input context
2202 *
2203 * @v xhci xHCI device
2204 * @v slot Device slot
2205 * @v endpoint Endpoint
2206 * @v input Input context
2207 */
2208 static void
2209 xhci_deconfigure_endpoint_input ( struct xhci_device *xhci __unused,
2210 struct xhci_slot *slot __unused,
2211 struct xhci_endpoint *endpoint,
2212 void *input ) {
2213 struct xhci_control_context *control_ctx;
2214 struct xhci_slot_context *slot_ctx;
2215
2216 /* Populate control context */
2217 control_ctx = input;
2218 control_ctx->add = cpu_to_le32 ( 1 << XHCI_CTX_SLOT );
2219 control_ctx->drop = cpu_to_le32 ( 1 << endpoint->ctx );
2220
2221 /* Populate slot context */
2222 slot_ctx = ( input + xhci_input_context_offset ( xhci, XHCI_CTX_SLOT ));
2223 slot_ctx->info = cpu_to_le32 ( XHCI_SLOT_INFO ( ( XHCI_CTX_END - 1 ),
2224 0, 0, 0 ) );
2225 }
2226
2227 /**
2228 * Deconfigure endpoint
2229 *
2230 * @v xhci xHCI device
2231 * @v slot Device slot
2232 * @v endpoint Endpoint
2233 * @ret rc Return status code
2234 */
2235 static inline int xhci_deconfigure_endpoint ( struct xhci_device *xhci,
2236 struct xhci_slot *slot,
2237 struct xhci_endpoint *endpoint ) {
2238 int rc;
2239
2240 /* Deconfigure endpoint */
2241 if ( ( rc = xhci_context ( xhci, slot, endpoint,
2242 XHCI_TRB_CONFIGURE_ENDPOINT,
2243 xhci_deconfigure_endpoint_input ) ) != 0 ) {
2244 DBGC ( xhci, "XHCI %s slot %d ctx %d could not deconfigure: "
2245 "%s\n", xhci->name, slot->id, endpoint->ctx,
2246 strerror ( rc ) );
2247 return rc;
2248 }
2249
2250 DBGC2 ( xhci, "XHCI %s slot %d ctx %d deconfigured\n",
2251 xhci->name, slot->id, endpoint->ctx );
2252 return 0;
2253 }
2254
2255 /**
2256 * Populate evaluate context input context
2257 *
2258 * @v xhci xHCI device
2259 * @v slot Device slot
2260 * @v endpoint Endpoint
2261 * @v input Input context
2262 */
2263 static void xhci_evaluate_context_input ( struct xhci_device *xhci,
2264 struct xhci_slot *slot __unused,
2265 struct xhci_endpoint *endpoint,
2266 void *input ) {
2267 struct xhci_control_context *control_ctx;
2268 struct xhci_slot_context *slot_ctx;
2269 struct xhci_endpoint_context *ep_ctx;
2270
2271 /* Populate control context */
2272 control_ctx = input;
2273 control_ctx->add = cpu_to_le32 ( ( 1 << XHCI_CTX_SLOT ) |
2274 ( 1 << endpoint->ctx ) );
2275
2276 /* Populate slot context */
2277 slot_ctx = ( input + xhci_input_context_offset ( xhci, XHCI_CTX_SLOT ));
2278 slot_ctx->info = cpu_to_le32 ( XHCI_SLOT_INFO ( ( XHCI_CTX_END - 1 ),
2279 0, 0, 0 ) );
2280
2281 /* Populate endpoint context */
2282 ep_ctx = ( input + xhci_input_context_offset ( xhci, endpoint->ctx ) );
2283 ep_ctx->mtu = cpu_to_le16 ( endpoint->ep->mtu );
2284 }
2285
/**
 * Evaluate context
 *
 * Issues an Evaluate Context command, used to inform the controller
 * of updated context fields (here, a revised endpoint MTU).
 *
 * @v xhci		xHCI device
 * @v slot		Device slot
 * @v endpoint		Endpoint
 * @ret rc		Return status code
 */
static inline int xhci_evaluate_context ( struct xhci_device *xhci,
					  struct xhci_slot *slot,
					  struct xhci_endpoint *endpoint ) {
	int rc;

	/* Evaluate context (issue command and wait for completion) */
	if ( ( rc = xhci_context ( xhci, slot, endpoint,
				   XHCI_TRB_EVALUATE_CONTEXT,
				   xhci_evaluate_context_input ) ) != 0 ) {
		DBGC ( xhci, "XHCI %s slot %d ctx %d could not (re-)evaluate: "
		       "%s\n", xhci->name, slot->id, endpoint->ctx,
		       strerror ( rc ) );
		return rc;
	}

	DBGC2 ( xhci, "XHCI %s slot %d ctx %d (re-)evaluated\n",
		xhci->name, slot->id, endpoint->ctx );
	return 0;
}
2313
2314 /**
2315 * Reset endpoint
2316 *
2317 * @v xhci xHCI device
2318 * @v slot Device slot
2319 * @v endpoint Endpoint
2320 * @ret rc Return status code
2321 */
2322 static inline int xhci_reset_endpoint ( struct xhci_device *xhci,
2323 struct xhci_slot *slot,
2324 struct xhci_endpoint *endpoint ) {
2325 union xhci_trb trb;
2326 struct xhci_trb_reset_endpoint *reset = &trb.reset;
2327 int rc;
2328
2329 /* Construct command */
2330 memset ( reset, 0, sizeof ( *reset ) );
2331 reset->slot = slot->id;
2332 reset->endpoint = endpoint->ctx;
2333 reset->type = XHCI_TRB_RESET_ENDPOINT;
2334
2335 /* Issue command and wait for completion */
2336 if ( ( rc = xhci_command ( xhci, &trb ) ) != 0 ) {
2337 DBGC ( xhci, "XHCI %s slot %d ctx %d could not reset endpoint "
2338 "in state %d: %s\n", xhci->name, slot->id, endpoint->ctx,
2339 endpoint->context->state, strerror ( rc ) );
2340 return rc;
2341 }
2342
2343 return 0;
2344 }
2345
2346 /**
2347 * Stop endpoint
2348 *
2349 * @v xhci xHCI device
2350 * @v slot Device slot
2351 * @v endpoint Endpoint
2352 * @ret rc Return status code
2353 */
2354 static inline int xhci_stop_endpoint ( struct xhci_device *xhci,
2355 struct xhci_slot *slot,
2356 struct xhci_endpoint *endpoint ) {
2357 union xhci_trb trb;
2358 struct xhci_trb_stop_endpoint *stop = &trb.stop;
2359 int rc;
2360
2361 /* Construct command */
2362 memset ( stop, 0, sizeof ( *stop ) );
2363 stop->slot = slot->id;
2364 stop->endpoint = endpoint->ctx;
2365 stop->type = XHCI_TRB_STOP_ENDPOINT;
2366
2367 /* Issue command and wait for completion */
2368 if ( ( rc = xhci_command ( xhci, &trb ) ) != 0 ) {
2369 DBGC ( xhci, "XHCI %s slot %d ctx %d could not stop endpoint "
2370 "in state %d: %s\n", xhci->name, slot->id, endpoint->ctx,
2371 endpoint->context->state, strerror ( rc ) );
2372 return rc;
2373 }
2374
2375 return 0;
2376 }
2377
/**
 * Set transfer ring dequeue pointer
 *
 * Resynchronises the hardware's view of the transfer ring with the
 * driver's consumer counter, including the correct cycle state.
 *
 * @v xhci		xHCI device
 * @v slot		Device slot
 * @v endpoint		Endpoint
 * @ret rc		Return status code
 */
static inline int
xhci_set_tr_dequeue_pointer ( struct xhci_device *xhci,
			      struct xhci_slot *slot,
			      struct xhci_endpoint *endpoint ) {
	union xhci_trb trb;
	struct xhci_trb_set_tr_dequeue_pointer *dequeue = &trb.dequeue;
	struct xhci_trb_ring *ring = &endpoint->ring;
	unsigned int cons;
	unsigned int mask;
	unsigned int index;
	unsigned int dcs;
	physaddr_t addr;
	int rc;

	/* Construct command */
	memset ( dequeue, 0, sizeof ( *dequeue ) );
	cons = ring->cons;
	mask = ring->mask;
	/* ( cons >> shift ) counts completed passes around the ring;
	 * the cycle state toggles on each pass, so its inverted low
	 * bit (masked to XHCI_EP_DCS) gives the current dequeue cycle
	 * state.
	 */
	dcs = ( ( ~( cons >> ring->shift ) ) & XHCI_EP_DCS );
	index = ( cons & mask );
	/* Encode TRB bus address and cycle state in a single field */
	addr = dma ( &ring->map, &ring->trb[index] );
	dequeue->dequeue = cpu_to_le64 ( addr | dcs );
	dequeue->slot = slot->id;
	dequeue->endpoint = endpoint->ctx;
	dequeue->type = XHCI_TRB_SET_TR_DEQUEUE_POINTER;

	/* Issue command and wait for completion */
	if ( ( rc = xhci_command ( xhci, &trb ) ) != 0 ) {
		DBGC ( xhci, "XHCI %s slot %d ctx %d could not set TR dequeue "
		       "pointer in state %d: %s\n", xhci->name, slot->id,
		       endpoint->ctx, endpoint->context->state, strerror ( rc));
		return rc;
	}

	return 0;
}
2422
2423 /******************************************************************************
2424 *
2425 * Endpoint operations
2426 *
2427 ******************************************************************************
2428 */
2429
/**
 * Open endpoint
 *
 * Allocates the per-endpoint state and transfer ring, and (for
 * non-control endpoints) issues a Configure Endpoint command.
 *
 * @v ep		USB endpoint
 * @ret rc		Return status code
 */
static int xhci_endpoint_open ( struct usb_endpoint *ep ) {
	struct usb_device *usb = ep->usb;
	struct xhci_slot *slot = usb_get_hostdata ( usb );
	struct xhci_device *xhci = slot->xhci;
	struct xhci_endpoint *endpoint;
	unsigned int ctx;
	unsigned int type;
	unsigned int interval;
	int rc;

	/* Calculate context index (must not already be in use) */
	ctx = XHCI_CTX ( ep->address );
	assert ( slot->endpoint[ctx] == NULL );

	/* Calculate endpoint type from transfer type and direction */
	type = XHCI_EP_TYPE ( ep->attributes & USB_ENDPOINT_ATTR_TYPE_MASK );
	if ( type == XHCI_EP_TYPE ( USB_ENDPOINT_ATTR_CONTROL ) )
		type = XHCI_EP_TYPE_CONTROL;
	if ( ep->address & USB_DIR_IN )
		type |= XHCI_EP_TYPE_IN;

	/* Calculate interval: periodic endpoints use a log2-encoded
	 * interval ( fls ( x ) - 1 == floor ( log2 ( x ) ) ).
	 */
	if ( type & XHCI_EP_TYPE_PERIODIC ) {
		interval = ( fls ( ep->interval ) - 1 );
	} else {
		interval = ep->interval;
	}

	/* Allocate and initialise structure */
	endpoint = zalloc ( sizeof ( *endpoint ) );
	if ( ! endpoint ) {
		rc = -ENOMEM;
		goto err_alloc;
	}
	usb_endpoint_set_hostdata ( ep, endpoint );
	slot->endpoint[ctx] = endpoint;
	endpoint->xhci = xhci;
	endpoint->slot = slot;
	endpoint->ep = ep;
	endpoint->ctx = ctx;
	endpoint->type = type;
	endpoint->interval = interval;
	/* Cache pointer to this endpoint's output device context */
	endpoint->context = ( ( ( void * ) slot->context ) +
			      xhci_device_context_offset ( xhci, ctx ) );

	/* Allocate transfer ring */
	if ( ( rc = xhci_ring_alloc ( xhci, &endpoint->ring,
				      XHCI_TRANSFER_TRBS_LOG2,
				      slot->id, ctx, 0 ) ) != 0 )
		goto err_ring_alloc;

	/* Configure endpoint, if applicable (EP0 is configured as part
	 * of Address Device instead).
	 */
	if ( ( ctx != XHCI_CTX_EP0 ) &&
	     ( ( rc = xhci_configure_endpoint ( xhci, slot, endpoint ) ) != 0 ))
		goto err_configure_endpoint;

	DBGC2 ( xhci, "XHCI %s slot %d ctx %d ring [%08lx,%08lx)\n",
		xhci->name, slot->id, ctx, virt_to_phys ( endpoint->ring.trb ),
		( virt_to_phys ( endpoint->ring.trb ) + endpoint->ring.len ) );
	return 0;

	/* Unreachable on success; documents the unwind step paired with
	 * err_configure_endpoint below.
	 */
	xhci_deconfigure_endpoint ( xhci, slot, endpoint );
 err_configure_endpoint:
	xhci_ring_free ( &endpoint->ring );
 err_ring_alloc:
	slot->endpoint[ctx] = NULL;
	free ( endpoint );
 err_alloc:
	return rc;
}
2506
/**
 * Close endpoint
 *
 * Deconfigures the endpoint (where applicable), fails any transfers
 * still on the ring, and frees the endpoint state.
 *
 * @v ep		USB endpoint
 */
static void xhci_endpoint_close ( struct usb_endpoint *ep ) {
	struct xhci_endpoint *endpoint = usb_endpoint_get_hostdata ( ep );
	struct xhci_slot *slot = endpoint->slot;
	struct xhci_device *xhci = slot->xhci;
	struct io_buffer *iobuf;
	unsigned int ctx = endpoint->ctx;

	/* Deconfigure endpoint, if applicable (EP0 has no separate
	 * Configure Endpoint to undo).
	 */
	if ( ctx != XHCI_CTX_EP0 )
		xhci_deconfigure_endpoint ( xhci, slot, endpoint );

	/* Cancel any incomplete transfers: unmap each buffer and
	 * complete it to the caller with -ECANCELED.
	 */
	while ( xhci_ring_fill ( &endpoint->ring ) ) {
		iobuf = xhci_dequeue_multi ( &endpoint->ring );
		iob_unmap ( iobuf );
		usb_complete_err ( ep, iobuf, -ECANCELED );
	}

	/* Free endpoint */
	xhci_ring_free ( &endpoint->ring );
	slot->endpoint[ctx] = NULL;
	free ( endpoint );
}
2535
2536 /**
2537 * Reset endpoint
2538 *
2539 * @v ep USB endpoint
2540 * @ret rc Return status code
2541 */
2542 static int xhci_endpoint_reset ( struct usb_endpoint *ep ) {
2543 struct xhci_endpoint *endpoint = usb_endpoint_get_hostdata ( ep );
2544 struct xhci_slot *slot = endpoint->slot;
2545 struct xhci_device *xhci = slot->xhci;
2546 int rc;
2547
2548 /* Reset endpoint context */
2549 if ( ( rc = xhci_reset_endpoint ( xhci, slot, endpoint ) ) != 0 )
2550 return rc;
2551
2552 /* Set transfer ring dequeue pointer */
2553 if ( ( rc = xhci_set_tr_dequeue_pointer ( xhci, slot, endpoint ) ) != 0)
2554 return rc;
2555
2556 /* Ring doorbell to resume processing */
2557 xhci_doorbell ( &endpoint->ring );
2558
2559 DBGC ( xhci, "XHCI %s slot %d ctx %d reset\n",
2560 xhci->name, slot->id, endpoint->ctx );
2561 return 0;
2562 }
2563
2564 /**
2565 * Update MTU
2566 *
2567 * @v ep USB endpoint
2568 * @ret rc Return status code
2569 */
2570 static int xhci_endpoint_mtu ( struct usb_endpoint *ep ) {
2571 struct xhci_endpoint *endpoint = usb_endpoint_get_hostdata ( ep );
2572 struct xhci_slot *slot = endpoint->slot;
2573 struct xhci_device *xhci = slot->xhci;
2574 int rc;
2575
2576 /* Evalulate context */
2577 if ( ( rc = xhci_evaluate_context ( xhci, slot, endpoint ) ) != 0 )
2578 return rc;
2579
2580 return 0;
2581 }
2582
/**
 * Enqueue message transfer
 *
 * Builds a control transfer as up to three TRBs (setup, optional
 * data, status) and enqueues it on the endpoint's transfer ring.
 *
 * @v ep		USB endpoint
 * @v iobuf		I/O buffer (setup packet followed by any data)
 * @ret rc		Return status code
 */
static int xhci_endpoint_message ( struct usb_endpoint *ep,
				   struct io_buffer *iobuf ) {
	struct xhci_endpoint *endpoint = usb_endpoint_get_hostdata ( ep );
	struct xhci_device *xhci = endpoint->xhci;
	struct usb_setup_packet *packet;
	unsigned int input;
	size_t len;
	union xhci_trb trbs[ 1 /* setup */ + 1 /* possible data */ +
			     1 /* status */ ];
	union xhci_trb *trb = trbs;
	struct xhci_trb_setup *setup;
	struct xhci_trb_data *data;
	struct xhci_trb_status *status;
	int rc;

	/* Profile message transfers */
	profile_start ( &xhci_message_profiler );

	/* Construct setup stage TRB.  The setup packet is consumed
	 * from the front of the I/O buffer and embedded directly in
	 * the TRB (immediate data, XHCI_TRB_IDT).
	 */
	memset ( trbs, 0, sizeof ( trbs ) );
	assert ( iob_len ( iobuf ) >= sizeof ( *packet ) );
	packet = iobuf->data;
	iob_pull ( iobuf, sizeof ( *packet ) );
	setup = &(trb++)->setup;
	memcpy ( &setup->packet, packet, sizeof ( setup->packet ) );
	setup->len = cpu_to_le32 ( sizeof ( *packet ) );
	setup->flags = XHCI_TRB_IDT;
	setup->type = XHCI_TRB_SETUP;
	/* Direction is taken from the request's USB_DIR_IN bit, and
	 * is recorded in the setup TRB only when a data stage exists.
	 */
	len = iob_len ( iobuf );
	input = ( packet->request & cpu_to_le16 ( USB_DIR_IN ) );
	if ( len )
		setup->direction = ( input ? XHCI_SETUP_IN : XHCI_SETUP_OUT );

	/* Map I/O buffer for DMA in the appropriate direction */
	if ( ( rc = iob_map ( iobuf, xhci->dma, len,
			      ( input ? DMA_RX : DMA_TX ) ) ) != 0 )
		goto err_map;

	/* Construct data stage TRB, if applicable */
	if ( len ) {
		data = &(trb++)->data;
		data->data = cpu_to_le64 ( iob_dma ( iobuf ) );
		data->len = cpu_to_le32 ( len );
		data->type = XHCI_TRB_DATA;
		data->direction = ( input ? XHCI_DATA_IN : XHCI_DATA_OUT );
	}

	/* Construct status stage TRB.  The status stage runs opposite
	 * to the data stage direction (IN when there is no data stage)
	 * and generates the completion interrupt (XHCI_TRB_IOC).
	 */
	status = &(trb++)->status;
	status->flags = XHCI_TRB_IOC;
	status->type = XHCI_TRB_STATUS;
	status->direction =
		( ( len && input ) ? XHCI_STATUS_OUT : XHCI_STATUS_IN );

	/* Enqueue TRBs (trb - trbs gives the number actually built) */
	if ( ( rc = xhci_enqueue_multi ( &endpoint->ring, iobuf, trbs,
					 ( trb - trbs ) ) ) != 0 )
		goto err_enqueue;

	/* Ring the doorbell */
	xhci_doorbell ( &endpoint->ring );

	profile_stop ( &xhci_message_profiler );
	return 0;

 err_enqueue:
	iob_unmap ( iobuf );
 err_map:
	return rc;
}
2660
2661 /**
2662 * Calculate number of TRBs
2663 *
2664 * @v len Length of data
2665 * @v zlp Append a zero-length packet
2666 * @ret count Number of transfer descriptors
2667 */
2668 static unsigned int xhci_endpoint_count ( size_t len, int zlp ) {
2669 unsigned int count;
2670
2671 /* Split into 64kB TRBs */
2672 count = ( ( len + XHCI_MTU - 1 ) / XHCI_MTU );
2673
2674 /* Append a zero-length TRB if applicable */
2675 if ( zlp || ( count == 0 ) )
2676 count++;
2677
2678 return count;
2679 }
2680
/**
 * Enqueue stream transfer
 *
 * Splits the buffer into chained normal TRBs of at most XHCI_MTU
 * bytes each and enqueues them as a single transfer (plus an optional
 * trailing zero-length packet).
 *
 * @v ep		USB endpoint
 * @v iobuf		I/O buffer
 * @v zlp		Append a zero-length packet
 * @ret rc		Return status code
 */
static int xhci_endpoint_stream ( struct usb_endpoint *ep,
				  struct io_buffer *iobuf, int zlp ) {
	struct xhci_endpoint *endpoint = usb_endpoint_get_hostdata ( ep );
	struct xhci_device *xhci = endpoint->xhci;
	size_t len = iob_len ( iobuf );
	unsigned int count = xhci_endpoint_count ( len, zlp );
	union xhci_trb trbs[count];
	union xhci_trb *trb = trbs;
	struct xhci_trb_normal *normal;
	physaddr_t data;
	unsigned int i;
	size_t trb_len;
	int rc;

	/* Profile stream transfers */
	profile_start ( &xhci_stream_profiler );

	/* Map I/O buffer; DMA direction follows the endpoint's
	 * direction bit.
	 */
	if ( ( rc = iob_map ( iobuf, xhci->dma, len,
			      ( ( ep->address & USB_DIR_IN ) ?
				DMA_RX : DMA_TX ) ) ) != 0 )
		goto err_map;
	data = iob_dma ( iobuf );

	/* Construct normal TRBs, all chained (XHCI_TRB_CH) for now;
	 * the final TRB's length is the remaining data (possibly zero).
	 */
	memset ( &trbs, 0, sizeof ( trbs ) );
	for ( i = 0 ; i < count ; i ++ ) {

		/* Calculate TRB length (capped at XHCI_MTU) */
		trb_len = XHCI_MTU;
		if ( trb_len > len )
			trb_len = len;

		/* Construct normal TRB */
		normal = &trb->normal;
		normal->data = cpu_to_le64 ( data );
		normal->len = cpu_to_le32 ( trb_len );
		normal->type = XHCI_TRB_NORMAL;
		normal->flags = XHCI_TRB_CH;

		/* Move to next TRB */
		data += trb_len;
		len -= trb_len;
		trb++;
	}

	/* Mark zero-length packet (if present) as a separate transfer
	 * by clearing the chain bit on the TRB preceding it.
	 */
	if ( zlp && ( count > 1 ) )
		trb[-2].normal.flags = 0;

	/* Generate completion for final TRB (replaces its chain bit) */
	trb[-1].normal.flags = XHCI_TRB_IOC;

	/* Enqueue TRBs */
	if ( ( rc = xhci_enqueue_multi ( &endpoint->ring, iobuf, trbs,
					 count ) ) != 0 )
		goto err_enqueue;

	/* Ring the doorbell */
	xhci_doorbell ( &endpoint->ring );

	profile_stop ( &xhci_stream_profiler );
	return 0;

 err_enqueue:
	iob_unmap ( iobuf );
 err_map:
	return rc;
}
2758
2759 /******************************************************************************
2760 *
2761 * Device operations
2762 *
2763 ******************************************************************************
2764 */
2765
/**
 * Open device
 *
 * Enables a device slot, allocates its output device context, and
 * records the slot in the DCBAA.
 *
 * @v usb		USB device
 * @ret rc		Return status code
 */
static int xhci_device_open ( struct usb_device *usb ) {
	struct xhci_device *xhci = usb_bus_get_hostdata ( usb->port->hub->bus );
	struct usb_port *tt = usb_transaction_translator ( usb );
	struct xhci_slot *slot;
	struct xhci_slot *tt_slot;
	size_t len;
	int type;
	int id;
	int rc;

	/* Determine applicable slot type for this root hub port */
	type = xhci_port_slot_type ( xhci, usb->port->address );
	if ( type < 0 ) {
		rc = type;
		DBGC ( xhci, "XHCI %s-%d has no slot type\n",
		       xhci->name, usb->port->address );
		goto err_type;
	}

	/* Allocate a device slot number from the controller */
	id = xhci_enable_slot ( xhci, type );
	if ( id < 0 ) {
		rc = id;
		goto err_enable_slot;
	}
	assert ( ( id > 0 ) && ( ( unsigned int ) id <= xhci->slots ) );
	assert ( xhci->slot[id] == NULL );

	/* Allocate and initialise structure */
	slot = zalloc ( sizeof ( *slot ) );
	if ( ! slot ) {
		rc = -ENOMEM;
		goto err_alloc;
	}
	usb_set_hostdata ( usb, slot );
	xhci->slot[id] = slot;
	slot->xhci = xhci;
	slot->usb = usb;
	slot->id = id;
	/* Record transaction translator (if the device sits behind a
	 * high-speed hub).
	 */
	if ( tt ) {
		tt_slot = usb_get_hostdata ( tt->hub->usb );
		slot->tt_id = tt_slot->id;
		slot->tt_port = tt->address;
	}

	/* Allocate a zeroed, suitably aligned output device context */
	len = xhci_device_context_offset ( xhci, XHCI_CTX_END );
	slot->context = dma_alloc ( xhci->dma, &slot->map, len,
				    xhci_align ( len ) );
	if ( ! slot->context ) {
		rc = -ENOMEM;
		goto err_alloc_context;
	}
	memset ( slot->context, 0, len );

	/* Set device context base address in the DCBAA */
	assert ( xhci->dcbaa.context[id] == 0 );
	xhci->dcbaa.context[id] = cpu_to_le64 ( dma ( &slot->map,
						      slot->context ) );

	DBGC2 ( xhci, "XHCI %s slot %d device context [%08lx,%08lx) for %s\n",
		xhci->name, slot->id, virt_to_phys ( slot->context ),
		( virt_to_phys ( slot->context ) + len ), usb->name );
	return 0;

	/* Unreachable on success; documents the unwind steps paired
	 * with err_alloc_context below.
	 */
	xhci->dcbaa.context[id] = 0;
	dma_free ( &slot->map, slot->context, len );
 err_alloc_context:
	xhci->slot[id] = NULL;
	free ( slot );
 err_alloc:
	xhci_disable_slot ( xhci, id );
 err_enable_slot:
 err_type:
	return rc;
}
2848
/**
 * Close device
 *
 * Disables the device slot and frees its context memory.  If the
 * controller refuses to disable the slot, the context memory is
 * deliberately leaked rather than freed, since the controller may
 * still write to it.
 *
 * @v usb		USB device
 */
static void xhci_device_close ( struct usb_device *usb ) {
	struct xhci_slot *slot = usb_get_hostdata ( usb );
	struct xhci_device *xhci = slot->xhci;
	size_t len = xhci_device_context_offset ( xhci, XHCI_CTX_END );
	unsigned int id = slot->id;
	int rc;

	/* Disable slot */
	if ( ( rc = xhci_disable_slot ( xhci, id ) ) != 0 ) {
		/* Slot is still enabled.  Leak the slot context,
		 * since the controller may still write to this
		 * memory, and leave the DCBAA entry intact.
		 *
		 * If the controller later reports that this same slot
		 * has been re-enabled, then some assertions will be
		 * triggered.
		 */
		DBGC ( xhci, "XHCI %s slot %d leaking context memory\n",
		       xhci->name, slot->id );
		slot->context = NULL;
	}

	/* Free slot (context is NULL here only on the leak path above) */
	if ( slot->context ) {
		dma_free ( &slot->map, slot->context, len );
		xhci->dcbaa.context[id] = 0;
	}
	xhci->slot[id] = NULL;
	free ( slot );
}
2884
2885 /**
2886 * Assign device address
2887 *
2888 * @v usb USB device
2889 * @ret rc Return status code
2890 */
2891 static int xhci_device_address ( struct usb_device *usb ) {
2892 struct xhci_slot *slot = usb_get_hostdata ( usb );
2893 struct xhci_device *xhci = slot->xhci;
2894 struct usb_port *root_port;
2895 int psiv;
2896 int rc;
2897
2898 /* Calculate route string */
2899 slot->route = usb_route_string ( usb );
2900
2901 /* Calculate root hub port number */
2902 root_port = usb_root_hub_port ( usb );
2903 slot->port = root_port->address;
2904
2905 /* Calculate protocol speed ID */
2906 psiv = xhci_port_psiv ( xhci, slot->port, usb->speed );
2907 if ( psiv < 0 ) {
2908 rc = psiv;
2909 return rc;
2910 }
2911 slot->psiv = psiv;
2912
2913 /* Address device */
2914 if ( ( rc = xhci_address_device ( xhci, slot ) ) != 0 )
2915 return rc;
2916
2917 return 0;
2918 }
2919
2920 /******************************************************************************
2921 *
2922 * Bus operations
2923 *
2924 ******************************************************************************
2925 */
2926
/**
 * Open USB bus
 *
 * Allocates all controller data structures (slot array, DCBAA,
 * scratchpad, command and event rings) and starts the controller.
 *
 * @v bus		USB bus
 * @ret rc		Return status code
 */
static int xhci_bus_open ( struct usb_bus *bus ) {
	struct xhci_device *xhci = usb_bus_get_hostdata ( bus );
	int rc;

	/* Allocate device slot array (slot IDs are 1-based, hence +1) */
	xhci->slot = zalloc ( ( xhci->slots + 1 ) * sizeof ( xhci->slot[0] ) );
	if ( ! xhci->slot ) {
		rc = -ENOMEM;
		goto err_slot_alloc;
	}

	/* Allocate device context base address array */
	if ( ( rc = xhci_dcbaa_alloc ( xhci ) ) != 0 )
		goto err_dcbaa_alloc;

	/* Allocate scratchpad buffers */
	if ( ( rc = xhci_scratchpad_alloc ( xhci ) ) != 0 )
		goto err_scratchpad_alloc;

	/* Allocate command ring */
	if ( ( rc = xhci_command_alloc ( xhci ) ) != 0 )
		goto err_command_alloc;

	/* Allocate event ring */
	if ( ( rc = xhci_event_alloc ( xhci ) ) != 0 )
		goto err_event_alloc;

	/* Start controller */
	xhci_run ( xhci );

	return 0;

	/* Unreachable on success; documents the unwind steps paired
	 * with the error labels below.
	 */
	xhci_stop ( xhci );
	xhci_event_free ( xhci );
 err_event_alloc:
	xhci_command_free ( xhci );
 err_command_alloc:
	xhci_scratchpad_free ( xhci );
 err_scratchpad_alloc:
	xhci_dcbaa_free ( xhci );
 err_dcbaa_alloc:
	free ( xhci->slot );
 err_slot_alloc:
	return rc;
}
2978
2979 /**
2980 * Close USB bus
2981 *
2982 * @v bus USB bus
2983 */
2984 static void xhci_bus_close ( struct usb_bus *bus ) {
2985 struct xhci_device *xhci = usb_bus_get_hostdata ( bus );
2986 unsigned int i;
2987
2988 /* Sanity checks */
2989 assert ( xhci->slot != NULL );
2990 for ( i = 0 ; i <= xhci->slots ; i++ )
2991 assert ( xhci->slot[i] == NULL );
2992
2993 xhci_stop ( xhci );
2994 xhci_event_free ( xhci );
2995 xhci_command_free ( xhci );
2996 xhci_scratchpad_free ( xhci );
2997 xhci_dcbaa_free ( xhci );
2998 free ( xhci->slot );
2999 }
3000
/**
 * Poll USB bus
 *
 * @v bus		USB bus
 */
static void xhci_bus_poll ( struct usb_bus *bus ) {
	struct xhci_device *xhci;

	/* Process any pending events on the event ring */
	xhci = usb_bus_get_hostdata ( bus );
	xhci_event_poll ( xhci );
}
3012
3013 /******************************************************************************
3014 *
3015 * Hub operations
3016 *
3017 ******************************************************************************
3018 */
3019
3020 /**
3021 * Open hub
3022 *
3023 * @v hub USB hub
3024 * @ret rc Return status code
3025 */
3026 static int xhci_hub_open ( struct usb_hub *hub ) {
3027 struct xhci_slot *slot;
3028
3029 /* Do nothing if this is the root hub */
3030 if ( ! hub->usb )
3031 return 0;
3032
3033 /* Get device slot */
3034 slot = usb_get_hostdata ( hub->usb );
3035
3036 /* Update device slot hub parameters. We don't inform the
3037 * hardware of this information until the hub's interrupt
3038 * endpoint is opened, since the only mechanism for so doing
3039 * provided by the xHCI specification is a Configure Endpoint
3040 * command, and we can't issue that command until we have a
3041 * non-EP0 endpoint to configure.
3042 */
3043 slot->ports = hub->ports;
3044
3045 return 0;
3046 }
3047
3048 /**
3049 * Close hub
3050 *
3051 * @v hub USB hub
3052 */
3053 static void xhci_hub_close ( struct usb_hub *hub __unused ) {
3054
3055 /* Nothing to do */
3056 }
3057
3058 /******************************************************************************
3059 *
3060 * Root hub operations
3061 *
3062 ******************************************************************************
3063 */
3064
/**
 * Open root hub
 *
 * Powers up all root hub ports and forces USB3 ports out of the
 * Disabled link state.
 *
 * @v hub		USB hub
 * @ret rc		Return status code
 */
static int xhci_root_open ( struct usb_hub *hub ) {
	struct xhci_device *xhci = usb_hub_get_drvdata ( hub );
	struct usb_port *port;
	uint32_t portsc;
	unsigned int i;

	/* Enable power to all ports (read-modify-write, preserving
	 * only the bits that are safe to write back).
	 */
	for ( i = 1 ; i <= xhci->ports ; i++ ) {
		portsc = readl ( xhci->op + XHCI_OP_PORTSC ( i ) );
		portsc &= XHCI_PORTSC_PRESERVE;
		portsc |= XHCI_PORTSC_PP;
		writel ( portsc, xhci->op + XHCI_OP_PORTSC ( i ) );
	}

	/* xHCI spec requires us to potentially wait 20ms after
	 * enabling power to a port.
	 */
	mdelay ( XHCI_PORT_POWER_DELAY_MS );

	/* USB3 ports may power up as Disabled; force their link state
	 * to RxDetect so that device connection can be detected.
	 */
	for ( i = 1 ; i <= xhci->ports ; i++ ) {
		portsc = readl ( xhci->op + XHCI_OP_PORTSC ( i ) );
		port = usb_port ( hub, i );
		if ( ( port->protocol >= USB_PROTO_3_0 ) &&
		     ( ( portsc & XHCI_PORTSC_PLS_MASK ) ==
		       XHCI_PORTSC_PLS_DISABLED ) ) {
			/* Force link state to RxDetect (LWS makes the
			 * PLS field writable).
			 */
			portsc &= XHCI_PORTSC_PRESERVE;
			portsc |= ( XHCI_PORTSC_PLS_RXDETECT | XHCI_PORTSC_LWS);
			writel ( portsc, xhci->op + XHCI_OP_PORTSC ( i ) );
		}
	}

	/* Some xHCI cards seem to require an additional delay after
	 * setting the link state to RxDetect.
	 */
	mdelay ( XHCI_LINK_STATE_DELAY_MS );

	return 0;
}
3111
3112 /**
3113 * Close root hub
3114 *
3115 * @v hub USB hub
3116 */
3117 static void xhci_root_close ( struct usb_hub *hub __unused ) {
3118
3119 /* Nothing to do */
3120 }
3121
3122 /**
3123 * Enable port
3124 *
3125 * @v hub USB hub
3126 * @v port USB port
3127 * @ret rc Return status code
3128 */
3129 static int xhci_root_enable ( struct usb_hub *hub, struct usb_port *port ) {
3130 struct xhci_device *xhci = usb_hub_get_drvdata ( hub );
3131 uint32_t portsc;
3132 unsigned int i;
3133
3134 /* Reset port */
3135 portsc = readl ( xhci->op + XHCI_OP_PORTSC ( port->address ) );
3136 portsc &= XHCI_PORTSC_PRESERVE;
3137 portsc |= XHCI_PORTSC_PR;
3138 writel ( portsc, xhci->op + XHCI_OP_PORTSC ( port->address ) );
3139
3140 /* Wait for port to become enabled */
3141 for ( i = 0 ; i < XHCI_PORT_RESET_MAX_WAIT_MS ; i++ ) {
3142
3143 /* Check port status */
3144 portsc = readl ( xhci->op + XHCI_OP_PORTSC ( port->address ) );
3145 if ( portsc & XHCI_PORTSC_PED )
3146 return 0;
3147
3148 /* Delay */
3149 mdelay ( 1 );
3150 }
3151
3152 DBGC ( xhci, "XHCI %s-%d timed out waiting for port to enable\n",
3153 xhci->name, port->address );
3154 return -ETIMEDOUT;
3155 }
3156
/**
 * Disable port
 *
 * @v hub		USB hub
 * @v port		USB port
 * @ret rc		Return status code
 */
static int xhci_root_disable ( struct usb_hub *hub, struct usb_port *port ) {
	struct xhci_device *xhci = usb_hub_get_drvdata ( hub );
	uint32_t portsc;

	/* Disable port (PED is write-1-to-disable) */
	portsc = readl ( xhci->op + XHCI_OP_PORTSC ( port->address ) );
	portsc &= XHCI_PORTSC_PRESERVE;
	portsc |= XHCI_PORTSC_PED;
	writel ( portsc, xhci->op + XHCI_OP_PORTSC ( port->address ) );

	/* Allow time for link state to stabilise */
	mdelay ( XHCI_LINK_STATE_DELAY_MS );

	/* Set link state to RxDetect for USB3 ports.
	 *
	 * NOTE(review): this reuses the portsc value read before the
	 * disable write rather than re-reading PORTSC after the delay;
	 * presumably intentional since only PRESERVE bits are kept —
	 * confirm against the xHCI PORTSC definition.
	 */
	if ( port->protocol >= USB_PROTO_3_0 ) {
		portsc &= XHCI_PORTSC_PRESERVE;
		portsc |= ( XHCI_PORTSC_PLS_RXDETECT | XHCI_PORTSC_LWS );
		writel ( portsc, xhci->op + XHCI_OP_PORTSC ( port->address ) );
	}

	/* Allow time for link state to stabilise */
	mdelay ( XHCI_LINK_STATE_DELAY_MS );

	return 0;
}
3189
/**
 * Update root hub port speed
 *
 * Reads the port status register, records any disconnection, clears
 * the change bits, and translates the protocol speed ID into a
 * generic USB speed.
 *
 * @v hub		USB hub
 * @v port		USB port
 * @ret rc		Return status code
 */
static int xhci_root_speed ( struct usb_hub *hub, struct usb_port *port ) {
	struct xhci_device *xhci = usb_hub_get_drvdata ( hub );
	uint32_t portsc;
	unsigned int psiv;
	int ccs;
	int ped;
	int csc;
	int speed;
	int rc;

	/* Read port status */
	portsc = readl ( xhci->op + XHCI_OP_PORTSC ( port->address ) );
	DBGC2 ( xhci, "XHCI %s-%d status is %08x\n",
		xhci->name, port->address, portsc );
	ccs = ( portsc & XHCI_PORTSC_CCS );
	ped = ( portsc & XHCI_PORTSC_PED );
	csc = ( portsc & XHCI_PORTSC_CSC );
	psiv = XHCI_PORTSC_PSIV ( portsc );

	/* Record disconnections and clear changes (change bits are
	 * write-1-to-clear, so writing them back clears them).
	 */
	port->disconnected |= csc;
	portsc &= ( XHCI_PORTSC_PRESERVE | XHCI_PORTSC_CHANGE );
	writel ( portsc, xhci->op + XHCI_OP_PORTSC ( port->address ) );

	/* Port speed is not valid unless port is connected */
	if ( ! ccs ) {
		port->speed = USB_SPEED_NONE;
		return 0;
	}

	/* For USB2 ports, the PSIV field is not valid until the port
	 * completes reset and becomes enabled; report full speed in
	 * the interim.
	 */
	if ( ( port->protocol < USB_PROTO_3_0 ) && ! ped ) {
		port->speed = USB_SPEED_FULL;
		return 0;
	}

	/* Get port speed and map to generic USB speed */
	speed = xhci_port_speed ( xhci, port->address, psiv );
	if ( speed < 0 ) {
		rc = speed;
		return rc;
	}

	port->speed = speed;
	return 0;
}
3245
3246 /**
3247 * Clear transaction translator buffer
3248 *
3249 * @v hub USB hub
3250 * @v port USB port
3251 * @v ep USB endpoint
3252 * @ret rc Return status code
3253 */
3254 static int xhci_root_clear_tt ( struct usb_hub *hub, struct usb_port *port,
3255 struct usb_endpoint *ep ) {
3256 struct xhci_device *xhci = usb_hub_get_drvdata ( hub );
3257
3258 /* Should never be called; this is a root hub */
3259 DBGC ( xhci, "XHCI %s-%d nonsensical CLEAR_TT for %s %s\n", xhci->name,
3260 port->address, ep->usb->name, usb_endpoint_name ( ep ) );
3261
3262 return -ENOTSUP;
3263 }
3264
3265 /******************************************************************************
3266 *
3267 * PCI interface
3268 *
3269 ******************************************************************************
3270 */
3271
3272 /** USB host controller operations */
static struct usb_host_operations xhci_operations = {
	/* Endpoint operations (control/bulk/interrupt transfers) */
	.endpoint = {
		.open = xhci_endpoint_open,
		.close = xhci_endpoint_close,
		.reset = xhci_endpoint_reset,
		.mtu = xhci_endpoint_mtu,
		.message = xhci_endpoint_message,
		.stream = xhci_endpoint_stream,
	},
	/* Device slot operations */
	.device = {
		.open = xhci_device_open,
		.close = xhci_device_close,
		.address = xhci_device_address,
	},
	/* Bus operations */
	.bus = {
		.open = xhci_bus_open,
		.close = xhci_bus_close,
		.poll = xhci_bus_poll,
	},
	/* Hub operations (non-root hubs) */
	.hub = {
		.open = xhci_hub_open,
		.close = xhci_hub_close,
	},
	/* Root hub operations (ports driven via PORTSC registers) */
	.root = {
		.open = xhci_root_open,
		.close = xhci_root_close,
		.enable = xhci_root_enable,
		.disable = xhci_root_disable,
		.speed = xhci_root_speed,
		.clear_tt = xhci_root_clear_tt,
	},
};
3305
3306 /**
3307 * Fix Intel PCH-specific quirks
3308 *
3309 * @v xhci xHCI device
3310 * @v pci PCI device
3311 */
static void xhci_pch_fix ( struct xhci_device *xhci, struct pci_device *pci ) {
	struct xhci_pch *pch = &xhci->pch;
	uint32_t xusb2pr;
	uint32_t xusb2prm;
	uint32_t usb3pssen;
	uint32_t usb3prm;

	/* Enable SuperSpeed capability.  Do this before rerouting
	 * USB2 ports, so that USB3 devices connect at SuperSpeed.
	 *
	 * The *PRM registers are masks of ports eligible for the
	 * change; the original enable register value is saved in
	 * xhci->pch so that xhci_pch_undo() can restore it.
	 */
	pci_read_config_dword ( pci, XHCI_PCH_USB3PSSEN, &usb3pssen );
	pci_read_config_dword ( pci, XHCI_PCH_USB3PRM, &usb3prm );
	if ( usb3prm & ~usb3pssen ) {
		DBGC ( xhci, "XHCI %s enabling SuperSpeed on ports %08x\n",
		       xhci->name, ( usb3prm & ~usb3pssen ) );
	}
	pch->usb3pssen = usb3pssen;
	usb3pssen |= usb3prm;
	pci_write_config_dword ( pci, XHCI_PCH_USB3PSSEN, usb3pssen );

	/* Route USB2 ports from EHCI to xHCI, again saving the
	 * original routing register value for later restoration.
	 */
	pci_read_config_dword ( pci, XHCI_PCH_XUSB2PR, &xusb2pr );
	pci_read_config_dword ( pci, XHCI_PCH_XUSB2PRM, &xusb2prm );
	if ( xusb2prm & ~xusb2pr ) {
		DBGC ( xhci, "XHCI %s routing ports %08x from EHCI to xHCI\n",
		       xhci->name, ( xusb2prm & ~xusb2pr ) );
	}
	pch->xusb2pr = xusb2pr;
	xusb2pr |= xusb2prm;
	pci_write_config_dword ( pci, XHCI_PCH_XUSB2PR, xusb2pr );
}
3343
3344 /**
3345 * Undo Intel PCH-specific quirk fixes
3346 *
3347 * @v xhci xHCI device
3348 * @v pci PCI device
3349 */
static void xhci_pch_undo ( struct xhci_device *xhci, struct pci_device *pci ) {
	struct xhci_pch *pch = &xhci->pch;

	/* Restore the register values saved by xhci_pch_fix(), in
	 * reverse order: USB2 port routing first, then SuperSpeed
	 * enablement.
	 */

	/* Restore USB2 port routing to original state */
	pci_write_config_dword ( pci, XHCI_PCH_XUSB2PR, pch->xusb2pr );

	/* Restore SuperSpeed capability to original state */
	pci_write_config_dword ( pci, XHCI_PCH_USB3PSSEN, pch->usb3pssen );
}
3359
3360 /**
3361 * Probe PCI device
3362 *
3363 * @v pci PCI device
3364 * @ret rc Return status code
3365 */
static int xhci_probe ( struct pci_device *pci ) {
	struct xhci_device *xhci;
	struct usb_port *port;
	unsigned long bar_start;
	size_t bar_size;
	unsigned int i;
	int rc;

	/* Allocate and initialise structure (zeroed) */
	xhci = zalloc ( sizeof ( *xhci ) );
	if ( ! xhci ) {
		rc = -ENOMEM;
		goto err_alloc;
	}
	xhci->name = pci->dev.name;
	xhci->quirks = pci->id->driver_data;

	/* Fix up PCI device (enable bus mastering etc.) */
	adjust_pci_device ( pci );

	/* Map registers */
	bar_start = pci_bar_start ( pci, XHCI_BAR );
	bar_size = pci_bar_size ( pci, XHCI_BAR );
	xhci->regs = pci_ioremap ( pci, bar_start, bar_size );
	if ( ! xhci->regs ) {
		rc = -ENODEV;
		goto err_ioremap;
	}

	/* Initialise xHCI device */
	xhci_init ( xhci, xhci->regs );

	/* Configure DMA device; widen the DMA mask to 64 bits if the
	 * controller advertised 64-bit addressing support.
	 */
	xhci->dma = &pci->dma;
	if ( xhci->addr64 )
		dma_set_mask_64bit ( xhci->dma );

	/* Initialise USB legacy support and claim ownership from BIOS */
	xhci_legacy_init ( xhci );
	xhci_legacy_claim ( xhci );

	/* Fix Intel PCH-specific quirks, if applicable */
	if ( xhci->quirks & XHCI_PCH )
		xhci_pch_fix ( xhci, pci );

	/* Reset device */
	if ( ( rc = xhci_reset ( xhci ) ) != 0 )
		goto err_reset;

	/* Allocate USB bus */
	xhci->bus = alloc_usb_bus ( &pci->dev, xhci->ports, XHCI_MTU,
				    &xhci_operations );
	if ( ! xhci->bus ) {
		rc = -ENOMEM;
		goto err_alloc_bus;
	}
	usb_bus_set_hostdata ( xhci->bus, xhci );
	usb_hub_set_drvdata ( xhci->bus->hub, xhci );

	/* Set port protocols (port numbers are 1-based) */
	for ( i = 1 ; i <= xhci->ports ; i++ ) {
		port = usb_port ( xhci->bus->hub, i );
		port->protocol = xhci_port_protocol ( xhci, i );
	}

	/* Register USB bus */
	if ( ( rc = register_usb_bus ( xhci->bus ) ) != 0 )
		goto err_register;

	pci_set_drvdata ( pci, xhci );
	return 0;

	/* NOTE: the following call is deliberately unreachable; it is
	 * presumably kept so that the error path below reads as the
	 * complete reverse-order teardown (mirroring xhci_remove()),
	 * as is conventional in this codebase's error handling.
	 */
	unregister_usb_bus ( xhci->bus );
 err_register:
	free_usb_bus ( xhci->bus );
 err_alloc_bus:
	xhci_reset ( xhci );
 err_reset:
	if ( xhci->quirks & XHCI_PCH )
		xhci_pch_undo ( xhci, pci );
	xhci_legacy_release ( xhci );
	iounmap ( xhci->regs );
 err_ioremap:
	free ( xhci );
 err_alloc:
	return rc;
}
3453
3454 /**
3455 * Remove PCI device
3456 *
3457 * @v pci PCI device
3458 */
static void xhci_remove ( struct pci_device *pci ) {
	struct xhci_device *xhci = pci_get_drvdata ( pci );
	struct usb_bus *bus = xhci->bus;
	uint16_t command;

	/* Some systems are observed to disable bus mastering on
	 * Thunderbolt controllers before we get a chance to shut
	 * down.  Detect this and avoid attempting any DMA operations,
	 * which are guaranteed to fail and may end up spuriously
	 * completing after the operating system kernel starts up.
	 */
	pci_read_config_word ( pci, PCI_COMMAND, &command );
	if ( ! ( command & PCI_COMMAND_MASTER ) ) {
		DBGC ( xhci, "XHCI %s DMA was disabled\n", xhci->name );
		xhci_fail ( xhci );
	}

	/* Unregister and free USB bus */
	unregister_usb_bus ( bus );
	free_usb_bus ( bus );

	/* Reset device and undo any PCH-specific fixes */
	xhci_reset ( xhci );
	if ( xhci->quirks & XHCI_PCH )
		xhci_pch_undo ( xhci, pci );

	/* Release ownership back to BIOS */
	xhci_legacy_release ( xhci );

	/* Unmap registers */
	iounmap ( xhci->regs );

	/* Free device */
	free ( xhci );
}
3494
3495 /** XHCI PCI device IDs */
static struct pci_device_id xhci_ids[] = {
	/* Skylake PCH: needs PCH port-routing fix; PSIV values flagged
	 * as unreliable (XHCI_BAD_PSIV).
	 */
	PCI_ROM ( 0x8086, 0x9d2f, "xhci-skylake", "xHCI (Skylake)", ( XHCI_PCH | XHCI_BAD_PSIV ) ),
	/* Other Intel controllers: apply PCH port-routing fix.
	 * NOTE(review): 0xffff presumably acts as a wildcard device ID
	 * here — confirm against the PCI ID matching logic.
	 */
	PCI_ROM ( 0x8086, 0xffff, "xhci-pch", "xHCI (Intel PCH)", XHCI_PCH ),
	/* Catch-all entry: any other xHCI controller, no quirks */
	PCI_ROM ( 0xffff, 0xffff, "xhci", "xHCI", 0 ),
};
3501
3502 /** XHCI PCI driver */
struct pci_driver xhci_driver __pci_driver = {
	.ids = xhci_ids,
	.id_count = ( sizeof ( xhci_ids ) / sizeof ( xhci_ids[0] ) ),
	/* Match on the xHCI programming-interface class code, so that
	 * controllers not listed explicitly in xhci_ids are still
	 * driven (via the catch-all ID entry).
	 */
	.class = PCI_CLASS_ID ( PCI_CLASS_SERIAL, PCI_CLASS_SERIAL_USB,
				PCI_CLASS_SERIAL_USB_XHCI ),
	.probe = xhci_probe,
	.remove = xhci_remove,
};
3511
3512 /**
3513 * Prepare for exit
3514 *
3515 * @v booting System is shutting down for OS boot
3516 */
static void xhci_shutdown ( int booting ) {
	/* If we are shutting down to boot an OS, then prevent the
	 * release of ownership back to BIOS.
	 *
	 * NOTE(review): this flag is presumably consumed by
	 * xhci_legacy_release() when xhci_remove() runs during
	 * shutdown — confirm against the legacy-support code.
	 */
	xhci_legacy_prevent_release = booting;
}
3523
3524 /** Startup/shutdown function */
/* Registered at STARTUP_LATE so the shutdown hook runs after most
 * other subsystems have been notified.
 */
struct startup_fn xhci_startup __startup_fn ( STARTUP_LATE ) = {
	.name = "xhci",
	.shutdown = xhci_shutdown,
};