1 /*
2 * Copyright (C) 2014 Michael Brown <mbrown@fensystems.co.uk>.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License as
6 * published by the Free Software Foundation; either version 2 of the
7 * License, or (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
17 * 02110-1301, USA.
18 *
19 * You can also choose to distribute this program under the terms of
20 * the Unmodified Binary Distribution Licence (as given in the file
21 * COPYING.UBDL), provided that you have satisfied its requirements.
22 */
23
24 FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
25
26 #include <stdlib.h>
27 #include <stdio.h>
28 #include <unistd.h>
29 #include <string.h>
30 #include <strings.h>
31 #include <errno.h>
32 #include <byteswap.h>
33 #include <ipxe/malloc.h>
34 #include <ipxe/umalloc.h>
35 #include <ipxe/pci.h>
36 #include <ipxe/usb.h>
37 #include <ipxe/init.h>
38 #include <ipxe/profile.h>
39 #include "xhci.h"
40
41 /** @file
42 *
43 * USB eXtensible Host Controller Interface (xHCI) driver
44 *
45 */
46
47 /** Message transfer profiler */
48 static struct profiler xhci_message_profiler __profiler =
49 { .name = "xhci.message" };
50
51 /** Stream transfer profiler */
52 static struct profiler xhci_stream_profiler __profiler =
53 { .name = "xhci.stream" };
54
55 /** Event ring profiler */
56 static struct profiler xhci_event_profiler __profiler =
57 { .name = "xhci.event" };
58
59 /** Transfer event profiler */
60 static struct profiler xhci_transfer_profiler __profiler =
61 { .name = "xhci.transfer" };
62
63 /* Disambiguate the various error causes */
64 #define EIO_DATA \
65 __einfo_error ( EINFO_EIO_DATA )
66 #define EINFO_EIO_DATA \
67 __einfo_uniqify ( EINFO_EIO, ( 2 - 0 ), \
68 "Data buffer error" )
69 #define EIO_BABBLE \
70 __einfo_error ( EINFO_EIO_BABBLE )
71 #define EINFO_EIO_BABBLE \
72 __einfo_uniqify ( EINFO_EIO, ( 3 - 0 ), \
73 "Babble detected" )
74 #define EIO_USB \
75 __einfo_error ( EINFO_EIO_USB )
76 #define EINFO_EIO_USB \
77 __einfo_uniqify ( EINFO_EIO, ( 4 - 0 ), \
78 "USB transaction error" )
79 #define EIO_TRB \
80 __einfo_error ( EINFO_EIO_TRB )
81 #define EINFO_EIO_TRB \
82 __einfo_uniqify ( EINFO_EIO, ( 5 - 0 ), \
83 "TRB error" )
84 #define EIO_STALL \
85 __einfo_error ( EINFO_EIO_STALL )
86 #define EINFO_EIO_STALL \
87 __einfo_uniqify ( EINFO_EIO, ( 6 - 0 ), \
88 "Stall error" )
89 #define EIO_RESOURCE \
90 __einfo_error ( EINFO_EIO_RESOURCE )
91 #define EINFO_EIO_RESOURCE \
92 __einfo_uniqify ( EINFO_EIO, ( 7 - 0 ), \
93 "Resource error" )
94 #define EIO_BANDWIDTH \
95 __einfo_error ( EINFO_EIO_BANDWIDTH )
96 #define EINFO_EIO_BANDWIDTH \
97 __einfo_uniqify ( EINFO_EIO, ( 8 - 0 ), \
98 "Bandwidth error" )
99 #define EIO_NO_SLOTS \
100 __einfo_error ( EINFO_EIO_NO_SLOTS )
101 #define EINFO_EIO_NO_SLOTS \
102 __einfo_uniqify ( EINFO_EIO, ( 9 - 0 ), \
103 "No slots available" )
104 #define EIO_STREAM_TYPE \
105 __einfo_error ( EINFO_EIO_STREAM_TYPE )
106 #define EINFO_EIO_STREAM_TYPE \
107 __einfo_uniqify ( EINFO_EIO, ( 10 - 0 ), \
108 "Invalid stream type" )
109 #define EIO_SLOT \
110 __einfo_error ( EINFO_EIO_SLOT )
111 #define EINFO_EIO_SLOT \
112 __einfo_uniqify ( EINFO_EIO, ( 11 - 0 ), \
113 "Slot not enabled" )
114 #define EIO_ENDPOINT \
115 __einfo_error ( EINFO_EIO_ENDPOINT )
116 #define EINFO_EIO_ENDPOINT \
117 __einfo_uniqify ( EINFO_EIO, ( 12 - 0 ), \
118 "Endpoint not enabled" )
119 #define EIO_SHORT \
120 __einfo_error ( EINFO_EIO_SHORT )
121 #define EINFO_EIO_SHORT \
122 __einfo_uniqify ( EINFO_EIO, ( 13 - 0 ), \
123 "Short packet" )
124 #define EIO_UNDERRUN \
125 __einfo_error ( EINFO_EIO_UNDERRUN )
126 #define EINFO_EIO_UNDERRUN \
127 __einfo_uniqify ( EINFO_EIO, ( 14 - 0 ), \
128 "Ring underrun" )
129 #define EIO_OVERRUN \
130 __einfo_error ( EINFO_EIO_OVERRUN )
131 #define EINFO_EIO_OVERRUN \
132 __einfo_uniqify ( EINFO_EIO, ( 15 - 0 ), \
133 "Ring overrun" )
134 #define EIO_VF_RING_FULL \
135 __einfo_error ( EINFO_EIO_VF_RING_FULL )
136 #define EINFO_EIO_VF_RING_FULL \
137 __einfo_uniqify ( EINFO_EIO, ( 16 - 0 ), \
138 "Virtual function event ring full" )
139 #define EIO_PARAMETER \
140 __einfo_error ( EINFO_EIO_PARAMETER )
141 #define EINFO_EIO_PARAMETER \
142 __einfo_uniqify ( EINFO_EIO, ( 17 - 0 ), \
143 "Parameter error" )
144 #define EIO_BANDWIDTH_OVERRUN \
145 __einfo_error ( EINFO_EIO_BANDWIDTH_OVERRUN )
146 #define EINFO_EIO_BANDWIDTH_OVERRUN \
147 __einfo_uniqify ( EINFO_EIO, ( 18 - 0 ), \
148 "Bandwidth overrun" )
149 #define EIO_CONTEXT \
150 __einfo_error ( EINFO_EIO_CONTEXT )
151 #define EINFO_EIO_CONTEXT \
152 __einfo_uniqify ( EINFO_EIO, ( 19 - 0 ), \
153 "Context state error" )
154 #define EIO_NO_PING \
155 __einfo_error ( EINFO_EIO_NO_PING )
156 #define EINFO_EIO_NO_PING \
157 __einfo_uniqify ( EINFO_EIO, ( 20 - 0 ), \
158 "No ping response" )
159 #define EIO_RING_FULL \
160 __einfo_error ( EINFO_EIO_RING_FULL )
161 #define EINFO_EIO_RING_FULL \
162 __einfo_uniqify ( EINFO_EIO, ( 21 - 0 ), \
163 "Event ring full" )
164 #define EIO_INCOMPATIBLE \
165 __einfo_error ( EINFO_EIO_INCOMPATIBLE )
166 #define EINFO_EIO_INCOMPATIBLE \
167 __einfo_uniqify ( EINFO_EIO, ( 22 - 0 ), \
168 "Incompatible device" )
169 #define EIO_MISSED \
170 __einfo_error ( EINFO_EIO_MISSED )
171 #define EINFO_EIO_MISSED \
172 __einfo_uniqify ( EINFO_EIO, ( 23 - 0 ), \
173 "Missed service error" )
174 #define EIO_CMD_STOPPED \
175 __einfo_error ( EINFO_EIO_CMD_STOPPED )
176 #define EINFO_EIO_CMD_STOPPED \
177 __einfo_uniqify ( EINFO_EIO, ( 24 - 0 ), \
178 "Command ring stopped" )
179 #define EIO_CMD_ABORTED \
180 __einfo_error ( EINFO_EIO_CMD_ABORTED )
181 #define EINFO_EIO_CMD_ABORTED \
182 __einfo_uniqify ( EINFO_EIO, ( 25 - 0 ), \
183 "Command aborted" )
184 #define EIO_STOP \
185 __einfo_error ( EINFO_EIO_STOP )
186 #define EINFO_EIO_STOP \
187 __einfo_uniqify ( EINFO_EIO, ( 26 - 0 ), \
188 "Stopped" )
189 #define EIO_STOP_LEN \
190 __einfo_error ( EINFO_EIO_STOP_LEN )
191 #define EINFO_EIO_STOP_LEN \
192 __einfo_uniqify ( EINFO_EIO, ( 27 - 0 ), \
193 "Stopped - length invalid" )
194 #define EIO_STOP_SHORT \
195 __einfo_error ( EINFO_EIO_STOP_SHORT )
196 #define EINFO_EIO_STOP_SHORT \
197 __einfo_uniqify ( EINFO_EIO, ( 28 - 0 ), \
198 "Stopped - short packet" )
199 #define EIO_LATENCY \
200 __einfo_error ( EINFO_EIO_LATENCY )
201 #define EINFO_EIO_LATENCY \
202 __einfo_uniqify ( EINFO_EIO, ( 29 - 0 ), \
203 "Maximum exit latency too large" )
204 #define EIO_ISOCH \
205 __einfo_error ( EINFO_EIO_ISOCH )
206 #define EINFO_EIO_ISOCH \
207 __einfo_uniqify ( EINFO_EIO, ( 31 - 0 ), \
208 "Isochronous buffer overrun" )
209 #define EPROTO_LOST \
210 __einfo_error ( EINFO_EPROTO_LOST )
211 #define EINFO_EPROTO_LOST \
212 __einfo_uniqify ( EINFO_EPROTO, ( 32 - 32 ), \
213 "Event lost" )
214 #define EPROTO_UNDEFINED \
215 __einfo_error ( EINFO_EPROTO_UNDEFINED )
216 #define EINFO_EPROTO_UNDEFINED \
217 __einfo_uniqify ( EINFO_EPROTO, ( 33 - 32 ), \
218 "Undefined error" )
219 #define EPROTO_STREAM_ID \
220 __einfo_error ( EINFO_EPROTO_STREAM_ID )
221 #define EINFO_EPROTO_STREAM_ID \
222 __einfo_uniqify ( EINFO_EPROTO, ( 34 - 32 ), \
223 "Invalid stream ID" )
224 #define EPROTO_SECONDARY \
225 __einfo_error ( EINFO_EPROTO_SECONDARY )
226 #define EINFO_EPROTO_SECONDARY \
227 __einfo_uniqify ( EINFO_EPROTO, ( 35 - 32 ), \
228 "Secondary bandwidth error" )
229 #define EPROTO_SPLIT \
230 __einfo_error ( EINFO_EPROTO_SPLIT )
231 #define EINFO_EPROTO_SPLIT \
232 __einfo_uniqify ( EINFO_EPROTO, ( 36 - 32 ), \
233 "Split transaction error" )
234 #define ECODE(code) \
235 ( ( (code) < 32 ) ? \
236 EUNIQ ( EINFO_EIO, ( (code) & 31 ), EIO_DATA, EIO_BABBLE, \
237 EIO_USB, EIO_TRB, EIO_STALL, EIO_RESOURCE, \
238 EIO_BANDWIDTH, EIO_NO_SLOTS, EIO_STREAM_TYPE, \
239 EIO_SLOT, EIO_ENDPOINT, EIO_SHORT, EIO_UNDERRUN, \
240 EIO_OVERRUN, EIO_VF_RING_FULL, EIO_PARAMETER, \
241 EIO_BANDWIDTH_OVERRUN, EIO_CONTEXT, EIO_NO_PING, \
242 EIO_RING_FULL, EIO_INCOMPATIBLE, EIO_MISSED, \
243 EIO_CMD_STOPPED, EIO_CMD_ABORTED, EIO_STOP, \
244 EIO_STOP_LEN, EIO_STOP_SHORT, EIO_LATENCY, \
245 EIO_ISOCH ) : \
246 ( (code) < 64 ) ? \
247 EUNIQ ( EINFO_EPROTO, ( (code) & 31 ), EPROTO_LOST, \
248 EPROTO_UNDEFINED, EPROTO_STREAM_ID, \
249 EPROTO_SECONDARY, EPROTO_SPLIT ) : \
250 EFAULT )
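/* Completion handlers convert an xHCI completion code into an iPXE
 * error number via "rc = -ECODE ( trb->code )": for example,
 * completion code 6 maps to the "Stall error" variant of EIO.
 */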
251
252 /******************************************************************************
253 *
254 * Register access
255 *
256 ******************************************************************************
257 */
258
259 /**
260 * Initialise device
261 *
262 * @v xhci xHCI device
263 * @v regs MMIO registers
264 */
265 static void xhci_init ( struct xhci_device *xhci, void *regs ) {
266 uint32_t hcsparams1;
267 uint32_t hcsparams2;
268 uint32_t hccparams1;
269 uint32_t pagesize;
270 size_t caplength;
271 size_t rtsoff;
272 size_t dboff;
273
274 /* Locate capability, operational, runtime, and doorbell registers */
275 xhci->cap = regs;
276 caplength = readb ( xhci->cap + XHCI_CAP_CAPLENGTH );
277 rtsoff = readl ( xhci->cap + XHCI_CAP_RTSOFF );
278 dboff = readl ( xhci->cap + XHCI_CAP_DBOFF );
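	/* CAPLENGTH, RTSOFF and DBOFF are byte offsets from the start
	 * of the capability registers, so the operational, runtime and
	 * doorbell register pointers can be derived directly from
	 * xhci->cap.
	 */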
279 xhci->op = ( xhci->cap + caplength );
280 xhci->run = ( xhci->cap + rtsoff );
281 xhci->db = ( xhci->cap + dboff );
282 DBGC2 ( xhci, "XHCI %s cap %08lx op %08lx run %08lx db %08lx\n",
283 xhci->name, virt_to_phys ( xhci->cap ),
284 virt_to_phys ( xhci->op ), virt_to_phys ( xhci->run ),
285 virt_to_phys ( xhci->db ) );
286
287 /* Read structural parameters 1 */
288 hcsparams1 = readl ( xhci->cap + XHCI_CAP_HCSPARAMS1 );
289 xhci->slots = XHCI_HCSPARAMS1_SLOTS ( hcsparams1 );
290 xhci->intrs = XHCI_HCSPARAMS1_INTRS ( hcsparams1 );
291 xhci->ports = XHCI_HCSPARAMS1_PORTS ( hcsparams1 );
292 DBGC ( xhci, "XHCI %s has %d slots %d intrs %d ports\n",
293 xhci->name, xhci->slots, xhci->intrs, xhci->ports );
294
295 /* Read structural parameters 2 */
296 hcsparams2 = readl ( xhci->cap + XHCI_CAP_HCSPARAMS2 );
297 xhci->scratchpads = XHCI_HCSPARAMS2_SCRATCHPADS ( hcsparams2 );
298 DBGC2 ( xhci, "XHCI %s needs %d scratchpads\n",
299 xhci->name, xhci->scratchpads );
300
301 /* Read capability parameters 1 */
302 hccparams1 = readl ( xhci->cap + XHCI_CAP_HCCPARAMS1 );
303 xhci->addr64 = XHCI_HCCPARAMS1_ADDR64 ( hccparams1 );
304 xhci->csz_shift = XHCI_HCCPARAMS1_CSZ_SHIFT ( hccparams1 );
305 xhci->xecp = XHCI_HCCPARAMS1_XECP ( hccparams1 );
306
307 /* Read page size */
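	/* The PAGESIZE register encodes the page size in units of 4kB;
	 * the usual register value of 1 corresponds to 4096-byte pages.
	 */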
308 pagesize = readl ( xhci->op + XHCI_OP_PAGESIZE );
309 xhci->pagesize = XHCI_PAGESIZE ( pagesize );
310 assert ( xhci->pagesize != 0 );
311 assert ( ( ( xhci->pagesize ) & ( xhci->pagesize - 1 ) ) == 0 );
312 DBGC2 ( xhci, "XHCI %s page size %zd bytes\n",
313 xhci->name, xhci->pagesize );
314 }
315
316 /**
317 * Find extended capability
318 *
319 * @v xhci xHCI device
320 * @v id Capability ID
321 * @v offset Offset to previous extended capability instance, or zero
322 * @ret offset Offset to extended capability, or zero if not found
323 */
324 static unsigned int xhci_extended_capability ( struct xhci_device *xhci,
325 unsigned int id,
326 unsigned int offset ) {
327 uint32_t xecp;
328 unsigned int next;
329
330 /* Locate the extended capability */
331 while ( 1 ) {
332
333 /* Locate first or next capability as applicable */
334 if ( offset ) {
335 xecp = readl ( xhci->cap + offset );
336 next = XHCI_XECP_NEXT ( xecp );
337 } else {
338 next = xhci->xecp;
339 }
340 if ( ! next )
341 return 0;
342 offset += next;
343
344 /* Check if this is the requested capability */
345 xecp = readl ( xhci->cap + offset );
346 if ( XHCI_XECP_ID ( xecp ) == id )
347 return offset;
348 }
349 }
350
351 /**
352 * Write potentially 64-bit register
353 *
354 * @v xhci xHCI device
355 * @v value Value
356 * @v reg Register address
357 * @ret rc Return status code
358 */
359 static inline __attribute__ (( always_inline )) int
360 xhci_writeq ( struct xhci_device *xhci, physaddr_t value, void *reg ) {
361
362 /* If this is a 32-bit build, then this can never fail
363 * (allowing the compiler to optimise out the error path).
364 */
365 if ( sizeof ( value ) <= sizeof ( uint32_t ) ) {
366 writel ( value, reg );
367 writel ( 0, ( reg + sizeof ( uint32_t ) ) );
368 return 0;
369 }
370
371 /* If the device does not support 64-bit addresses and this
372 * address is outside the 32-bit address space, then fail.
373 */
374 if ( ( value & ~0xffffffffULL ) && ! xhci->addr64 ) {
375 DBGC ( xhci, "XHCI %s cannot access address %lx\n",
376 xhci->name, value );
377 return -ENOTSUP;
378 }
379
380 /* If this is a 64-bit build, then writeq() is available */
381 writeq ( value, reg );
382 return 0;
383 }
384
385 /**
386 * Calculate buffer alignment
387 *
388 * @v len Length
389 * @ret align Buffer alignment
390 *
391 * Determine alignment required for a buffer which must be aligned to
392 * at least XHCI_MIN_ALIGN and which must not cross a page boundary.
393 */
394 static inline size_t xhci_align ( size_t len ) {
395 size_t align;
396
397 /* Align to own length (rounded up to a power of two) */
398 align = ( 1 << fls ( len - 1 ) );
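	/* For example, a 24-byte buffer is aligned to 32 bytes and a
	 * 1500-byte buffer to 2kB; aligning to the length rounded up to
	 * a power of two is what prevents a buffer (of up to one page)
	 * from crossing a page boundary.
	 */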
399
400 /* Round up to XHCI_MIN_ALIGN if needed */
401 if ( align < XHCI_MIN_ALIGN )
402 align = XHCI_MIN_ALIGN;
403
404 return align;
405 }
406
407 /**
408 * Calculate device context offset
409 *
410 * @v xhci xHCI device
411 * @v ctx Context index
412 */
413 static inline size_t xhci_device_context_offset ( struct xhci_device *xhci,
414 unsigned int ctx ) {
415
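	/* Contexts are 32 or 64 bytes long (csz_shift of 5 or 6), as
	 * selected by the CSZ bit read from HCCPARAMS1.
	 */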
416 return ( XHCI_DCI ( ctx ) << xhci->csz_shift );
417 }
418
419 /**
420 * Calculate input context offset
421 *
422 * @v xhci xHCI device
423 * @v ctx Context index
424 */
425 static inline size_t xhci_input_context_offset ( struct xhci_device *xhci,
426 unsigned int ctx ) {
427
428 return ( XHCI_ICI ( ctx ) << xhci->csz_shift );
429 }
430
431 /******************************************************************************
432 *
433 * Diagnostics
434 *
435 ******************************************************************************
436 */
437
438 /**
439 * Dump host controller registers
440 *
441 * @v xhci xHCI device
442 */
443 static inline void xhci_dump ( struct xhci_device *xhci ) {
444 uint32_t usbcmd;
445 uint32_t usbsts;
446 uint32_t pagesize;
447 uint32_t dnctrl;
448 uint32_t config;
449
450 /* Do nothing unless debugging is enabled */
451 if ( ! DBG_LOG )
452 return;
453
454 /* Dump USBCMD */
455 usbcmd = readl ( xhci->op + XHCI_OP_USBCMD );
456 DBGC ( xhci, "XHCI %s USBCMD %08x%s%s\n", xhci->name, usbcmd,
457 ( ( usbcmd & XHCI_USBCMD_RUN ) ? " run" : "" ),
458 ( ( usbcmd & XHCI_USBCMD_HCRST ) ? " hcrst" : "" ) );
459
460 /* Dump USBSTS */
461 usbsts = readl ( xhci->op + XHCI_OP_USBSTS );
462 DBGC ( xhci, "XHCI %s USBSTS %08x%s\n", xhci->name, usbsts,
463 ( ( usbsts & XHCI_USBSTS_HCH ) ? " hch" : "" ) );
464
465 /* Dump PAGESIZE */
466 pagesize = readl ( xhci->op + XHCI_OP_PAGESIZE );
467 DBGC ( xhci, "XHCI %s PAGESIZE %08x\n", xhci->name, pagesize );
468
469 /* Dump DNCTRL */
470 dnctrl = readl ( xhci->op + XHCI_OP_DNCTRL );
471 DBGC ( xhci, "XHCI %s DNCTRL %08x\n", xhci->name, dnctrl );
472
473 /* Dump CONFIG */
474 config = readl ( xhci->op + XHCI_OP_CONFIG );
475 DBGC ( xhci, "XHCI %s CONFIG %08x\n", xhci->name, config );
476 }
477
478 /**
479 * Dump port registers
480 *
481 * @v xhci xHCI device
482 * @v port Port number
483 */
484 static inline void xhci_dump_port ( struct xhci_device *xhci,
485 unsigned int port ) {
486 uint32_t portsc;
487 uint32_t portpmsc;
488 uint32_t portli;
489 uint32_t porthlpmc;
490
491 /* Do nothing unless debugging is enabled */
492 if ( ! DBG_LOG )
493 return;
494
495 /* Dump PORTSC */
496 portsc = readl ( xhci->op + XHCI_OP_PORTSC ( port ) );
497 DBGC ( xhci, "XHCI %s-%d PORTSC %08x%s%s%s%s psiv=%d\n",
498 xhci->name, port, portsc,
499 ( ( portsc & XHCI_PORTSC_CCS ) ? " ccs" : "" ),
500 ( ( portsc & XHCI_PORTSC_PED ) ? " ped" : "" ),
501 ( ( portsc & XHCI_PORTSC_PR ) ? " pr" : "" ),
502 ( ( portsc & XHCI_PORTSC_PP ) ? " pp" : "" ),
503 XHCI_PORTSC_PSIV ( portsc ) );
504
505 /* Dump PORTPMSC */
506 portpmsc = readl ( xhci->op + XHCI_OP_PORTPMSC ( port ) );
507 DBGC ( xhci, "XHCI %s-%d PORTPMSC %08x\n", xhci->name, port, portpmsc );
508
509 /* Dump PORTLI */
510 portli = readl ( xhci->op + XHCI_OP_PORTLI ( port ) );
511 DBGC ( xhci, "XHCI %s-%d PORTLI %08x\n", xhci->name, port, portli );
512
513 /* Dump PORTHLPMC */
514 porthlpmc = readl ( xhci->op + XHCI_OP_PORTHLPMC ( port ) );
515 DBGC ( xhci, "XHCI %s-%d PORTHLPMC %08x\n",
516 xhci->name, port, porthlpmc );
517 }
518
519 /******************************************************************************
520 *
521 * USB legacy support
522 *
523 ******************************************************************************
524 */
525
526 /** Prevent the release of ownership back to BIOS */
527 static int xhci_legacy_prevent_release;
528
529 /**
530 * Initialise USB legacy support
531 *
532 * @v xhci xHCI device
533 */
534 static void xhci_legacy_init ( struct xhci_device *xhci ) {
535 unsigned int legacy;
536 uint8_t bios;
537
538 /* Locate USB legacy support capability (if present) */
539 legacy = xhci_extended_capability ( xhci, XHCI_XECP_ID_LEGACY, 0 );
540 if ( ! legacy ) {
541 /* Not an error; capability may not be present */
542 DBGC ( xhci, "XHCI %s has no USB legacy support capability\n",
543 xhci->name );
544 return;
545 }
546
547 /* Check if legacy USB support is enabled */
548 bios = readb ( xhci->cap + legacy + XHCI_USBLEGSUP_BIOS );
549 if ( ! ( bios & XHCI_USBLEGSUP_BIOS_OWNED ) ) {
550 /* Not an error; already owned by OS */
551 DBGC ( xhci, "XHCI %s USB legacy support already disabled\n",
552 xhci->name );
553 return;
554 }
555
556 /* Record presence of USB legacy support capability */
557 xhci->legacy = legacy;
558 }
559
560 /**
561 * Claim ownership from BIOS
562 *
563 * @v xhci xHCI device
564 */
565 static void xhci_legacy_claim ( struct xhci_device *xhci ) {
566 uint32_t ctlsts;
567 uint8_t bios;
568 unsigned int i;
569
570 /* Do nothing unless legacy support capability is present */
571 if ( ! xhci->legacy )
572 return;
573
574 /* Claim ownership */
575 writeb ( XHCI_USBLEGSUP_OS_OWNED,
576 xhci->cap + xhci->legacy + XHCI_USBLEGSUP_OS );
577
578 /* Wait for BIOS to release ownership */
579 for ( i = 0 ; i < XHCI_USBLEGSUP_MAX_WAIT_MS ; i++ ) {
580
581 /* Check if BIOS has released ownership */
582 bios = readb ( xhci->cap + xhci->legacy + XHCI_USBLEGSUP_BIOS );
583 if ( ! ( bios & XHCI_USBLEGSUP_BIOS_OWNED ) ) {
584 DBGC ( xhci, "XHCI %s claimed ownership from BIOS\n",
585 xhci->name );
586 ctlsts = readl ( xhci->cap + xhci->legacy +
587 XHCI_USBLEGSUP_CTLSTS );
588 if ( ctlsts ) {
589 DBGC ( xhci, "XHCI %s warning: BIOS retained "
590 "SMIs: %08x\n", xhci->name, ctlsts );
591 }
592 return;
593 }
594
595 /* Delay */
596 mdelay ( 1 );
597 }
598
599 /* BIOS did not release ownership. Claim it forcibly by
600 * disabling all SMIs.
601 */
602 DBGC ( xhci, "XHCI %s could not claim ownership from BIOS: forcibly "
603 "disabling SMIs\n", xhci->name );
604 writel ( 0, xhci->cap + xhci->legacy + XHCI_USBLEGSUP_CTLSTS );
605 }
606
607 /**
608 * Release ownership back to BIOS
609 *
610 * @v xhci xHCI device
611 */
612 static void xhci_legacy_release ( struct xhci_device *xhci ) {
613
614 /* Do nothing unless legacy support capability is present */
615 if ( ! xhci->legacy )
616 return;
617
618 /* Do nothing if releasing ownership is prevented */
619 if ( xhci_legacy_prevent_release ) {
620 DBGC ( xhci, "XHCI %s not releasing ownership to BIOS\n",
621 xhci->name );
622 return;
623 }
624
625 /* Release ownership */
626 writeb ( 0, xhci->cap + xhci->legacy + XHCI_USBLEGSUP_OS );
627 DBGC ( xhci, "XHCI %s released ownership to BIOS\n", xhci->name );
628 }
629
630 /******************************************************************************
631 *
632 * Supported protocols
633 *
634 ******************************************************************************
635 */
636
637 /**
638 * Transcribe port speed (for debugging)
639 *
640 * @v psi Protocol speed ID
641 * @ret speed Transcribed speed
642 */
643 static inline const char * xhci_speed_name ( uint32_t psi ) {
644 static const char *exponents[4] = { "", "k", "M", "G" };
645 static char buf[ 10 /* "xxxxxXbps" + NUL */ ];
646 unsigned int mantissa;
647 unsigned int exponent;
648
649 /* Extract mantissa and exponent */
650 mantissa = XHCI_SUPPORTED_PSI_MANTISSA ( psi );
651 exponent = XHCI_SUPPORTED_PSI_EXPONENT ( psi );
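	/* For example, a SuperSpeed PSI with mantissa 5 and exponent 3
	 * is transcribed as "5Gbps".
	 */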
652
653 /* Transcribe speed */
654 snprintf ( buf, sizeof ( buf ), "%d%sbps",
655 mantissa, exponents[exponent] );
656 return buf;
657 }
658
659 /**
660 * Find supported protocol extended capability for a port
661 *
662 * @v xhci xHCI device
663 * @v port Port number
664 * @ret supported Offset to extended capability, or zero if not found
665 */
666 static unsigned int xhci_supported_protocol ( struct xhci_device *xhci,
667 unsigned int port ) {
668 unsigned int supported = 0;
669 unsigned int offset;
670 unsigned int count;
671 uint32_t ports;
672
673 /* Iterate over all supported protocol structures */
674 while ( ( supported = xhci_extended_capability ( xhci,
675 XHCI_XECP_ID_SUPPORTED,
676 supported ) ) ) {
677
678 /* Determine port range */
679 ports = readl ( xhci->cap + supported + XHCI_SUPPORTED_PORTS );
680 offset = XHCI_SUPPORTED_PORTS_OFFSET ( ports );
681 count = XHCI_SUPPORTED_PORTS_COUNT ( ports );
682
683 /* Check if port lies within this range */
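		/* (the unsigned subtraction also rejects ports below
		 * the start of the range)
		 */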
684 if ( ( port - offset ) < count )
685 return supported;
686 }
687
688 DBGC ( xhci, "XHCI %s-%d has no supported protocol\n",
689 xhci->name, port );
690 return 0;
691 }
692
693 /**
694 * Find port protocol
695 *
696 * @v xhci xHCI device
697 * @v port Port number
698 * @ret protocol USB protocol, or zero if not found
699 */
700 static unsigned int xhci_port_protocol ( struct xhci_device *xhci,
701 unsigned int port ) {
702 unsigned int supported = xhci_supported_protocol ( xhci, port );
703 union {
704 uint32_t raw;
705 char text[5];
706 } name;
707 unsigned int protocol;
708 unsigned int type;
709 unsigned int psic;
710 unsigned int psiv;
711 unsigned int i;
712 uint32_t revision;
713 uint32_t ports;
714 uint32_t slot;
715 uint32_t psi;
716
717 /* Fail if there is no supported protocol */
718 if ( ! supported )
719 return 0;
720
721 /* Determine protocol version */
722 revision = readl ( xhci->cap + supported + XHCI_SUPPORTED_REVISION );
723 protocol = XHCI_SUPPORTED_REVISION_VER ( revision );
724
725 /* Describe port protocol */
726 if ( DBG_EXTRA ) {
727 name.raw = cpu_to_le32 ( readl ( xhci->cap + supported +
728 XHCI_SUPPORTED_NAME ) );
729 name.text[4] = '\0';
730 slot = readl ( xhci->cap + supported + XHCI_SUPPORTED_SLOT );
731 type = XHCI_SUPPORTED_SLOT_TYPE ( slot );
732 DBGC2 ( xhci, "XHCI %s-%d %sv%04x type %d",
733 xhci->name, port, name.text, protocol, type );
734 ports = readl ( xhci->cap + supported + XHCI_SUPPORTED_PORTS );
735 psic = XHCI_SUPPORTED_PORTS_PSIC ( ports );
736 if ( psic ) {
737 DBGC2 ( xhci, " speeds" );
738 for ( i = 0 ; i < psic ; i++ ) {
739 psi = readl ( xhci->cap + supported +
740 XHCI_SUPPORTED_PSI ( i ) );
741 psiv = XHCI_SUPPORTED_PSI_VALUE ( psi );
742 DBGC2 ( xhci, " %d:%s", psiv,
743 xhci_speed_name ( psi ) );
744 }
745 }
746 if ( xhci->quirks & XHCI_BAD_PSIV )
747 DBGC2 ( xhci, " (ignored)" );
748 DBGC2 ( xhci, "\n" );
749 }
750
751 return protocol;
752 }
753
754 /**
755 * Find port slot type
756 *
757 * @v xhci xHCI device
758 * @v port Port number
759 * @ret type Slot type, or negative error
760 */
761 static int xhci_port_slot_type ( struct xhci_device *xhci, unsigned int port ) {
762 unsigned int supported = xhci_supported_protocol ( xhci, port );
763 unsigned int type;
764 uint32_t slot;
765
766 /* Fail if there is no supported protocol */
767 if ( ! supported )
768 return -ENOTSUP;
769
770 /* Get slot type */
771 slot = readl ( xhci->cap + supported + XHCI_SUPPORTED_SLOT );
772 type = XHCI_SUPPORTED_SLOT_TYPE ( slot );
773
774 return type;
775 }
776
777 /**
778 * Find port speed
779 *
780 * @v xhci xHCI device
781 * @v port Port number
782 * @v psiv Protocol speed ID value
783 * @ret speed Port speed, or negative error
784 */
785 static int xhci_port_speed ( struct xhci_device *xhci, unsigned int port,
786 unsigned int psiv ) {
787 unsigned int supported = xhci_supported_protocol ( xhci, port );
788 unsigned int psic;
789 unsigned int mantissa;
790 unsigned int exponent;
791 unsigned int speed;
792 unsigned int i;
793 uint32_t ports;
794 uint32_t psi;
795
796 /* Fail if there is no supported protocol */
797 if ( ! supported )
798 return -ENOTSUP;
799
800 /* Get protocol speed ID count */
801 ports = readl ( xhci->cap + supported + XHCI_SUPPORTED_PORTS );
802 psic = XHCI_SUPPORTED_PORTS_PSIC ( ports );
803
804 /* Use the default mappings if applicable */
805 if ( ( psic == 0 ) || ( xhci->quirks & XHCI_BAD_PSIV ) ) {
806 switch ( psiv ) {
807 case XHCI_SPEED_LOW : return USB_SPEED_LOW;
808 case XHCI_SPEED_FULL : return USB_SPEED_FULL;
809 case XHCI_SPEED_HIGH : return USB_SPEED_HIGH;
810 case XHCI_SPEED_SUPER : return USB_SPEED_SUPER;
811 default:
812 DBGC ( xhci, "XHCI %s-%d non-standard PSI value %d\n",
813 xhci->name, port, psiv );
814 return -ENOTSUP;
815 }
816 }
817
818 /* Iterate over PSI dwords looking for a match */
819 for ( i = 0 ; i < psic ; i++ ) {
820 psi = readl ( xhci->cap + supported + XHCI_SUPPORTED_PSI ( i ));
821 if ( psiv == XHCI_SUPPORTED_PSI_VALUE ( psi ) ) {
822 mantissa = XHCI_SUPPORTED_PSI_MANTISSA ( psi );
823 exponent = XHCI_SUPPORTED_PSI_EXPONENT ( psi );
824 speed = USB_SPEED ( mantissa, exponent );
825 return speed;
826 }
827 }
828
829 DBGC ( xhci, "XHCI %s-%d spurious PSI value %d\n",
830 xhci->name, port, psiv );
831 return -ENOENT;
832 }
833
834 /**
835 * Find protocol speed ID value
836 *
837 * @v xhci xHCI device
838 * @v port Port number
839 * @v speed USB speed
840 * @ret psiv Protocol speed ID value, or negative error
841 */
842 static int xhci_port_psiv ( struct xhci_device *xhci, unsigned int port,
843 unsigned int speed ) {
844 unsigned int supported = xhci_supported_protocol ( xhci, port );
845 unsigned int psic;
846 unsigned int mantissa;
847 unsigned int exponent;
848 unsigned int psiv;
849 unsigned int i;
850 uint32_t ports;
851 uint32_t psi;
852
853 /* Fail if there is no supported protocol */
854 if ( ! supported )
855 return -ENOTSUP;
856
857 /* Get protocol speed ID count */
858 ports = readl ( xhci->cap + supported + XHCI_SUPPORTED_PORTS );
859 psic = XHCI_SUPPORTED_PORTS_PSIC ( ports );
860
861 /* Use the default mappings if applicable */
862 if ( ( psic == 0 ) || ( xhci->quirks & XHCI_BAD_PSIV ) ) {
863 switch ( speed ) {
864 case USB_SPEED_LOW : return XHCI_SPEED_LOW;
865 case USB_SPEED_FULL : return XHCI_SPEED_FULL;
866 case USB_SPEED_HIGH : return XHCI_SPEED_HIGH;
867 case USB_SPEED_SUPER : return XHCI_SPEED_SUPER;
868 default:
869 DBGC ( xhci, "XHCI %s-%d non-standard speed %d\n",
870 xhci->name, port, speed );
871 return -ENOTSUP;
872 }
873 }
874
875 /* Iterate over PSI dwords looking for a match */
876 for ( i = 0 ; i < psic ; i++ ) {
877 psi = readl ( xhci->cap + supported + XHCI_SUPPORTED_PSI ( i ));
878 mantissa = XHCI_SUPPORTED_PSI_MANTISSA ( psi );
879 exponent = XHCI_SUPPORTED_PSI_EXPONENT ( psi );
880 if ( speed == USB_SPEED ( mantissa, exponent ) ) {
881 psiv = XHCI_SUPPORTED_PSI_VALUE ( psi );
882 return psiv;
883 }
884 }
885
886 DBGC ( xhci, "XHCI %s-%d unrepresentable speed %#x\n",
887 xhci->name, port, speed );
888 return -ENOENT;
889 }
890
891 /******************************************************************************
892 *
893 * Device context base address array
894 *
895 ******************************************************************************
896 */
897
898 /**
899 * Allocate device context base address array
900 *
901 * @v xhci xHCI device
902 * @ret rc Return status code
903 */
904 static int xhci_dcbaa_alloc ( struct xhci_device *xhci ) {
905 size_t len;
906 physaddr_t dcbaap;
907 int rc;
908
909 /* Allocate and initialise structure. Must be at least
910 * 64-byte aligned and must not cross a page boundary, so
911 * align on its own size (rounded up to a power of two and
912 * with a minimum of 64 bytes).
913 */
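	/* Entry 0 of the array is reserved for the scratchpad buffer
	 * array pointer; entries 1 to "slots" hold the device context
	 * pointers, indexed by slot ID.
	 */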
914 len = ( ( xhci->slots + 1 ) * sizeof ( xhci->dcbaa[0] ) );
915 xhci->dcbaa = malloc_dma ( len, xhci_align ( len ) );
916 if ( ! xhci->dcbaa ) {
917 DBGC ( xhci, "XHCI %s could not allocate DCBAA\n", xhci->name );
918 rc = -ENOMEM;
919 goto err_alloc;
920 }
921 memset ( xhci->dcbaa, 0, len );
922
923 /* Program DCBAA pointer */
924 dcbaap = virt_to_phys ( xhci->dcbaa );
925 if ( ( rc = xhci_writeq ( xhci, dcbaap,
926 xhci->op + XHCI_OP_DCBAAP ) ) != 0 )
927 goto err_writeq;
928
929 DBGC2 ( xhci, "XHCI %s DCBAA at [%08lx,%08lx)\n",
930 xhci->name, dcbaap, ( dcbaap + len ) );
931 return 0;
932
933 err_writeq:
934 free_dma ( xhci->dcbaa, len );
935 err_alloc:
936 return rc;
937 }
938
939 /**
940 * Free device context base address array
941 *
942 * @v xhci xHCI device
943 */
944 static void xhci_dcbaa_free ( struct xhci_device *xhci ) {
945 size_t len;
946 unsigned int i;
947
948 /* Sanity check */
949 for ( i = 0 ; i <= xhci->slots ; i++ )
950 assert ( xhci->dcbaa[i] == 0 );
951
952 /* Clear DCBAA pointer */
953 xhci_writeq ( xhci, 0, xhci->op + XHCI_OP_DCBAAP );
954
955 /* Free DCBAA */
956 len = ( ( xhci->slots + 1 ) * sizeof ( xhci->dcbaa[0] ) );
957 free_dma ( xhci->dcbaa, len );
958 }
959
960 /******************************************************************************
961 *
962 * Scratchpad buffers
963 *
964 ******************************************************************************
965 */
966
967 /**
968 * Allocate scratchpad buffers
969 *
970 * @v xhci xHCI device
971 * @ret rc Return status code
972 */
973 static int xhci_scratchpad_alloc ( struct xhci_device *xhci ) {
974 size_t array_len;
975 size_t len;
976 physaddr_t phys;
977 unsigned int i;
978 int rc;
979
980 /* Do nothing if no scratchpad buffers are used */
981 if ( ! xhci->scratchpads )
982 return 0;
983
984 /* Allocate scratchpads */
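	/* Each scratchpad is one controller page donated for the
	 * controller's private use; the driver only ever allocates and
	 * zeroes these buffers.
	 */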
985 len = ( xhci->scratchpads * xhci->pagesize );
986 xhci->scratchpad = umalloc ( len );
987 if ( ! xhci->scratchpad ) {
988 DBGC ( xhci, "XHCI %s could not allocate scratchpad buffers\n",
989 xhci->name );
990 rc = -ENOMEM;
991 goto err_alloc;
992 }
993 memset_user ( xhci->scratchpad, 0, 0, len );
994
995 /* Allocate scratchpad array */
996 array_len = ( xhci->scratchpads * sizeof ( xhci->scratchpad_array[0] ));
997 xhci->scratchpad_array =
998 malloc_dma ( array_len, xhci_align ( array_len ) );
999 if ( ! xhci->scratchpad_array ) {
1000 DBGC ( xhci, "XHCI %s could not allocate scratchpad buffer "
1001 "array\n", xhci->name );
1002 rc = -ENOMEM;
1003 goto err_alloc_array;
1004 }
1005
1006 /* Populate scratchpad array */
1007 for ( i = 0 ; i < xhci->scratchpads ; i++ ) {
1008 phys = user_to_phys ( xhci->scratchpad, ( i * xhci->pagesize ));
1009 xhci->scratchpad_array[i] = phys;
1010 }
1011
1012 /* Set scratchpad array pointer */
1013 assert ( xhci->dcbaa != NULL );
1014 xhci->dcbaa[0] = cpu_to_le64 ( virt_to_phys ( xhci->scratchpad_array ));
1015
1016 DBGC2 ( xhci, "XHCI %s scratchpad [%08lx,%08lx) array [%08lx,%08lx)\n",
1017 xhci->name, user_to_phys ( xhci->scratchpad, 0 ),
1018 user_to_phys ( xhci->scratchpad, len ),
1019 virt_to_phys ( xhci->scratchpad_array ),
1020 ( virt_to_phys ( xhci->scratchpad_array ) + array_len ) );
1021 return 0;
1022
1023 free_dma ( xhci->scratchpad_array, array_len );
1024 err_alloc_array:
1025 ufree ( xhci->scratchpad );
1026 err_alloc:
1027 return rc;
1028 }
1029
1030 /**
1031 * Free scratchpad buffers
1032 *
1033 * @v xhci xHCI device
1034 */
1035 static void xhci_scratchpad_free ( struct xhci_device *xhci ) {
1036 size_t array_len;
1037
1038 /* Do nothing if no scratchpad buffers are used */
1039 if ( ! xhci->scratchpads )
1040 return;
1041
1042 /* Clear scratchpad array pointer */
1043 assert ( xhci->dcbaa != NULL );
1044 xhci->dcbaa[0] = 0;
1045
1046 /* Free scratchpad array */
1047 array_len = ( xhci->scratchpads * sizeof ( xhci->scratchpad_array[0] ));
1048 free_dma ( xhci->scratchpad_array, array_len );
1049
1050 /* Free scratchpads */
1051 ufree ( xhci->scratchpad );
1052 }
1053
1054 /******************************************************************************
1055 *
1056 * Run / stop / reset
1057 *
1058 ******************************************************************************
1059 */
1060
1061 /**
1062 * Start xHCI device
1063 *
1064 * @v xhci xHCI device
1065 */
1066 static void xhci_run ( struct xhci_device *xhci ) {
1067 uint32_t config;
1068 uint32_t usbcmd;
1069
1070 /* Configure number of device slots */
1071 config = readl ( xhci->op + XHCI_OP_CONFIG );
1072 config &= ~XHCI_CONFIG_MAX_SLOTS_EN_MASK;
1073 config |= XHCI_CONFIG_MAX_SLOTS_EN ( xhci->slots );
1074 writel ( config, xhci->op + XHCI_OP_CONFIG );
1075
1076 /* Set run/stop bit */
1077 usbcmd = readl ( xhci->op + XHCI_OP_USBCMD );
1078 usbcmd |= XHCI_USBCMD_RUN;
1079 writel ( usbcmd, xhci->op + XHCI_OP_USBCMD );
1080 }
1081
1082 /**
1083 * Stop xHCI device
1084 *
1085 * @v xhci xHCI device
1086 * @ret rc Return status code
1087 */
1088 static int xhci_stop ( struct xhci_device *xhci ) {
1089 uint32_t usbcmd;
1090 uint32_t usbsts;
1091 unsigned int i;
1092
1093 /* Clear run/stop bit */
1094 usbcmd = readl ( xhci->op + XHCI_OP_USBCMD );
1095 usbcmd &= ~XHCI_USBCMD_RUN;
1096 writel ( usbcmd, xhci->op + XHCI_OP_USBCMD );
1097
1098 /* Wait for device to stop */
1099 for ( i = 0 ; i < XHCI_STOP_MAX_WAIT_MS ; i++ ) {
1100
1101 /* Check if device is stopped */
1102 usbsts = readl ( xhci->op + XHCI_OP_USBSTS );
1103 if ( usbsts & XHCI_USBSTS_HCH )
1104 return 0;
1105
1106 /* Delay */
1107 mdelay ( 1 );
1108 }
1109
1110 DBGC ( xhci, "XHCI %s timed out waiting for stop\n", xhci->name );
1111 return -ETIMEDOUT;
1112 }
1113
1114 /**
1115 * Reset xHCI device
1116 *
1117 * @v xhci xHCI device
1118 * @ret rc Return status code
1119 */
1120 static int xhci_reset ( struct xhci_device *xhci ) {
1121 uint32_t usbcmd;
1122 unsigned int i;
1123 int rc;
1124
1125 /* The xHCI specification states that resetting a running
1126 * device may result in undefined behaviour, so try stopping
1127 * it first.
1128 */
1129 if ( ( rc = xhci_stop ( xhci ) ) != 0 ) {
1130 /* Ignore errors and attempt to reset the device anyway */
1131 }
1132
1133 /* Reset device */
1134 writel ( XHCI_USBCMD_HCRST, xhci->op + XHCI_OP_USBCMD );
1135
1136 /* Wait for reset to complete */
1137 for ( i = 0 ; i < XHCI_RESET_MAX_WAIT_MS ; i++ ) {
1138
1139 /* Check if reset is complete */
1140 usbcmd = readl ( xhci->op + XHCI_OP_USBCMD );
1141 if ( ! ( usbcmd & XHCI_USBCMD_HCRST ) )
1142 return 0;
1143
1144 /* Delay */
1145 mdelay ( 1 );
1146 }
1147
1148 DBGC ( xhci, "XHCI %s timed out waiting for reset\n", xhci->name );
1149 return -ETIMEDOUT;
1150 }
1151
1152 /******************************************************************************
1153 *
1154 * Transfer request blocks
1155 *
1156 ******************************************************************************
1157 */
1158
1159 /**
1160 * Allocate transfer request block ring
1161 *
1162 * @v xhci xHCI device
1163 * @v ring TRB ring
1164 * @v shift Ring size (log2)
1165 * @v slot Device slot
1166 * @v target Doorbell target
1167 * @v stream Doorbell stream ID
1168 * @ret rc Return status code
1169 */
1170 static int xhci_ring_alloc ( struct xhci_device *xhci,
1171 struct xhci_trb_ring *ring,
1172 unsigned int shift, unsigned int slot,
1173 unsigned int target, unsigned int stream ) {
1174 struct xhci_trb_link *link;
1175 unsigned int count;
1176 int rc;
1177
1178 /* Sanity check */
1179 assert ( shift > 0 );
1180
1181 /* Initialise structure */
1182 memset ( ring, 0, sizeof ( *ring ) );
1183 ring->shift = shift;
1184 count = ( 1U << shift );
1185 ring->mask = ( count - 1 );
1186 ring->len = ( ( count + 1 /* Link TRB */ ) * sizeof ( ring->trb[0] ) );
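	/* Doorbell 0 is the command doorbell and doorbells 1 upwards
	 * are indexed by device slot ID; the doorbell value selects the
	 * endpoint target and stream.
	 */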
1187 ring->db = ( xhci->db + ( slot * sizeof ( ring->dbval ) ) );
1188 ring->dbval = XHCI_DBVAL ( target, stream );
1189
1190 /* Allocate I/O buffers */
1191 ring->iobuf = zalloc ( count * sizeof ( ring->iobuf[0] ) );
1192 if ( ! ring->iobuf ) {
1193 rc = -ENOMEM;
1194 goto err_alloc_iobuf;
1195 }
1196
1197 /* Allocate TRBs */
1198 ring->trb = malloc_dma ( ring->len, xhci_align ( ring->len ) );
1199 if ( ! ring->trb ) {
1200 rc = -ENOMEM;
1201 goto err_alloc_trb;
1202 }
1203 memset ( ring->trb, 0, ring->len );
1204
1205 /* Initialise Link TRB */
1206 link = &ring->trb[count].link;
1207 link->next = cpu_to_le64 ( virt_to_phys ( ring->trb ) );
1208 link->flags = XHCI_TRB_TC;
1209 link->type = XHCI_TRB_LINK;
1210 ring->link = link;
1211
1212 return 0;
1213
1214 free_dma ( ring->trb, ring->len );
1215 err_alloc_trb:
1216 free ( ring->iobuf );
1217 err_alloc_iobuf:
1218 return rc;
1219 }
1220
1221 /**
1222 * Reset transfer request block ring
1223 *
1224 * @v ring TRB ring
1225 */
1226 static void xhci_ring_reset ( struct xhci_trb_ring *ring ) {
1227 unsigned int count = ( 1U << ring->shift );
1228
1229 /* Reset producer and consumer counters */
1230 ring->prod = 0;
1231 ring->cons = 0;
1232
1233 /* Reset TRBs (except Link TRB) */
1234 memset ( ring->trb, 0, ( count * sizeof ( ring->trb[0] ) ) );
1235 }
1236
1237 /**
1238 * Free transfer request block ring
1239 *
1240 * @v ring TRB ring
1241 */
1242 static void xhci_ring_free ( struct xhci_trb_ring *ring ) {
1243 unsigned int count = ( 1U << ring->shift );
1244 unsigned int i;
1245
1246 /* Sanity checks */
1247 assert ( ring->cons == ring->prod );
1248 for ( i = 0 ; i < count ; i++ )
1249 assert ( ring->iobuf[i] == NULL );
1250
1251 /* Free TRBs */
1252 free_dma ( ring->trb, ring->len );
1253
1254 /* Free I/O buffers */
1255 free ( ring->iobuf );
1256 }
1257
1258 /**
1259 * Enqueue a transfer request block
1260 *
1261 * @v ring TRB ring
1262 * @v iobuf I/O buffer (if any)
1263 * @v trb Transfer request block (with empty Cycle flag)
1264 * @ret rc Return status code
1265 *
1266 * This operation does not implicitly ring the doorbell register.
1267 */
1268 static int xhci_enqueue ( struct xhci_trb_ring *ring, struct io_buffer *iobuf,
1269 const union xhci_trb *trb ) {
1270 union xhci_trb *dest;
1271 unsigned int prod;
1272 unsigned int mask;
1273 unsigned int index;
1274 unsigned int cycle;
1275
1276 /* Sanity check */
1277 assert ( ! ( trb->common.flags & XHCI_TRB_C ) );
1278
1279 /* Fail if ring is full */
1280 if ( ! xhci_ring_remaining ( ring ) )
1281 return -ENOBUFS;
1282
1283 /* Update producer counter (and link TRB, if applicable) */
1284 prod = ring->prod++;
1285 mask = ring->mask;
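	/* Bit "shift" of the producer counter counts completed passes
	 * around the ring, so its inverse gives the cycle state for the
	 * current pass.
	 */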
1286 cycle = ( ( ~( prod >> ring->shift ) ) & XHCI_TRB_C );
1287 index = ( prod & mask );
1288 if ( index == 0 )
1289 ring->link->flags = ( XHCI_TRB_TC | ( cycle ^ XHCI_TRB_C ) );
1290
1291 /* Record I/O buffer */
1292 ring->iobuf[index] = iobuf;
1293
1294 /* Enqueue TRB */
1295 dest = &ring->trb[index];
1296 dest->template.parameter = trb->template.parameter;
1297 dest->template.status = trb->template.status;
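	/* Write the control word (containing the cycle bit) last, after
	 * a write barrier, so that the controller never sees a
	 * partially written TRB as valid.
	 */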
1298 wmb();
1299 dest->template.control = ( trb->template.control |
1300 cpu_to_le32 ( cycle ) );
1301
1302 return 0;
1303 }
1304
1305 /**
1306 * Dequeue a transfer request block
1307 *
1308 * @v ring TRB ring
1309 * @ret iobuf I/O buffer
1310 */
1311 static struct io_buffer * xhci_dequeue ( struct xhci_trb_ring *ring ) {
1312 struct io_buffer *iobuf;
1313 unsigned int cons;
1314 unsigned int mask;
1315 unsigned int index;
1316
1317 /* Sanity check */
1318 assert ( xhci_ring_fill ( ring ) != 0 );
1319
1320 /* Update consumer counter */
1321 cons = ring->cons++;
1322 mask = ring->mask;
1323 index = ( cons & mask );
1324
1325 /* Retrieve I/O buffer */
1326 iobuf = ring->iobuf[index];
1327 ring->iobuf[index] = NULL;
1328
1329 return iobuf;
1330 }
1331
1332 /**
1333 * Enqueue multiple transfer request blocks
1334 *
1335 * @v ring TRB ring
1336 * @v iobuf I/O buffer
1337 * @v trbs Transfer request blocks (with empty Cycle flag)
1338 * @v count Number of transfer request blocks
1339 * @ret rc Return status code
1340 *
1341 * This operation does not implicitly ring the doorbell register.
1342 */
1343 static int xhci_enqueue_multi ( struct xhci_trb_ring *ring,
1344 struct io_buffer *iobuf,
1345 const union xhci_trb *trbs,
1346 unsigned int count ) {
1347 const union xhci_trb *trb = trbs;
1348 int rc;
1349
1350 /* Sanity check */
1351 assert ( iobuf != NULL );
1352
1353 /* Fail if ring does not have sufficient space */
1354 if ( xhci_ring_remaining ( ring ) < count )
1355 return -ENOBUFS;
1356
1357 /* Enqueue each TRB, recording the I/O buffer with the final TRB */
1358 while ( count-- ) {
1359 rc = xhci_enqueue ( ring, ( count ? NULL : iobuf ), trb++ );
1360 assert ( rc == 0 ); /* Should never be able to fail */
1361 }
1362
1363 return 0;
1364 }
1365
1366 /**
1367 * Dequeue multiple transfer request blocks
1368 *
1369 * @v ring TRB ring
1370 * @ret iobuf I/O buffer
1371 */
1372 static struct io_buffer * xhci_dequeue_multi ( struct xhci_trb_ring *ring ) {
1373 struct io_buffer *iobuf;
1374
1375 /* Dequeue TRBs until we reach the final TRB for an I/O buffer */
1376 do {
1377 iobuf = xhci_dequeue ( ring );
1378 } while ( iobuf == NULL );
1379
1380 return iobuf;
1381 }
1382
1383 /**
1384 * Ring doorbell register
1385 *
1386 * @v ring TRB ring
1387 */
1388 static inline __attribute__ (( always_inline )) void
1389 xhci_doorbell ( struct xhci_trb_ring *ring ) {
1390
1391 wmb();
1392 writel ( ring->dbval, ring->db );
1393 }
1394
1395 /******************************************************************************
1396 *
1397 * Command and event rings
1398 *
1399 ******************************************************************************
1400 */
1401
1402 /**
1403 * Allocate command ring
1404 *
1405 * @v xhci xHCI device
1406 * @ret rc Return status code
1407 */
1408 static int xhci_command_alloc ( struct xhci_device *xhci ) {
1409 physaddr_t crp;
1410 int rc;
1411
1412 /* Allocate TRB ring */
1413 if ( ( rc = xhci_ring_alloc ( xhci, &xhci->command, XHCI_CMD_TRBS_LOG2,
1414 0, 0, 0 ) ) != 0 )
1415 goto err_ring_alloc;
1416
1417 /* Program command ring control register */
1418 crp = virt_to_phys ( xhci->command.trb );
1419 if ( ( rc = xhci_writeq ( xhci, ( crp | XHCI_CRCR_RCS ),
1420 xhci->op + XHCI_OP_CRCR ) ) != 0 )
1421 goto err_writeq;
1422
1423 DBGC2 ( xhci, "XHCI %s CRCR at [%08lx,%08lx)\n",
1424 xhci->name, crp, ( crp + xhci->command.len ) );
1425 return 0;
1426
1427 err_writeq:
1428 xhci_ring_free ( &xhci->command );
1429 err_ring_alloc:
1430 return rc;
1431 }
1432
1433 /**
1434 * Free command ring
1435 *
1436 * @v xhci xHCI device
1437 */
1438 static void xhci_command_free ( struct xhci_device *xhci ) {
1439
1440 /* Sanity check */
1441 assert ( ( readl ( xhci->op + XHCI_OP_CRCR ) & XHCI_CRCR_CRR ) == 0 );
1442
1443 /* Clear command ring control register */
1444 xhci_writeq ( xhci, 0, xhci->op + XHCI_OP_CRCR );
1445
1446 /* Free TRB ring */
1447 xhci_ring_free ( &xhci->command );
1448 }
1449
1450 /**
1451 * Allocate event ring
1452 *
1453 * @v xhci xHCI device
1454 * @ret rc Return status code
1455 */
1456 static int xhci_event_alloc ( struct xhci_device *xhci ) {
1457 struct xhci_event_ring *event = &xhci->event;
1458 unsigned int count;
1459 size_t len;
1460 int rc;
1461
1462 /* Allocate event ring */
1463 count = ( 1 << XHCI_EVENT_TRBS_LOG2 );
1464 len = ( count * sizeof ( event->trb[0] ) );
1465 event->trb = malloc_dma ( len, xhci_align ( len ) );
1466 if ( ! event->trb ) {
1467 rc = -ENOMEM;
1468 goto err_alloc_trb;
1469 }
1470 memset ( event->trb, 0, len );
1471
1472 /* Allocate event ring segment table */
1473 event->segment = malloc_dma ( sizeof ( event->segment[0] ),
1474 xhci_align ( sizeof (event->segment[0])));
1475 if ( ! event->segment ) {
1476 rc = -ENOMEM;
1477 goto err_alloc_segment;
1478 }
1479 memset ( event->segment, 0, sizeof ( event->segment[0] ) );
1480 event->segment[0].base = cpu_to_le64 ( virt_to_phys ( event->trb ) );
1481 event->segment[0].count = cpu_to_le32 ( count );
1482
1483 /* Program event ring registers */
1484 writel ( 1, xhci->run + XHCI_RUN_ERSTSZ ( 0 ) );
1485 if ( ( rc = xhci_writeq ( xhci, virt_to_phys ( event->trb ),
1486 xhci->run + XHCI_RUN_ERDP ( 0 ) ) ) != 0 )
1487 goto err_writeq_erdp;
1488 if ( ( rc = xhci_writeq ( xhci, virt_to_phys ( event->segment ),
1489 xhci->run + XHCI_RUN_ERSTBA ( 0 ) ) ) != 0 )
1490 goto err_writeq_erstba;
1491
1492 DBGC2 ( xhci, "XHCI %s event ring [%08lx,%08lx) table [%08lx,%08lx)\n",
1493 xhci->name, virt_to_phys ( event->trb ),
1494 ( virt_to_phys ( event->trb ) + len ),
1495 virt_to_phys ( event->segment ),
1496 ( virt_to_phys ( event->segment ) +
1497 sizeof (event->segment[0] ) ) );
1498 return 0;
1499
1500 xhci_writeq ( xhci, 0, xhci->run + XHCI_RUN_ERSTBA ( 0 ) );
1501 err_writeq_erstba:
1502 xhci_writeq ( xhci, 0, xhci->run + XHCI_RUN_ERDP ( 0 ) );
1503 err_writeq_erdp:
1504  free_dma ( event->segment, sizeof ( event->segment[0] ) );
1505 err_alloc_segment:
1506  free_dma ( event->trb, len );
1507 err_alloc_trb:
1508 return rc;
1509 }
1510
1511 /**
1512 * Free event ring
1513 *
1514 * @v xhci xHCI device
1515 */
1516 static void xhci_event_free ( struct xhci_device *xhci ) {
1517 struct xhci_event_ring *event = &xhci->event;
1518 unsigned int count;
1519 size_t len;
1520
1521 /* Clear event ring registers */
1522 writel ( 0, xhci->run + XHCI_RUN_ERSTSZ ( 0 ) );
1523 xhci_writeq ( xhci, 0, xhci->run + XHCI_RUN_ERSTBA ( 0 ) );
1524 xhci_writeq ( xhci, 0, xhci->run + XHCI_RUN_ERDP ( 0 ) );
1525
1526 /* Free event ring segment table */
1527 free_dma ( event->segment, sizeof ( event->segment[0] ) );
1528
1529 /* Free event ring */
1530 count = ( 1 << XHCI_EVENT_TRBS_LOG2 );
1531 len = ( count * sizeof ( event->trb[0] ) );
1532 free_dma ( event->trb, len );
1533 }
1534
1535 /**
1536 * Handle transfer event
1537 *
1538 * @v xhci xHCI device
1539 * @v trb Transfer event TRB
1540 */
1541 static void xhci_transfer ( struct xhci_device *xhci,
1542 struct xhci_trb_transfer *trb ) {
1543 struct xhci_slot *slot;
1544 struct xhci_endpoint *endpoint;
1545 struct io_buffer *iobuf;
1546 int rc;
1547
1548 /* Profile transfer events */
1549 profile_start ( &xhci_transfer_profiler );
1550
1551 /* Identify slot */
1552 if ( ( trb->slot > xhci->slots ) ||
1553 ( ( slot = xhci->slot[trb->slot] ) == NULL ) ) {
1554 DBGC ( xhci, "XHCI %s transfer event invalid slot %d:\n",
1555 xhci->name, trb->slot );
1556 DBGC_HDA ( xhci, 0, trb, sizeof ( *trb ) );
1557 return;
1558 }
1559
1560 /* Identify endpoint */
1561  if ( ( trb->endpoint >= XHCI_CTX_END ) ||
1562 ( ( endpoint = slot->endpoint[trb->endpoint] ) == NULL ) ) {
1563 DBGC ( xhci, "XHCI %s slot %d transfer event invalid epid "
1564 "%d:\n", xhci->name, slot->id, trb->endpoint );
1565 DBGC_HDA ( xhci, 0, trb, sizeof ( *trb ) );
1566 return;
1567 }
1568
1569 /* Dequeue TRB(s) */
1570 iobuf = xhci_dequeue_multi ( &endpoint->ring );
1571 assert ( iobuf != NULL );
1572
1573 /* Check for errors */
1574 if ( ! ( ( trb->code == XHCI_CMPLT_SUCCESS ) ||
1575 ( trb->code == XHCI_CMPLT_SHORT ) ) ) {
1576
1577 /* Construct error */
1578 rc = -ECODE ( trb->code );
1579 DBGC ( xhci, "XHCI %s slot %d ctx %d failed (code %d): %s\n",
1580 xhci->name, slot->id, endpoint->ctx, trb->code,
1581 strerror ( rc ) );
1582 DBGC_HDA ( xhci, 0, trb, sizeof ( *trb ) );
1583
1584 /* Sanity check */
1585 assert ( ( endpoint->context->state & XHCI_ENDPOINT_STATE_MASK )
1586 != XHCI_ENDPOINT_RUNNING );
1587
1588 /* Report failure to USB core */
1589 usb_complete_err ( endpoint->ep, iobuf, rc );
1590 return;
1591 }
1592
1593 /* Record actual transfer size */
1594 iob_unput ( iobuf, le16_to_cpu ( trb->residual ) );
1595
1596 /* Sanity check (for successful completions only) */
1597 assert ( xhci_ring_consumed ( &endpoint->ring ) ==
1598 le64_to_cpu ( trb->transfer ) );
1599
1600 /* Report completion to USB core */
1601 usb_complete ( endpoint->ep, iobuf );
1602 profile_stop ( &xhci_transfer_profiler );
1603 }
1604
1605 /**
1606 * Handle command completion event
1607 *
1608 * @v xhci xHCI device
1609 * @v trb Command completion event
1610 */
1611 static void xhci_complete ( struct xhci_device *xhci,
1612 struct xhci_trb_complete *trb ) {
1613 int rc;
1614
1615 /* Ignore "command ring stopped" notifications */
1616 if ( trb->code == XHCI_CMPLT_CMD_STOPPED ) {
1617 DBGC2 ( xhci, "XHCI %s command ring stopped\n", xhci->name );
1618 return;
1619 }
1620
1621 /* Ignore unexpected completions */
1622 if ( ! xhci->pending ) {
1623 rc = -ECODE ( trb->code );
1624 DBGC ( xhci, "XHCI %s unexpected completion (code %d): %s\n",
1625 xhci->name, trb->code, strerror ( rc ) );
1626 DBGC_HDA ( xhci, 0, trb, sizeof ( *trb ) );
1627 return;
1628 }
1629
1630 /* Dequeue command TRB */
1631 xhci_dequeue ( &xhci->command );
1632
1633 /* Sanity check */
1634 assert ( xhci_ring_consumed ( &xhci->command ) ==
1635 le64_to_cpu ( trb->command ) );
1636
1637 /* Record completion */
1638 memcpy ( xhci->pending, trb, sizeof ( *xhci->pending ) );
1639 xhci->pending = NULL;
1640 }
1641
1642 /**
1643 * Handle port status event
1644 *
1645 * @v xhci xHCI device
1646 * @v trb Port status event
1647 */
1648 static void xhci_port_status ( struct xhci_device *xhci,
1649 struct xhci_trb_port_status *trb ) {
1650 struct usb_port *port = usb_port ( xhci->bus->hub, trb->port );
1651 uint32_t portsc;
1652
1653 /* Sanity check */
1654 assert ( ( trb->port > 0 ) && ( trb->port <= xhci->ports ) );
1655
1656 /* Record disconnections and clear changes */
1657 portsc = readl ( xhci->op + XHCI_OP_PORTSC ( trb->port ) );
1658 port->disconnected |= ( portsc & XHCI_PORTSC_CSC );
1659 portsc &= ( XHCI_PORTSC_PRESERVE | XHCI_PORTSC_CHANGE );
1660 writel ( portsc, xhci->op + XHCI_OP_PORTSC ( trb->port ) );
1661
1662 /* Report port status change */
1663 usb_port_changed ( port );
1664 }
1665
1666 /**
1667 * Handle host controller event
1668 *
1669 * @v xhci xHCI device
1670 * @v trb Host controller event
1671 */
1672 static void xhci_host_controller ( struct xhci_device *xhci,
1673 struct xhci_trb_host_controller *trb ) {
1674 int rc;
1675
1676 /* Construct error */
1677 rc = -ECODE ( trb->code );
1678 DBGC ( xhci, "XHCI %s host controller event (code %d): %s\n",
1679 xhci->name, trb->code, strerror ( rc ) );
1680 }
1681
1682 /**
1683 * Poll event ring
1684 *
1685 * @v xhci xHCI device
1686 */
1687 static void xhci_event_poll ( struct xhci_device *xhci ) {
1688 struct xhci_event_ring *event = &xhci->event;
1689 union xhci_trb *trb;
1690 unsigned int shift = XHCI_EVENT_TRBS_LOG2;
1691 unsigned int count = ( 1 << shift );
1692 unsigned int mask = ( count - 1 );
1693 unsigned int consumed;
1694 unsigned int type;
1695
1696 /* Poll for events */
1697 profile_start ( &xhci_event_profiler );
1698 for ( consumed = 0 ; ; consumed++ ) {
1699
1700 /* Stop if we reach an empty TRB */
1701 rmb();
1702 trb = &event->trb[ event->cons & mask ];
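		/* A TRB is valid only while its cycle bit differs from
		 * bit "shift" of the consumer counter, i.e. matches the
		 * cycle state expected for the current pass.
		 */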
1703 if ( ! ( ( trb->common.flags ^
1704 ( event->cons >> shift ) ) & XHCI_TRB_C ) )
1705 break;
1706
1707 /* Handle TRB */
1708 type = ( trb->common.type & XHCI_TRB_TYPE_MASK );
1709 switch ( type ) {
1710
1711 case XHCI_TRB_TRANSFER :
1712 xhci_transfer ( xhci, &trb->transfer );
1713 break;
1714
1715 case XHCI_TRB_COMPLETE :
1716 xhci_complete ( xhci, &trb->complete );
1717 break;
1718
1719 case XHCI_TRB_PORT_STATUS:
1720 xhci_port_status ( xhci, &trb->port );
1721 break;
1722
1723 case XHCI_TRB_HOST_CONTROLLER:
1724 xhci_host_controller ( xhci, &trb->host );
1725 break;
1726
1727 default:
1728  DBGC ( xhci, "XHCI %s unrecognised event %#x:\n",
1729 xhci->name, event->cons );
1730 DBGC_HDA ( xhci, virt_to_phys ( trb ),
1731 trb, sizeof ( *trb ) );
1732 break;
1733 }
1734
1735 /* Consume this TRB */
1736 event->cons++;
1737 }
1738
1739 /* Update dequeue pointer if applicable */
1740 if ( consumed ) {
1741 xhci_writeq ( xhci, virt_to_phys ( trb ),
1742 xhci->run + XHCI_RUN_ERDP ( 0 ) );
1743 profile_stop ( &xhci_event_profiler );
1744 }
1745 }
1746
1747 /**
1748 * Abort command
1749 *
1750 * @v xhci xHCI device
1751 */
1752 static void xhci_abort ( struct xhci_device *xhci ) {
1753 physaddr_t crp;
1754
1755 /* Abort the command */
1756 DBGC2 ( xhci, "XHCI %s aborting command\n", xhci->name );
1757 xhci_writeq ( xhci, XHCI_CRCR_CA, xhci->op + XHCI_OP_CRCR );
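	/* Setting the Command Abort bit stops the command ring; the
	 * resulting "command ring stopped" completion is ignored by
	 * xhci_complete().
	 */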
1758
1759 /* Allow time for command to abort */
1760 mdelay ( XHCI_COMMAND_ABORT_DELAY_MS );
1761
1762 /* Sanity check */
1763 assert ( ( readl ( xhci->op + XHCI_OP_CRCR ) & XHCI_CRCR_CRR ) == 0 );
1764
1765 /* Consume (and ignore) any final command status */
1766 xhci_event_poll ( xhci );
1767
1768 /* Reset the command ring control register */
1769 xhci_ring_reset ( &xhci->command );
1770 crp = virt_to_phys ( xhci->command.trb );
1771 xhci_writeq ( xhci, ( crp | XHCI_CRCR_RCS ), xhci->op + XHCI_OP_CRCR );
1772 }
1773
1774 /**
1775 * Issue command and wait for completion
1776 *
1777 * @v xhci xHCI device
1778 * @v trb Transfer request block (with empty Cycle flag)
1779 * @ret rc Return status code
1780 *
1781 * On a successful completion, the TRB will be overwritten with the
1782 * completion.
1783 */
1784 static int xhci_command ( struct xhci_device *xhci, union xhci_trb *trb ) {
1785 struct xhci_trb_complete *complete = &trb->complete;
1786 unsigned int i;
1787 int rc;
1788
1789 /* Record the pending command */
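	/* xhci_complete() will copy the completion TRB over *trb and
	 * clear xhci->pending; the polling loop below uses this to
	 * detect completion.
	 */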
1790 xhci->pending = trb;
1791
1792 /* Enqueue the command */
1793 if ( ( rc = xhci_enqueue ( &xhci->command, NULL, trb ) ) != 0 )
1794 goto err_enqueue;
1795
1796 /* Ring the command doorbell */
1797 xhci_doorbell ( &xhci->command );
1798
1799 /* Wait for the command to complete */
1800 for ( i = 0 ; i < XHCI_COMMAND_MAX_WAIT_MS ; i++ ) {
1801
1802 /* Poll event ring */
1803 xhci_event_poll ( xhci );
1804
1805 /* Check for completion */
1806 if ( ! xhci->pending ) {
1807 if ( complete->code != XHCI_CMPLT_SUCCESS ) {
1808 rc = -ECODE ( complete->code );
1809 DBGC ( xhci, "XHCI %s command failed (code "
1810 "%d): %s\n", xhci->name, complete->code,
1811 strerror ( rc ) );
1812 DBGC_HDA ( xhci, 0, trb, sizeof ( *trb ) );
1813 return rc;
1814 }
1815 return 0;
1816 }
1817
1818 /* Delay */
1819 mdelay ( 1 );
1820 }
1821
1822 /* Timeout */
1823 DBGC ( xhci, "XHCI %s timed out waiting for completion\n", xhci->name );
1824 rc = -ETIMEDOUT;
1825
1826 /* Abort command */
1827 xhci_abort ( xhci );
1828
1829 err_enqueue:
1830 xhci->pending = NULL;
1831 return rc;
1832 }
1833
1834 /**
1835 * Issue NOP and wait for completion
1836 *
1837 * @v xhci xHCI device
1838 * @ret rc Return status code
1839 */
1840 static inline int xhci_nop ( struct xhci_device *xhci ) {
1841 union xhci_trb trb;
1842 struct xhci_trb_common *nop = &trb.common;
1843 int rc;
1844
1845 /* Construct command */
1846 memset ( nop, 0, sizeof ( *nop ) );
1847 nop->flags = XHCI_TRB_IOC;
1848 nop->type = XHCI_TRB_NOP_CMD;
1849
1850 /* Issue command and wait for completion */
1851 if ( ( rc = xhci_command ( xhci, &trb ) ) != 0 )
1852 return rc;
1853
1854 return 0;
1855 }
1856
1857 /**
1858 * Enable slot
1859 *
1860 * @v xhci xHCI device
1861 * @v type Slot type
1862 * @ret slot Device slot ID, or negative error
1863 */
1864 static inline int xhci_enable_slot ( struct xhci_device *xhci,
1865 unsigned int type ) {
1866 union xhci_trb trb;
1867 struct xhci_trb_enable_slot *enable = &trb.enable;
1868 struct xhci_trb_complete *enabled = &trb.complete;
1869 unsigned int slot;
1870 int rc;
1871
1872 /* Construct command */
1873 memset ( enable, 0, sizeof ( *enable ) );
1874 enable->slot = type;
1875 enable->type = XHCI_TRB_ENABLE_SLOT;
1876
1877 /* Issue command and wait for completion */
1878 if ( ( rc = xhci_command ( xhci, &trb ) ) != 0 ) {
1879 DBGC ( xhci, "XHCI %s could not enable new slot: %s\n",
1880 xhci->name, strerror ( rc ) );
1881 return rc;
1882 }
1883
1884 /* Extract slot number */
1885 slot = enabled->slot;
1886
1887 DBGC2 ( xhci, "XHCI %s slot %d enabled\n", xhci->name, slot );
1888 return slot;
1889 }
1890
1891 /**
1892 * Disable slot
1893 *
1894 * @v xhci xHCI device
1895 * @v slot Device slot
1896 * @ret rc Return status code
1897 */
1898 static inline int xhci_disable_slot ( struct xhci_device *xhci,
1899 unsigned int slot ) {
1900 union xhci_trb trb;
1901 struct xhci_trb_disable_slot *disable = &trb.disable;
1902 int rc;
1903
1904 /* Construct command */
1905 memset ( disable, 0, sizeof ( *disable ) );
1906 disable->type = XHCI_TRB_DISABLE_SLOT;
1907 disable->slot = slot;
1908
1909 /* Issue command and wait for completion */
1910 if ( ( rc = xhci_command ( xhci, &trb ) ) != 0 ) {
1911 DBGC ( xhci, "XHCI %s could not disable slot %d: %s\n",
1912 xhci->name, slot, strerror ( rc ) );
1913 return rc;
1914 }
1915
1916 DBGC2 ( xhci, "XHCI %s slot %d disabled\n", xhci->name, slot );
1917 return 0;
1918 }
1919
1920 /**
1921 * Issue context-based command and wait for completion
1922 *
1923 * @v xhci xHCI device
1924 * @v slot Device slot
1925 * @v endpoint Endpoint
1926 * @v type TRB type
1927 * @v populate Input context populator
1928 * @ret rc Return status code
1929 */
1930 static int xhci_context ( struct xhci_device *xhci, struct xhci_slot *slot,
1931 struct xhci_endpoint *endpoint, unsigned int type,
1932 void ( * populate ) ( struct xhci_device *xhci,
1933 struct xhci_slot *slot,
1934 struct xhci_endpoint *endpoint,
1935 void *input ) ) {
1936 union xhci_trb trb;
1937 struct xhci_trb_context *context = &trb.context;
1938 size_t len;
1939 void *input;
1940 int rc;
1941
1942 /* Allocate an input context */
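/* The controller reads the input context directly via the
 * physical address placed in the TRB, so the buffer must be
 * physically contiguous and suitably aligned.
 */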
1943 len = xhci_input_context_offset ( xhci, XHCI_CTX_END );
1944 input = malloc_dma ( len, xhci_align ( len ) );
1945 if ( ! input ) {
1946 rc = -ENOMEM;
1947 goto err_alloc;
1948 }
1949 memset ( input, 0, len );
1950
1951 /* Populate input context */
1952 populate ( xhci, slot, endpoint, input );
1953
1954 /* Construct command */
1955 memset ( context, 0, sizeof ( *context ) );
1956 context->type = type;
1957 context->input = cpu_to_le64 ( virt_to_phys ( input ) );
1958 context->slot = slot->id;
1959
1960 /* Issue command and wait for completion */
1961 if ( ( rc = xhci_command ( xhci, &trb ) ) != 0 )
1962 goto err_command;
1963
1964 err_command:
1965 free_dma ( input, len );
1966 err_alloc:
1967 return rc;
1968 }
1969
1970 /**
1971 * Populate address device input context
1972 *
1973 * @v xhci xHCI device
1974 * @v slot Device slot
1975 * @v endpoint Endpoint
1976 * @v input Input context
1977 */
1978 static void xhci_address_device_input ( struct xhci_device *xhci,
1979 struct xhci_slot *slot,
1980 struct xhci_endpoint *endpoint,
1981 void *input ) {
1982 struct xhci_control_context *control_ctx;
1983 struct xhci_slot_context *slot_ctx;
1984 struct xhci_endpoint_context *ep_ctx;
1985
1986 /* Sanity checks */
1987 assert ( endpoint->ctx == XHCI_CTX_EP0 );
1988
1989 /* Populate control context */
1990 control_ctx = input;
1991 control_ctx->add = cpu_to_le32 ( ( 1 << XHCI_CTX_SLOT ) |
1992 ( 1 << XHCI_CTX_EP0 ) );
1993
1994 /* Populate slot context */
1995 slot_ctx = ( input + xhci_input_context_offset ( xhci, XHCI_CTX_SLOT ));
1996 slot_ctx->info = cpu_to_le32 ( XHCI_SLOT_INFO ( 1, 0, slot->psiv,
1997 slot->route ) );
1998 slot_ctx->port = slot->port;
1999 slot_ctx->tt_id = slot->tt_id;
2000 slot_ctx->tt_port = slot->tt_port;
2001
2002 /* Populate control endpoint context */
2003 ep_ctx = ( input + xhci_input_context_offset ( xhci, XHCI_CTX_EP0 ) );
2004 ep_ctx->type = XHCI_EP_TYPE_CONTROL;
2005 ep_ctx->burst = endpoint->ep->burst;
2006 ep_ctx->mtu = cpu_to_le16 ( endpoint->ep->mtu );
2007 ep_ctx->dequeue = cpu_to_le64 ( virt_to_phys ( endpoint->ring.trb ) |
2008 XHCI_EP_DCS );
2009 ep_ctx->trb_len = cpu_to_le16 ( XHCI_EP0_TRB_LEN );
2010 }
2011
2012 /**
2013 * Address device
2014 *
2015 * @v xhci xHCI device
2016 * @v slot Device slot
2017 * @ret rc Return status code
2018 */
2019 static inline int xhci_address_device ( struct xhci_device *xhci,
2020 struct xhci_slot *slot ) {
2021 struct usb_device *usb = slot->usb;
2022 struct xhci_slot_context *slot_ctx;
2023 int rc;
2024
2025 /* Assign device address */
2026 if ( ( rc = xhci_context ( xhci, slot, slot->endpoint[XHCI_CTX_EP0],
2027 XHCI_TRB_ADDRESS_DEVICE,
2028 xhci_address_device_input ) ) != 0 )
2029 return rc;
2030
2031 /* Get assigned address */
2032 slot_ctx = ( slot->context +
2033 xhci_device_context_offset ( xhci, XHCI_CTX_SLOT ) );
2034 usb->address = slot_ctx->address;
2035 DBGC2 ( xhci, "XHCI %s assigned address %d to %s\n",
2036 xhci->name, usb->address, usb->name );
2037
2038 return 0;
2039 }
2040
2041 /**
2042 * Populate configure endpoint input context
2043 *
2044 * @v xhci xHCI device
2045 * @v slot Device slot
2046 * @v endpoint Endpoint
2047 * @v input Input context
2048 */
2049 static void xhci_configure_endpoint_input ( struct xhci_device *xhci,
2050 struct xhci_slot *slot,
2051 struct xhci_endpoint *endpoint,
2052 void *input ) {
2053 struct xhci_control_context *control_ctx;
2054 struct xhci_slot_context *slot_ctx;
2055 struct xhci_endpoint_context *ep_ctx;
2056
2057 /* Populate control context */
2058 control_ctx = input;
2059 control_ctx->add = cpu_to_le32 ( ( 1 << XHCI_CTX_SLOT ) |
2060 ( 1 << endpoint->ctx ) );
2061
2062 /* Populate slot context */
2063 slot_ctx = ( input + xhci_input_context_offset ( xhci, XHCI_CTX_SLOT ));
2064 slot_ctx->info = cpu_to_le32 ( XHCI_SLOT_INFO ( ( XHCI_CTX_END - 1 ),
2065 ( slot->ports ? 1 : 0 ),
2066 slot->psiv, 0 ) );
2067 slot_ctx->ports = slot->ports;
2068
2069 /* Populate endpoint context */
2070 ep_ctx = ( input + xhci_input_context_offset ( xhci, endpoint->ctx ) );
2071 ep_ctx->interval = endpoint->interval;
2072 ep_ctx->type = endpoint->type;
2073 ep_ctx->burst = endpoint->ep->burst;
2074 ep_ctx->mtu = cpu_to_le16 ( endpoint->ep->mtu );
2075 ep_ctx->dequeue = cpu_to_le64 ( virt_to_phys ( endpoint->ring.trb ) |
2076 XHCI_EP_DCS );
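/* The average TRB length is used by the controller for
 * bandwidth estimation; the endpoint MTU serves as a
 * reasonable approximation.
 */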
2077 ep_ctx->trb_len = cpu_to_le16 ( endpoint->ep->mtu ); /* best guess */
2078 }
2079
2080 /**
2081 * Configure endpoint
2082 *
2083 * @v xhci xHCI device
2084 * @v slot Device slot
2085 * @v endpoint Endpoint
2086 * @ret rc Return status code
2087 */
2088 static inline int xhci_configure_endpoint ( struct xhci_device *xhci,
2089 struct xhci_slot *slot,
2090 struct xhci_endpoint *endpoint ) {
2091 int rc;
2092
2093 /* Configure endpoint */
2094 if ( ( rc = xhci_context ( xhci, slot, endpoint,
2095 XHCI_TRB_CONFIGURE_ENDPOINT,
2096 xhci_configure_endpoint_input ) ) != 0 )
2097 return rc;
2098
2099 DBGC2 ( xhci, "XHCI %s slot %d ctx %d configured\n",
2100 xhci->name, slot->id, endpoint->ctx );
2101 return 0;
2102 }
2103
2104 /**
2105 * Populate deconfigure endpoint input context
2106 *
2107 * @v xhci xHCI device
2108 * @v slot Device slot
2109 * @v endpoint Endpoint
2110 * @v input Input context
2111 */
2112 static void
2113 xhci_deconfigure_endpoint_input ( struct xhci_device *xhci __unused,
2114 struct xhci_slot *slot __unused,
2115 struct xhci_endpoint *endpoint,
2116 void *input ) {
2117 struct xhci_control_context *control_ctx;
2118 struct xhci_slot_context *slot_ctx;
2119
2120 /* Populate control context */
2121 control_ctx = input;
2122 control_ctx->add = cpu_to_le32 ( 1 << XHCI_CTX_SLOT );
2123 control_ctx->drop = cpu_to_le32 ( 1 << endpoint->ctx );
2124
2125 /* Populate slot context */
2126 slot_ctx = ( input + xhci_input_context_offset ( xhci, XHCI_CTX_SLOT ));
2127 slot_ctx->info = cpu_to_le32 ( XHCI_SLOT_INFO ( ( XHCI_CTX_END - 1 ),
2128 0, 0, 0 ) );
2129 }
2130
2131 /**
2132 * Deconfigure endpoint
2133 *
2134 * @v xhci xHCI device
2135 * @v slot Device slot
2136 * @v endpoint Endpoint
2137 * @ret rc Return status code
2138 */
2139 static inline int xhci_deconfigure_endpoint ( struct xhci_device *xhci,
2140 struct xhci_slot *slot,
2141 struct xhci_endpoint *endpoint ) {
2142 int rc;
2143
2144 /* Deconfigure endpoint */
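/* xHCI has no separate "deconfigure" command; a Configure
 * Endpoint command is issued with an input context that drops
 * (rather than adds) this endpoint.
 */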
2145 if ( ( rc = xhci_context ( xhci, slot, endpoint,
2146 XHCI_TRB_CONFIGURE_ENDPOINT,
2147 xhci_deconfigure_endpoint_input ) ) != 0 )
2148 return rc;
2149
2150 DBGC2 ( xhci, "XHCI %s slot %d ctx %d deconfigured\n",
2151 xhci->name, slot->id, endpoint->ctx );
2152 return 0;
2153 }
2154
2155 /**
2156 * Populate evaluate context input context
2157 *
2158 * @v xhci xHCI device
2159 * @v slot Device slot
2160 * @v endpoint Endpoint
2161 * @v input Input context
2162 */
2163 static void xhci_evaluate_context_input ( struct xhci_device *xhci,
2164 struct xhci_slot *slot __unused,
2165 struct xhci_endpoint *endpoint,
2166 void *input ) {
2167 struct xhci_control_context *control_ctx;
2168 struct xhci_slot_context *slot_ctx;
2169 struct xhci_endpoint_context *ep_ctx;
2170
2171 /* Populate control context */
2172 control_ctx = input;
2173 control_ctx->add = cpu_to_le32 ( ( 1 << XHCI_CTX_SLOT ) |
2174 ( 1 << endpoint->ctx ) );
2175
2176 /* Populate slot context */
2177 slot_ctx = ( input + xhci_input_context_offset ( xhci, XHCI_CTX_SLOT ));
2178 slot_ctx->info = cpu_to_le32 ( XHCI_SLOT_INFO ( ( XHCI_CTX_END - 1 ),
2179 0, 0, 0 ) );
2180
2181 /* Populate endpoint context */
2182 ep_ctx = ( input + xhci_input_context_offset ( xhci, endpoint->ctx ) );
2183 ep_ctx->mtu = cpu_to_le16 ( endpoint->ep->mtu );
2184 }
2185
2186 /**
2187 * Evaluate context
2188 *
2189 * @v xhci xHCI device
2190 * @v slot Device slot
2191 * @v endpoint Endpoint
2192 * @ret rc Return status code
2193 */
2194 static inline int xhci_evaluate_context ( struct xhci_device *xhci,
2195 struct xhci_slot *slot,
2196 struct xhci_endpoint *endpoint ) {
2197 int rc;
2198
2199 /* Evaluate context */
2200 if ( ( rc = xhci_context ( xhci, slot, endpoint,
2201 XHCI_TRB_EVALUATE_CONTEXT,
2202 xhci_evaluate_context_input ) ) != 0 )
2203 return rc;
2204
2205 DBGC2 ( xhci, "XHCI %s slot %d ctx %d (re-)evaluated\n",
2206 xhci->name, slot->id, endpoint->ctx );
2207 return 0;
2208 }
2209
2210 /**
2211 * Reset endpoint
2212 *
2213 * @v xhci xHCI device
2214 * @v slot Device slot
2215 * @v endpoint Endpoint
2216 * @ret rc Return status code
2217 */
2218 static inline int xhci_reset_endpoint ( struct xhci_device *xhci,
2219 struct xhci_slot *slot,
2220 struct xhci_endpoint *endpoint ) {
2221 union xhci_trb trb;
2222 struct xhci_trb_reset_endpoint *reset = &trb.reset;
2223 int rc;
2224
2225 /* Construct command */
2226 memset ( reset, 0, sizeof ( *reset ) );
2227 reset->slot = slot->id;
2228 reset->endpoint = endpoint->ctx;
2229 reset->type = XHCI_TRB_RESET_ENDPOINT;
2230
2231 /* Issue command and wait for completion */
2232 if ( ( rc = xhci_command ( xhci, &trb ) ) != 0 ) {
2233 DBGC ( xhci, "XHCI %s slot %d ctx %d could not reset endpoint "
2234 "in state %d: %s\n", xhci->name, slot->id, endpoint->ctx,
2235 endpoint->context->state, strerror ( rc ) );
2236 return rc;
2237 }
2238
2239 return 0;
2240 }
2241
2242 /**
2243 * Stop endpoint
2244 *
2245 * @v xhci xHCI device
2246 * @v slot Device slot
2247 * @v endpoint Endpoint
2248 * @ret rc Return status code
2249 */
2250 static inline int xhci_stop_endpoint ( struct xhci_device *xhci,
2251 struct xhci_slot *slot,
2252 struct xhci_endpoint *endpoint ) {
2253 union xhci_trb trb;
2254 struct xhci_trb_stop_endpoint *stop = &trb.stop;
2255 int rc;
2256
2257 /* Construct command */
2258 memset ( stop, 0, sizeof ( *stop ) );
2259 stop->slot = slot->id;
2260 stop->endpoint = endpoint->ctx;
2261 stop->type = XHCI_TRB_STOP_ENDPOINT;
2262
2263 /* Issue command and wait for completion */
2264 if ( ( rc = xhci_command ( xhci, &trb ) ) != 0 ) {
2265 DBGC ( xhci, "XHCI %s slot %d ctx %d could not stop endpoint "
2266 "in state %d: %s\n", xhci->name, slot->id, endpoint->ctx,
2267 endpoint->context->state, strerror ( rc ) );
2268 return rc;
2269 }
2270
2271 return 0;
2272 }
2273
2274 /**
2275 * Set transfer ring dequeue pointer
2276 *
2277 * @v xhci xHCI device
2278 * @v slot Device slot
2279 * @v endpoint Endpoint
2280 * @ret rc Return status code
2281 */
2282 static inline int
2283 xhci_set_tr_dequeue_pointer ( struct xhci_device *xhci,
2284 struct xhci_slot *slot,
2285 struct xhci_endpoint *endpoint ) {
2286 union xhci_trb trb;
2287 struct xhci_trb_set_tr_dequeue_pointer *dequeue = &trb.dequeue;
2288 struct xhci_trb_ring *ring = &endpoint->ring;
2289 unsigned int cons;
2290 unsigned int mask;
2291 unsigned int index;
2292 unsigned int dcs;
2293 int rc;
2294
2295 /* Construct command */
2296 memset ( dequeue, 0, sizeof ( *dequeue ) );
2297 cons = ring->cons;
2298 mask = ring->mask;
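/* The consumer counter's bits above the ring size record the
 * number of times the ring has wrapped; inverting that wrap
 * count yields the current Dequeue Cycle State, since the
 * ring starts with a cycle state of 1.
 */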
2299 dcs = ( ( ~( cons >> ring->shift ) ) & XHCI_EP_DCS );
2300 index = ( cons & mask );
2301 dequeue->dequeue =
2302 cpu_to_le64 ( virt_to_phys ( &ring->trb[index] ) | dcs );
2303 dequeue->slot = slot->id;
2304 dequeue->endpoint = endpoint->ctx;
2305 dequeue->type = XHCI_TRB_SET_TR_DEQUEUE_POINTER;
2306
2307 /* Issue command and wait for completion */
2308 if ( ( rc = xhci_command ( xhci, &trb ) ) != 0 ) {
2309 DBGC ( xhci, "XHCI %s slot %d ctx %d could not set TR dequeue "
2310 "pointer in state %d: %s\n", xhci->name, slot->id,
2311 endpoint->ctx, endpoint->context->state, strerror ( rc));
2312 return rc;
2313 }
2314
2315 return 0;
2316 }
2317
2318 /******************************************************************************
2319 *
2320 * Endpoint operations
2321 *
2322 ******************************************************************************
2323 */
2324
2325 /**
2326 * Open endpoint
2327 *
2328 * @v ep USB endpoint
2329 * @ret rc Return status code
2330 */
2331 static int xhci_endpoint_open ( struct usb_endpoint *ep ) {
2332 struct usb_device *usb = ep->usb;
2333 struct xhci_slot *slot = usb_get_hostdata ( usb );
2334 struct xhci_device *xhci = slot->xhci;
2335 struct xhci_endpoint *endpoint;
2336 unsigned int ctx;
2337 unsigned int type;
2338 unsigned int interval;
2339 int rc;
2340
2341 /* Calculate context index */
2342 ctx = XHCI_CTX ( ep->address );
2343 assert ( slot->endpoint[ctx] == NULL );
2344
2345 /* Calculate endpoint type */
2346 type = XHCI_EP_TYPE ( ep->attributes & USB_ENDPOINT_ATTR_TYPE_MASK );
2347 if ( type == XHCI_EP_TYPE ( USB_ENDPOINT_ATTR_CONTROL ) )
2348 type = XHCI_EP_TYPE_CONTROL;
2349 if ( ep->address & USB_DIR_IN )
2350 type |= XHCI_EP_TYPE_IN;
2351
2352 /* Calculate interval */
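/* The endpoint context stores the interval as a power-of-two
 * exponent, so convert periodic endpoint intervals via fls();
 * non-periodic endpoints use the value as-is.
 */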
2353 if ( type & XHCI_EP_TYPE_PERIODIC ) {
2354 interval = ( fls ( ep->interval ) - 1 );
2355 } else {
2356 interval = ep->interval;
2357 }
2358
2359 /* Allocate and initialise structure */
2360 endpoint = zalloc ( sizeof ( *endpoint ) );
2361 if ( ! endpoint ) {
2362 rc = -ENOMEM;
2363 goto err_alloc;
2364 }
2365 usb_endpoint_set_hostdata ( ep, endpoint );
2366 slot->endpoint[ctx] = endpoint;
2367 endpoint->xhci = xhci;
2368 endpoint->slot = slot;
2369 endpoint->ep = ep;
2370 endpoint->ctx = ctx;
2371 endpoint->type = type;
2372 endpoint->interval = interval;
2373 endpoint->context = ( ( ( void * ) slot->context ) +
2374 xhci_device_context_offset ( xhci, ctx ) );
2375
2376 /* Allocate transfer ring */
2377 if ( ( rc = xhci_ring_alloc ( xhci, &endpoint->ring,
2378 XHCI_TRANSFER_TRBS_LOG2,
2379 slot->id, ctx, 0 ) ) != 0 )
2380 goto err_ring_alloc;
2381
2382 /* Configure endpoint, if applicable */
2383 if ( ( ctx != XHCI_CTX_EP0 ) &&
2384 ( ( rc = xhci_configure_endpoint ( xhci, slot, endpoint ) ) != 0 ))
2385 goto err_configure_endpoint;
2386
2387 DBGC2 ( xhci, "XHCI %s slot %d ctx %d ring [%08lx,%08lx)\n",
2388 xhci->name, slot->id, ctx, virt_to_phys ( endpoint->ring.trb ),
2389 ( virt_to_phys ( endpoint->ring.trb ) + endpoint->ring.len ) );
2390 return 0;
2391
2392 xhci_deconfigure_endpoint ( xhci, slot, endpoint );
2393 err_configure_endpoint:
2394 xhci_ring_free ( &endpoint->ring );
2395 err_ring_alloc:
2396 slot->endpoint[ctx] = NULL;
2397 free ( endpoint );
2398 err_alloc:
2399 return rc;
2400 }
2401
2402 /**
2403 * Close endpoint
2404 *
2405 * @v ep USB endpoint
2406 */
2407 static void xhci_endpoint_close ( struct usb_endpoint *ep ) {
2408 struct xhci_endpoint *endpoint = usb_endpoint_get_hostdata ( ep );
2409 struct xhci_slot *slot = endpoint->slot;
2410 struct xhci_device *xhci = slot->xhci;
2411 struct io_buffer *iobuf;
2412 unsigned int ctx = endpoint->ctx;
2413
2414 /* Deconfigure endpoint, if applicable */
2415 if ( ctx != XHCI_CTX_EP0 )
2416 xhci_deconfigure_endpoint ( xhci, slot, endpoint );
2417
2418 /* Cancel any incomplete transfers */
2419 while ( xhci_ring_fill ( &endpoint->ring ) ) {
2420 iobuf = xhci_dequeue_multi ( &endpoint->ring );
2421 usb_complete_err ( ep, iobuf, -ECANCELED );
2422 }
2423
2424 /* Free endpoint */
2425 xhci_ring_free ( &endpoint->ring );
2426 slot->endpoint[ctx] = NULL;
2427 free ( endpoint );
2428 }
2429
2430 /**
2431 * Reset endpoint
2432 *
2433 * @v ep USB endpoint
2434 * @ret rc Return status code
2435 */
2436 static int xhci_endpoint_reset ( struct usb_endpoint *ep ) {
2437 struct xhci_endpoint *endpoint = usb_endpoint_get_hostdata ( ep );
2438 struct xhci_slot *slot = endpoint->slot;
2439 struct xhci_device *xhci = slot->xhci;
2440 int rc;
2441
2442 /* Reset endpoint context */
2443 if ( ( rc = xhci_reset_endpoint ( xhci, slot, endpoint ) ) != 0 )
2444 return rc;
2445
2446 /* Set transfer ring dequeue pointer */
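/* The Reset Endpoint command leaves the endpoint stopped with
 * a stale dequeue pointer; repoint the hardware at the ring's
 * current consumer position before resuming transfers.
 */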
2447 if ( ( rc = xhci_set_tr_dequeue_pointer ( xhci, slot, endpoint ) ) != 0)
2448 return rc;
2449
2450 /* Ring doorbell to resume processing */
2451 xhci_doorbell ( &endpoint->ring );
2452
2453 DBGC ( xhci, "XHCI %s slot %d ctx %d reset\n",
2454 xhci->name, slot->id, endpoint->ctx );
2455 return 0;
2456 }
2457
2458 /**
2459 * Update MTU
2460 *
2461 * @v ep USB endpoint
2462 * @ret rc Return status code
2463 */
2464 static int xhci_endpoint_mtu ( struct usb_endpoint *ep ) {
2465 struct xhci_endpoint *endpoint = usb_endpoint_get_hostdata ( ep );
2466 struct xhci_slot *slot = endpoint->slot;
2467 struct xhci_device *xhci = slot->xhci;
2468 int rc;
2469
2470 /* Evaluate context */
2471 if ( ( rc = xhci_evaluate_context ( xhci, slot, endpoint ) ) != 0 )
2472 return rc;
2473
2474 return 0;
2475 }
2476
2477 /**
2478 * Enqueue message transfer
2479 *
2480 * @v ep USB endpoint
2481 * @v iobuf I/O buffer
2482 * @ret rc Return status code
2483 */
2484 static int xhci_endpoint_message ( struct usb_endpoint *ep,
2485 struct io_buffer *iobuf ) {
2486 struct xhci_endpoint *endpoint = usb_endpoint_get_hostdata ( ep );
2487 struct usb_setup_packet *packet;
2488 unsigned int input;
2489 size_t len;
2490 union xhci_trb trbs[ 1 /* setup */ + 1 /* possible data */ +
2491 1 /* status */ ];
2492 union xhci_trb *trb = trbs;
2493 struct xhci_trb_setup *setup;
2494 struct xhci_trb_data *data;
2495 struct xhci_trb_status *status;
2496 int rc;
2497
2498 /* Profile message transfers */
2499 profile_start ( &xhci_message_profiler );
2500
2501 /* Construct setup stage TRB */
2502 memset ( trbs, 0, sizeof ( trbs ) );
2503 assert ( iob_len ( iobuf ) >= sizeof ( *packet ) );
2504 packet = iobuf->data;
2505 iob_pull ( iobuf, sizeof ( *packet ) );
2506 setup = &(trb++)->setup;
2507 memcpy ( &setup->packet, packet, sizeof ( setup->packet ) );
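/* XHCI_TRB_IDT marks the eight setup bytes as immediate data
 * carried within the TRB itself, rather than referenced via a
 * data buffer pointer.
 */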
2508 setup->len = cpu_to_le32 ( sizeof ( *packet ) );
2509 setup->flags = XHCI_TRB_IDT;
2510 setup->type = XHCI_TRB_SETUP;
2511 len = iob_len ( iobuf );
2512 input = ( packet->request & cpu_to_le16 ( USB_DIR_IN ) );
2513 if ( len )
2514 setup->direction = ( input ? XHCI_SETUP_IN : XHCI_SETUP_OUT );
2515
2516 /* Construct data stage TRB, if applicable */
2517 if ( len ) {
2518 data = &(trb++)->data;
2519 data->data = cpu_to_le64 ( virt_to_phys ( iobuf->data ) );
2520 data->len = cpu_to_le32 ( len );
2521 data->type = XHCI_TRB_DATA;
2522 data->direction = ( input ? XHCI_DATA_IN : XHCI_DATA_OUT );
2523 }
2524
2525 /* Construct status stage TRB */
2526 status = &(trb++)->status;
2527 status->flags = XHCI_TRB_IOC;
2528 status->type = XHCI_TRB_STATUS;
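/* The status stage runs in the opposite direction to the data
 * stage, or IN when there is no data stage.
 */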
2529 status->direction =
2530 ( ( len && input ) ? XHCI_STATUS_OUT : XHCI_STATUS_IN );
2531
2532 /* Enqueue TRBs */
2533 if ( ( rc = xhci_enqueue_multi ( &endpoint->ring, iobuf, trbs,
2534 ( trb - trbs ) ) ) != 0 )
2535 return rc;
2536
2537 /* Ring the doorbell */
2538 xhci_doorbell ( &endpoint->ring );
2539
2540 profile_stop ( &xhci_message_profiler );
2541 return 0;
2542 }
2543
2544 /**
2545 * Enqueue stream transfer
2546 *
2547 * @v ep USB endpoint
2548 * @v iobuf I/O buffer
2549 * @v terminate Terminate using a short packet
2550 * @ret rc Return status code
2551 */
2552 static int xhci_endpoint_stream ( struct usb_endpoint *ep,
2553 struct io_buffer *iobuf, int terminate ) {
2554 struct xhci_endpoint *endpoint = usb_endpoint_get_hostdata ( ep );
2555 union xhci_trb trbs[ 1 /* Normal */ + 1 /* Possible zero-length */ ];
2556 union xhci_trb *trb = trbs;
2557 struct xhci_trb_normal *normal;
2558 size_t len = iob_len ( iobuf );
2559 int rc;
2560
2561 /* Profile stream transfers */
2562 profile_start ( &xhci_stream_profiler );
2563
2564 /* Construct normal TRBs */
2565 memset ( &trbs, 0, sizeof ( trbs ) );
2566 normal = &(trb++)->normal;
2567 normal->data = cpu_to_le64 ( virt_to_phys ( iobuf->data ) );
2568 normal->len = cpu_to_le32 ( len );
2569 normal->type = XHCI_TRB_NORMAL;
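/* If short-packet termination is requested and the payload is
 * an exact multiple of the endpoint MTU, chain a zero-length
 * TRB so that the device sees a terminating short packet.
 */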
2570 if ( terminate && ( ( len & ( ep->mtu - 1 ) ) == 0 ) ) {
2571 normal->flags = XHCI_TRB_CH;
2572 normal = &(trb++)->normal;
2573 normal->type = XHCI_TRB_NORMAL;
2574 }
2575 normal->flags = XHCI_TRB_IOC;
2576
2577 /* Enqueue TRBs */
2578 if ( ( rc = xhci_enqueue_multi ( &endpoint->ring, iobuf, trbs,
2579 ( trb - trbs ) ) ) != 0 )
2580 return rc;
2581
2582 /* Ring the doorbell */
2583 xhci_doorbell ( &endpoint->ring );
2584
2585 profile_stop ( &xhci_stream_profiler );
2586 return 0;
2587 }
2588
2589 /******************************************************************************
2590 *
2591 * Device operations
2592 *
2593 ******************************************************************************
2594 */
2595
2596 /**
2597 * Open device
2598 *
2599 * @v usb USB device
2600 * @ret rc Return status code
2601 */
2602 static int xhci_device_open ( struct usb_device *usb ) {
2603 struct xhci_device *xhci = usb_bus_get_hostdata ( usb->port->hub->bus );
2604 struct usb_port *tt = usb_transaction_translator ( usb );
2605 struct xhci_slot *slot;
2606 struct xhci_slot *tt_slot;
2607 size_t len;
2608 int type;
2609 int id;
2610 int rc;
2611
2612 /* Determine applicable slot type */
2613 type = xhci_port_slot_type ( xhci, usb->port->address );
2614 if ( type < 0 ) {
2615 rc = type;
2616 DBGC ( xhci, "XHCI %s-%d has no slot type\n",
2617 xhci->name, usb->port->address );
2618 goto err_type;
2619 }
2620
2621 /* Allocate a device slot number */
2622 id = xhci_enable_slot ( xhci, type );
2623 if ( id < 0 ) {
2624 rc = id;
2625 goto err_enable_slot;
2626 }
2627 assert ( ( id > 0 ) && ( ( unsigned int ) id <= xhci->slots ) );
2628 assert ( xhci->slot[id] == NULL );
2629
2630 /* Allocate and initialise structure */
2631 slot = zalloc ( sizeof ( *slot ) );
2632 if ( ! slot ) {
2633 rc = -ENOMEM;
2634 goto err_alloc;
2635 }
2636 usb_set_hostdata ( usb, slot );
2637 xhci->slot[id] = slot;
2638 slot->xhci = xhci;
2639 slot->usb = usb;
2640 slot->id = id;
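/* For low- and full-speed devices attached via a high-speed
 * hub, record the transaction translator's slot and port so
 * that they can be written into the slot context.
 */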
2641 if ( tt ) {
2642 tt_slot = usb_get_hostdata ( tt->hub->usb );
2643 slot->tt_id = tt_slot->id;
2644 slot->tt_port = tt->address;
2645 }
2646
2647 /* Allocate a device context */
2648 len = xhci_device_context_offset ( xhci, XHCI_CTX_END );
2649 slot->context = malloc_dma ( len, xhci_align ( len ) );
2650 if ( ! slot->context ) {
2651 rc = -ENOMEM;
2652 goto err_alloc_context;
2653 }
2654 memset ( slot->context, 0, len );
2655
2656 /* Set device context base address */
2657 assert ( xhci->dcbaa[id] == 0 );
2658 xhci->dcbaa[id] = cpu_to_le64 ( virt_to_phys ( slot->context ) );
2659
2660 DBGC2 ( xhci, "XHCI %s slot %d device context [%08lx,%08lx) for %s\n",
2661 xhci->name, slot->id, virt_to_phys ( slot->context ),
2662 ( virt_to_phys ( slot->context ) + len ), usb->name );
2663 return 0;
2664
2665 xhci->dcbaa[id] = 0;
2666 free_dma ( slot->context, len );
2667 err_alloc_context:
2668 xhci->slot[id] = NULL;
2669 free ( slot );
2670 err_alloc:
2671 xhci_disable_slot ( xhci, id );
2672 err_enable_slot:
2673 err_type:
2674 return rc;
2675 }
2676
2677 /**
2678 * Close device
2679 *
2680 * @v usb USB device
2681 */
2682 static void xhci_device_close ( struct usb_device *usb ) {
2683 struct xhci_slot *slot = usb_get_hostdata ( usb );
2684 struct xhci_device *xhci = slot->xhci;
2685 size_t len = xhci_device_context_offset ( xhci, XHCI_CTX_END );
2686 unsigned int id = slot->id;
2687 int rc;
2688
2689 /* Disable slot */
2690 if ( ( rc = xhci_disable_slot ( xhci, id ) ) != 0 ) {
2691 /* Slot is still enabled. Leak the slot context,
2692 * since the controller may still write to this
2693 * memory, and leave the DCBAA entry intact.
2694 *
2695 * If the controller later reports that this same slot
2696 * has been re-enabled, then some assertions will be
2697 * triggered.
2698 */
2699 DBGC ( xhci, "XHCI %s slot %d leaking context memory\n",
2700 xhci->name, slot->id );
2701 slot->context = NULL;
2702 }
2703
2704 /* Free slot */
2705 if ( slot->context ) {
2706 free_dma ( slot->context, len );
2707 xhci->dcbaa[id] = 0;
2708 }
2709 xhci->slot[id] = NULL;
2710 free ( slot );
2711 }
2712
2713 /**
2714 * Assign device address
2715 *
2716 * @v usb USB device
2717 * @ret rc Return status code
2718 */
2719 static int xhci_device_address ( struct usb_device *usb ) {
2720 struct xhci_slot *slot = usb_get_hostdata ( usb );
2721 struct xhci_device *xhci = slot->xhci;
2722 struct usb_port *port = usb->port;
2723 struct usb_port *root_port;
2724 int psiv;
2725 int rc;
2726
2727 /* Calculate route string */
2728 slot->route = usb_route_string ( usb );
2729
2730 /* Calculate root hub port number */
2731 root_port = usb_root_hub_port ( usb );
2732 slot->port = root_port->address;
2733
2734 /* Calculate protocol speed ID */
2735 psiv = xhci_port_psiv ( xhci, slot->port, port->speed );
2736 if ( psiv < 0 ) {
2737 rc = psiv;
2738 return rc;
2739 }
2740 slot->psiv = psiv;
2741
2742 /* Address device */
2743 if ( ( rc = xhci_address_device ( xhci, slot ) ) != 0 )
2744 return rc;
2745
2746 return 0;
2747 }
2748
2749 /******************************************************************************
2750 *
2751 * Bus operations
2752 *
2753 ******************************************************************************
2754 */
2755
2756 /**
2757 * Open USB bus
2758 *
2759 * @v bus USB bus
2760 * @ret rc Return status code
2761 */
2762 static int xhci_bus_open ( struct usb_bus *bus ) {
2763 struct xhci_device *xhci = usb_bus_get_hostdata ( bus );
2764 int rc;
2765
2766 /* Allocate device slot array */
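/* Slot IDs are 1-based; allocate one extra entry so that the
 * array can be indexed directly by slot ID.
 */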
2767 xhci->slot = zalloc ( ( xhci->slots + 1 ) * sizeof ( xhci->slot[0] ) );
2768 if ( ! xhci->slot ) {
2769 rc = -ENOMEM;
2770 goto err_slot_alloc;
2771 }
2772
2773 /* Allocate device context base address array */
2774 if ( ( rc = xhci_dcbaa_alloc ( xhci ) ) != 0 )
2775 goto err_dcbaa_alloc;
2776
2777 /* Allocate scratchpad buffers */
2778 if ( ( rc = xhci_scratchpad_alloc ( xhci ) ) != 0 )
2779 goto err_scratchpad_alloc;
2780
2781 /* Allocate command ring */
2782 if ( ( rc = xhci_command_alloc ( xhci ) ) != 0 )
2783 goto err_command_alloc;
2784
2785 /* Allocate event ring */
2786 if ( ( rc = xhci_event_alloc ( xhci ) ) != 0 )
2787 goto err_event_alloc;
2788
2789 /* Start controller */
2790 xhci_run ( xhci );
2791
2792 return 0;
2793
2794 xhci_stop ( xhci );
2795 xhci_event_free ( xhci );
2796 err_event_alloc:
2797 xhci_command_free ( xhci );
2798 err_command_alloc:
2799 xhci_scratchpad_free ( xhci );
2800 err_scratchpad_alloc:
2801 xhci_dcbaa_free ( xhci );
2802 err_dcbaa_alloc:
2803 free ( xhci->slot );
2804 err_slot_alloc:
2805 return rc;
2806 }
2807
2808 /**
2809 * Close USB bus
2810 *
2811 * @v bus USB bus
2812 */
2813 static void xhci_bus_close ( struct usb_bus *bus ) {
2814 struct xhci_device *xhci = usb_bus_get_hostdata ( bus );
2815 unsigned int i;
2816
2817 /* Sanity checks */
2818 assert ( xhci->slot != NULL );
2819 for ( i = 0 ; i <= xhci->slots ; i++ )
2820 assert ( xhci->slot[i] == NULL );
2821
2822 xhci_stop ( xhci );
2823 xhci_event_free ( xhci );
2824 xhci_command_free ( xhci );
2825 xhci_scratchpad_free ( xhci );
2826 xhci_dcbaa_free ( xhci );
2827 free ( xhci->slot );
2828 }
2829
2830 /**
2831 * Poll USB bus
2832 *
2833 * @v bus USB bus
2834 */
2835 static void xhci_bus_poll ( struct usb_bus *bus ) {
2836 struct xhci_device *xhci = usb_bus_get_hostdata ( bus );
2837
2838 /* Poll event ring */
2839 xhci_event_poll ( xhci );
2840 }
2841
2842 /******************************************************************************
2843 *
2844 * Hub operations
2845 *
2846 ******************************************************************************
2847 */
2848
2849 /**
2850 * Open hub
2851 *
2852 * @v hub USB hub
2853 * @ret rc Return status code
2854 */
2855 static int xhci_hub_open ( struct usb_hub *hub ) {
2856 struct xhci_slot *slot;
2857
2858 /* Do nothing if this is the root hub */
2859 if ( ! hub->usb )
2860 return 0;
2861
2862 /* Get device slot */
2863 slot = usb_get_hostdata ( hub->usb );
2864
2865 /* Update device slot hub parameters. We don't pass this
2866 * information to the hardware until the hub's interrupt
2867 * endpoint is opened, since the only mechanism the xHCI
2868 * specification provides for doing so is a Configure
2869 * Endpoint command, and we can't issue that command until
2870 * we have a non-EP0 endpoint to configure.
2871 */
2872 slot->ports = hub->ports;
2873
2874 return 0;
2875 }
2876
2877 /**
2878 * Close hub
2879 *
2880 * @v hub USB hub
2881 */
2882 static void xhci_hub_close ( struct usb_hub *hub __unused ) {
2883
2884 /* Nothing to do */
2885 }
2886
2887 /******************************************************************************
2888 *
2889 * Root hub operations
2890 *
2891 ******************************************************************************
2892 */
2893
2894 /**
2895 * Open root hub
2896 *
2897 * @v hub USB hub
2898 * @ret rc Return status code
2899 */
2900 static int xhci_root_open ( struct usb_hub *hub ) {
2901 struct usb_bus *bus = hub->bus;
2902 struct xhci_device *xhci = usb_bus_get_hostdata ( bus );
2903 struct usb_port *port;
2904 uint32_t portsc;
2905 unsigned int i;
2906
2907 /* Enable power to all ports */
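/* PORTSC contains write-1-to-clear status bits, so mask down
 * to the preserved bits before setting the Port Power bit.
 */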
2908 for ( i = 1 ; i <= xhci->ports ; i++ ) {
2909 portsc = readl ( xhci->op + XHCI_OP_PORTSC ( i ) );
2910 portsc &= XHCI_PORTSC_PRESERVE;
2911 portsc |= XHCI_PORTSC_PP;
2912 writel ( portsc, xhci->op + XHCI_OP_PORTSC ( i ) );
2913 }
2914
2915 /* The xHCI specification requires us to allow up to 20ms
2916 * for a port to power up after port power is enabled.
2917 */
2918 mdelay ( XHCI_PORT_POWER_DELAY_MS );
2919
2920 /* USB3 ports may power up as Disabled */
2921 for ( i = 1 ; i <= xhci->ports ; i++ ) {
2922 portsc = readl ( xhci->op + XHCI_OP_PORTSC ( i ) );
2923 port = usb_port ( hub, i );
2924 if ( ( port->protocol >= USB_PROTO_3_0 ) &&
2925 ( ( portsc & XHCI_PORTSC_PLS_MASK ) ==
2926 XHCI_PORTSC_PLS_DISABLED ) ) {
2927 /* Force link state to RxDetect */
2928 portsc &= XHCI_PORTSC_PRESERVE;
2929 portsc |= ( XHCI_PORTSC_PLS_RXDETECT | XHCI_PORTSC_LWS);
2930 writel ( portsc, xhci->op + XHCI_OP_PORTSC ( i ) );
2931 }
2932 }
2933
2934 /* Some xHCI cards seem to require an additional delay after
2935 * setting the link state to RxDetect.
2936 */
2937 mdelay ( XHCI_LINK_STATE_DELAY_MS );
2938
2939 /* Record hub driver private data */
2940 usb_hub_set_drvdata ( hub, xhci );
2941
2942 return 0;
2943 }
2944
2945 /**
2946 * Close root hub
2947 *
2948 * @v hub USB hub
2949 */
2950 static void xhci_root_close ( struct usb_hub *hub ) {
2951
2952 /* Clear hub driver private data */
2953 usb_hub_set_drvdata ( hub, NULL );
2954 }
2955
2956 /**
2957 * Enable port
2958 *
2959 * @v hub USB hub
2960 * @v port USB port
2961 * @ret rc Return status code
2962 */
2963 static int xhci_root_enable ( struct usb_hub *hub, struct usb_port *port ) {
2964 struct xhci_device *xhci = usb_hub_get_drvdata ( hub );
2965 uint32_t portsc;
2966 unsigned int i;
2967
2968 /* Reset port */
2969 portsc = readl ( xhci->op + XHCI_OP_PORTSC ( port->address ) );
2970 portsc &= XHCI_PORTSC_PRESERVE;
2971 portsc |= XHCI_PORTSC_PR;
2972 writel ( portsc, xhci->op + XHCI_OP_PORTSC ( port->address ) );
2973
2974 /* Wait for port to become enabled */
2975 for ( i = 0 ; i < XHCI_PORT_RESET_MAX_WAIT_MS ; i++ ) {
2976
2977 /* Check port status */
2978 portsc = readl ( xhci->op + XHCI_OP_PORTSC ( port->address ) );
2979 if ( portsc & XHCI_PORTSC_PED )
2980 return 0;
2981
2982 /* Delay */
2983 mdelay ( 1 );
2984 }
2985
2986 DBGC ( xhci, "XHCI %s-%d timed out waiting for port to enable\n",
2987 xhci->name, port->address );
2988 return -ETIMEDOUT;
2989 }
2990
2991 /**
2992 * Disable port
2993 *
2994 * @v hub USB hub
2995 * @v port USB port
2996 * @ret rc Return status code
2997 */
2998 static int xhci_root_disable ( struct usb_hub *hub, struct usb_port *port ) {
2999 struct xhci_device *xhci = usb_hub_get_drvdata ( hub );
3000 uint32_t portsc;
3001
3002 /* Disable port */
3003 portsc = readl ( xhci->op + XHCI_OP_PORTSC ( port->address ) );
3004 portsc &= XHCI_PORTSC_PRESERVE;
3005 portsc |= XHCI_PORTSC_PED;
3006 writel ( portsc, xhci->op + XHCI_OP_PORTSC ( port->address ) );
3007
3008 return 0;
3009 }
3010
3011 /**
3012 * Update root hub port speed
3013 *
3014 * @v hub USB hub
3015 * @v port USB port
3016 * @ret rc Return status code
3017 */
3018 static int xhci_root_speed ( struct usb_hub *hub, struct usb_port *port ) {
3019 struct xhci_device *xhci = usb_hub_get_drvdata ( hub );
3020 uint32_t portsc;
3021 unsigned int psiv;
3022 int ccs;
3023 int ped;
3024 int csc;
3025 int speed;
3026 int rc;
3027
3028 /* Read port status */
3029 portsc = readl ( xhci->op + XHCI_OP_PORTSC ( port->address ) );
3030 DBGC2 ( xhci, "XHCI %s-%d status is %08x\n",
3031 xhci->name, port->address, portsc );
3032 ccs = ( portsc & XHCI_PORTSC_CCS );
3033 ped = ( portsc & XHCI_PORTSC_PED );
3034 csc = ( portsc & XHCI_PORTSC_CSC );
3035 psiv = XHCI_PORTSC_PSIV ( portsc );
3036
3037 /* Record disconnections and clear changes */
3038 port->disconnected |= csc;
3039 portsc &= ( XHCI_PORTSC_PRESERVE | XHCI_PORTSC_CHANGE );
3040 writel ( portsc, xhci->op + XHCI_OP_PORTSC ( port->address ) );
3041
3042 /* Port speed is not valid unless port is connected */
3043 if ( ! ccs ) {
3044 port->speed = USB_SPEED_NONE;
3045 return 0;
3046 }
3047
3048 /* For USB2 ports, the PSIV field is not valid until the port
3049 * completes reset and becomes enabled.
3050 */
3051 if ( ( port->protocol < USB_PROTO_3_0 ) && ! ped ) {
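/* Report full speed as a provisional default until the
 * reset completes and the PSIV field becomes valid.
 */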
3052 port->speed = USB_SPEED_FULL;
3053 return 0;
3054 }
3055
3056 /* Get port speed and map to generic USB speed */
3057 speed = xhci_port_speed ( xhci, port->address, psiv );
3058 if ( speed < 0 ) {
3059 rc = speed;
3060 return rc;
3061 }
3062
3063 port->speed = speed;
3064 return 0;
3065 }
3066
3067 /**
3068 * Clear transaction translator buffer
3069 *
3070 * @v hub USB hub
3071 * @v port USB port
3072 * @v ep USB endpoint
3073 * @ret rc Return status code
3074 */
3075 static int xhci_root_clear_tt ( struct usb_hub *hub, struct usb_port *port,
3076 struct usb_endpoint *ep ) {
3077 struct xhci_device *xhci = usb_hub_get_drvdata ( hub );
3078
3079 /* Should never be called; this is a root hub */
3080 DBGC ( xhci, "XHCI %s-%d nonsensical CLEAR_TT for %s %s\n", xhci->name,
3081 port->address, ep->usb->name, usb_endpoint_name ( ep ) );
3082
3083 return -ENOTSUP;
3084 }
3085
3086 /******************************************************************************
3087 *
3088 * PCI interface
3089 *
3090 ******************************************************************************
3091 */
3092
3093 /** USB host controller operations */
3094 static struct usb_host_operations xhci_operations = {
3095 .endpoint = {
3096 .open = xhci_endpoint_open,
3097 .close = xhci_endpoint_close,
3098 .reset = xhci_endpoint_reset,
3099 .mtu = xhci_endpoint_mtu,
3100 .message = xhci_endpoint_message,
3101 .stream = xhci_endpoint_stream,
3102 },
3103 .device = {
3104 .open = xhci_device_open,
3105 .close = xhci_device_close,
3106 .address = xhci_device_address,
3107 },
3108 .bus = {
3109 .open = xhci_bus_open,
3110 .close = xhci_bus_close,
3111 .poll = xhci_bus_poll,
3112 },
3113 .hub = {
3114 .open = xhci_hub_open,
3115 .close = xhci_hub_close,
3116 },
3117 .root = {
3118 .open = xhci_root_open,
3119 .close = xhci_root_close,
3120 .enable = xhci_root_enable,
3121 .disable = xhci_root_disable,
3122 .speed = xhci_root_speed,
3123 .clear_tt = xhci_root_clear_tt,
3124 },
3125 };
3126
3127 /**
3128 * Fix Intel PCH-specific quirks
3129 *
3130 * @v xhci xHCI device
3131 * @v pci PCI device
3132 */
3133 static void xhci_pch_fix ( struct xhci_device *xhci, struct pci_device *pci ) {
3134 struct xhci_pch *pch = &xhci->pch;
3135 uint32_t xusb2pr;
3136 uint32_t xusb2prm;
3137 uint32_t usb3pssen;
3138 uint32_t usb3prm;
3139
3140 /* Enable SuperSpeed capability. Do this before rerouting
3141 * USB2 ports, so that USB3 devices connect at SuperSpeed.
3142 */
3143 pci_read_config_dword ( pci, XHCI_PCH_USB3PSSEN, &usb3pssen );
3144 pci_read_config_dword ( pci, XHCI_PCH_USB3PRM, &usb3prm );
3145 if ( usb3prm & ~usb3pssen ) {
3146 DBGC ( xhci, "XHCI %s enabling SuperSpeed on ports %08x\n",
3147 xhci->name, ( usb3prm & ~usb3pssen ) );
3148 }
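/* Record the original register values so that xhci_pch_undo()
 * can restore them when the driver is removed.
 */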
3149 pch->usb3pssen = usb3pssen;
3150 usb3pssen |= usb3prm;
3151 pci_write_config_dword ( pci, XHCI_PCH_USB3PSSEN, usb3pssen );
3152
3153 /* Route USB2 ports from EHCI to xHCI */
3154 pci_read_config_dword ( pci, XHCI_PCH_XUSB2PR, &xusb2pr );
3155 pci_read_config_dword ( pci, XHCI_PCH_XUSB2PRM, &xusb2prm );
3156 if ( xusb2prm & ~xusb2pr ) {
3157 DBGC ( xhci, "XHCI %s routing ports %08x from EHCI to xHCI\n",
3158 xhci->name, ( xusb2prm & ~xusb2pr ) );
3159 }
3160 pch->xusb2pr = xusb2pr;
3161 xusb2pr |= xusb2prm;
3162 pci_write_config_dword ( pci, XHCI_PCH_XUSB2PR, xusb2pr );
3163 }
3164
3165 /**
3166 * Undo Intel PCH-specific quirk fixes
3167 *
3168 * @v xhci xHCI device
3169 * @v pci PCI device
3170 */
3171 static void xhci_pch_undo ( struct xhci_device *xhci, struct pci_device *pci ) {
3172 struct xhci_pch *pch = &xhci->pch;
3173
3174 /* Restore USB2 port routing to original state */
3175 pci_write_config_dword ( pci, XHCI_PCH_XUSB2PR, pch->xusb2pr );
3176
3177 /* Restore SuperSpeed capability to original state */
3178 pci_write_config_dword ( pci, XHCI_PCH_USB3PSSEN, pch->usb3pssen );
3179 }
3180
3181 /**
3182 * Probe PCI device
3183 *
3184 * @v pci PCI device
3185 * @ret rc Return status code
3186 */
3187 static int xhci_probe ( struct pci_device *pci ) {
3188 struct xhci_device *xhci;
3189 struct usb_port *port;
3190 unsigned long bar_start;
3191 size_t bar_size;
3192 unsigned int i;
3193 int rc;
3194
3195 /* Allocate and initialise structure */
3196 xhci = zalloc ( sizeof ( *xhci ) );
3197 if ( ! xhci ) {
3198 rc = -ENOMEM;
3199 goto err_alloc;
3200 }
3201 xhci->name = pci->dev.name;
3202 xhci->quirks = pci->id->driver_data;
3203
3204 /* Fix up PCI device */
3205 adjust_pci_device ( pci );
3206
3207 /* Map registers */
3208 bar_start = pci_bar_start ( pci, XHCI_BAR );
3209 bar_size = pci_bar_size ( pci, XHCI_BAR );
3210 xhci->regs = ioremap ( bar_start, bar_size );
3211 if ( ! xhci->regs ) {
3212 rc = -ENODEV;
3213 goto err_ioremap;
3214 }
3215
3216 /* Initialise xHCI device */
3217 xhci_init ( xhci, xhci->regs );
3218
3219 /* Initialise USB legacy support and claim ownership */
3220 xhci_legacy_init ( xhci );
3221 xhci_legacy_claim ( xhci );
3222
3223 /* Fix Intel PCH-specific quirks, if applicable */
3224 if ( xhci->quirks & XHCI_PCH )
3225 xhci_pch_fix ( xhci, pci );
3226
3227 /* Reset device */
3228 if ( ( rc = xhci_reset ( xhci ) ) != 0 )
3229 goto err_reset;
3230
3231 /* Allocate USB bus */
3232 xhci->bus = alloc_usb_bus ( &pci->dev, xhci->ports, XHCI_MTU,
3233 &xhci_operations );
3234 if ( ! xhci->bus ) {
3235 rc = -ENOMEM;
3236 goto err_alloc_bus;
3237 }
3238 usb_bus_set_hostdata ( xhci->bus, xhci );
3239 usb_hub_set_drvdata ( xhci->bus->hub, xhci );
3240
3241 /* Set port protocols */
3242 for ( i = 1 ; i <= xhci->ports ; i++ ) {
3243 port = usb_port ( xhci->bus->hub, i );
3244 port->protocol = xhci_port_protocol ( xhci, i );
3245 }
3246
3247 /* Register USB bus */
3248 if ( ( rc = register_usb_bus ( xhci->bus ) ) != 0 )
3249 goto err_register;
3250
3251 pci_set_drvdata ( pci, xhci );
3252 return 0;
3253
3254 unregister_usb_bus ( xhci->bus );
3255 err_register:
3256 free_usb_bus ( xhci->bus );
3257 err_alloc_bus:
3258 xhci_reset ( xhci );
3259 err_reset:
3260 if ( xhci->quirks & XHCI_PCH )
3261 xhci_pch_undo ( xhci, pci );
3262 xhci_legacy_release ( xhci );
3263 iounmap ( xhci->regs );
3264 err_ioremap:
3265 free ( xhci );
3266 err_alloc:
3267 return rc;
3268 }
3269
3270 /**
3271 * Remove PCI device
3272 *
3273 * @v pci PCI device
3274 */
3275 static void xhci_remove ( struct pci_device *pci ) {
3276 struct xhci_device *xhci = pci_get_drvdata ( pci );
3277 struct usb_bus *bus = xhci->bus;
3278
3279 unregister_usb_bus ( bus );
3280 free_usb_bus ( bus );
3281 xhci_reset ( xhci );
3282 if ( xhci->quirks & XHCI_PCH )
3283 xhci_pch_undo ( xhci, pci );
3284 xhci_legacy_release ( xhci );
3285 iounmap ( xhci->regs );
3286 free ( xhci );
3287 }
3288
3289 /** XHCI PCI device IDs */
3290 static struct pci_device_id xhci_ids[] = {
3291 PCI_ROM ( 0x8086, 0x9d2f, "xhci-skylake", "xHCI (Skylake)", ( XHCI_PCH | XHCI_BAD_PSIV ) ),
3292 PCI_ROM ( 0x8086, 0xffff, "xhci-pch", "xHCI (Intel PCH)", XHCI_PCH ),
3293 PCI_ROM ( 0xffff, 0xffff, "xhci", "xHCI", 0 ),
3294 };
3295
3296 /** XHCI PCI driver */
3297 struct pci_driver xhci_driver __pci_driver = {
3298 .ids = xhci_ids,
3299 .id_count = ( sizeof ( xhci_ids ) / sizeof ( xhci_ids[0] ) ),
3300 .class = PCI_CLASS_ID ( PCI_CLASS_SERIAL, PCI_CLASS_SERIAL_USB,
3301 PCI_CLASS_SERIAL_USB_XHCI ),
3302 .probe = xhci_probe,
3303 .remove = xhci_remove,
3304 };
3305
3306 /**
3307 * Prepare for exit
3308 *
3309 * @v booting System is shutting down for OS boot
3310 */
3311 static void xhci_shutdown ( int booting ) {
3312 /* If we are shutting down to boot an OS, then prevent the
3313 * release of ownership back to BIOS.
3314 */
3315 xhci_legacy_prevent_release = booting;
3316 }
3317
3318 /** Startup/shutdown function */
3319 struct startup_fn xhci_startup __startup_fn ( STARTUP_LATE ) = {
3320 .shutdown = xhci_shutdown,
3321 };