[xhci] Leak memory if controller fails to disable slot
[ipxe.git] / src / drivers / usb / xhci.c
1 /*
2 * Copyright (C) 2014 Michael Brown <mbrown@fensystems.co.uk>.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License as
6 * published by the Free Software Foundation; either version 2 of the
7 * License, or (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
17 * 02110-1301, USA.
18 */
19
20 FILE_LICENCE ( GPL2_OR_LATER );
21
22 #include <stdlib.h>
23 #include <stdio.h>
24 #include <unistd.h>
25 #include <string.h>
26 #include <strings.h>
27 #include <errno.h>
28 #include <byteswap.h>
29 #include <ipxe/malloc.h>
30 #include <ipxe/umalloc.h>
31 #include <ipxe/pci.h>
32 #include <ipxe/usb.h>
33 #include <ipxe/profile.h>
34 #include "xhci.h"
35
36 /** @file
37 *
38 * USB eXtensible Host Controller Interface (xHCI) driver
39 *
40 */
41
42 /** Message transfer profiler */
43 static struct profiler xhci_message_profiler __profiler =
44 { .name = "xhci.message" };
45
46 /** Stream transfer profiler */
47 static struct profiler xhci_stream_profiler __profiler =
48 { .name = "xhci.stream" };
49
50 /** Event ring profiler */
51 static struct profiler xhci_event_profiler __profiler =
52 { .name = "xhci.event" };
53
54 /** Transfer event profiler */
55 static struct profiler xhci_transfer_profiler __profiler =
56 { .name = "xhci.transfer" };
57
58 /* Disambiguate the various error causes */
59 #define EIO_DATA \
60 __einfo_error ( EINFO_EIO_DATA )
61 #define EINFO_EIO_DATA \
62 __einfo_uniqify ( EINFO_EIO, ( 2 - 0 ), \
63 "Data buffer error" )
64 #define EIO_BABBLE \
65 __einfo_error ( EINFO_EIO_BABBLE )
66 #define EINFO_EIO_BABBLE \
67 __einfo_uniqify ( EINFO_EIO, ( 3 - 0 ), \
68 "Babble detected" )
69 #define EIO_USB \
70 __einfo_error ( EINFO_EIO_USB )
71 #define EINFO_EIO_USB \
72 __einfo_uniqify ( EINFO_EIO, ( 4 - 0 ), \
73 "USB transaction error" )
74 #define EIO_TRB \
75 __einfo_error ( EINFO_EIO_TRB )
76 #define EINFO_EIO_TRB \
77 __einfo_uniqify ( EINFO_EIO, ( 5 - 0 ), \
78 "TRB error" )
79 #define EIO_STALL \
80 __einfo_error ( EINFO_EIO_STALL )
81 #define EINFO_EIO_STALL \
82 __einfo_uniqify ( EINFO_EIO, ( 6 - 0 ), \
83 "Stall error" )
84 #define EIO_RESOURCE \
85 __einfo_error ( EINFO_EIO_RESOURCE )
86 #define EINFO_EIO_RESOURCE \
87 __einfo_uniqify ( EINFO_EIO, ( 7 - 0 ), \
88 "Resource error" )
89 #define EIO_BANDWIDTH \
90 __einfo_error ( EINFO_EIO_BANDWIDTH )
91 #define EINFO_EIO_BANDWIDTH \
92 __einfo_uniqify ( EINFO_EIO, ( 8 - 0 ), \
93 "Bandwidth error" )
94 #define EIO_NO_SLOTS \
95 __einfo_error ( EINFO_EIO_NO_SLOTS )
96 #define EINFO_EIO_NO_SLOTS \
97 __einfo_uniqify ( EINFO_EIO, ( 9 - 0 ), \
98 "No slots available" )
99 #define EIO_STREAM_TYPE \
100 __einfo_error ( EINFO_EIO_STREAM_TYPE )
101 #define EINFO_EIO_STREAM_TYPE \
102 __einfo_uniqify ( EINFO_EIO, ( 10 - 0 ), \
103 "Invalid stream type" )
104 #define EIO_SLOT \
105 __einfo_error ( EINFO_EIO_SLOT )
106 #define EINFO_EIO_SLOT \
107 __einfo_uniqify ( EINFO_EIO, ( 11 - 0 ), \
108 "Slot not enabled" )
109 #define EIO_ENDPOINT \
110 __einfo_error ( EINFO_EIO_ENDPOINT )
111 #define EINFO_EIO_ENDPOINT \
112 __einfo_uniqify ( EINFO_EIO, ( 12 - 0 ), \
113 "Endpoint not enabled" )
114 #define EIO_SHORT \
115 __einfo_error ( EINFO_EIO_SHORT )
116 #define EINFO_EIO_SHORT \
117 __einfo_uniqify ( EINFO_EIO, ( 13 - 0 ), \
118 "Short packet" )
119 #define EIO_UNDERRUN \
120 __einfo_error ( EINFO_EIO_UNDERRUN )
121 #define EINFO_EIO_UNDERRUN \
122 __einfo_uniqify ( EINFO_EIO, ( 14 - 0 ), \
123 "Ring underrun" )
124 #define EIO_OVERRUN \
125 __einfo_error ( EINFO_EIO_OVERRUN )
126 #define EINFO_EIO_OVERRUN \
127 __einfo_uniqify ( EINFO_EIO, ( 15 - 0 ), \
128 "Ring overrun" )
129 #define EIO_VF_RING_FULL \
130 __einfo_error ( EINFO_EIO_VF_RING_FULL )
131 #define EINFO_EIO_VF_RING_FULL \
132 __einfo_uniqify ( EINFO_EIO, ( 16 - 0 ), \
133 "Virtual function event ring full" )
134 #define EIO_PARAMETER \
135 __einfo_error ( EINFO_EIO_PARAMETER )
136 #define EINFO_EIO_PARAMETER \
137 __einfo_uniqify ( EINFO_EIO, ( 17 - 0 ), \
138 "Parameter error" )
139 #define EIO_BANDWIDTH_OVERRUN \
140 __einfo_error ( EINFO_EIO_BANDWIDTH_OVERRUN )
141 #define EINFO_EIO_BANDWIDTH_OVERRUN \
142 __einfo_uniqify ( EINFO_EIO, ( 18 - 0 ), \
143 "Bandwidth overrun" )
144 #define EIO_CONTEXT \
145 __einfo_error ( EINFO_EIO_CONTEXT )
146 #define EINFO_EIO_CONTEXT \
147 __einfo_uniqify ( EINFO_EIO, ( 19 - 0 ), \
148 "Context state error" )
149 #define EIO_NO_PING \
150 __einfo_error ( EINFO_EIO_NO_PING )
151 #define EINFO_EIO_NO_PING \
152 __einfo_uniqify ( EINFO_EIO, ( 20 - 0 ), \
153 "No ping response" )
154 #define EIO_RING_FULL \
155 __einfo_error ( EINFO_EIO_RING_FULL )
156 #define EINFO_EIO_RING_FULL \
157 __einfo_uniqify ( EINFO_EIO, ( 21 - 0 ), \
158 "Event ring full" )
159 #define EIO_INCOMPATIBLE \
160 __einfo_error ( EINFO_EIO_INCOMPATIBLE )
161 #define EINFO_EIO_INCOMPATIBLE \
162 __einfo_uniqify ( EINFO_EIO, ( 22 - 0 ), \
163 "Incompatible device" )
164 #define EIO_MISSED \
165 __einfo_error ( EINFO_EIO_MISSED )
166 #define EINFO_EIO_MISSED \
167 __einfo_uniqify ( EINFO_EIO, ( 23 - 0 ), \
168 "Missed service error" )
169 #define EIO_CMD_STOPPED \
170 __einfo_error ( EINFO_EIO_CMD_STOPPED )
171 #define EINFO_EIO_CMD_STOPPED \
172 __einfo_uniqify ( EINFO_EIO, ( 24 - 0 ), \
173 "Command ring stopped" )
174 #define EIO_CMD_ABORTED \
175 __einfo_error ( EINFO_EIO_CMD_ABORTED )
176 #define EINFO_EIO_CMD_ABORTED \
177 __einfo_uniqify ( EINFO_EIO, ( 25 - 0 ), \
178 "Command aborted" )
179 #define EIO_STOP \
180 __einfo_error ( EINFO_EIO_STOP )
181 #define EINFO_EIO_STOP \
182 __einfo_uniqify ( EINFO_EIO, ( 26 - 0 ), \
183 "Stopped" )
184 #define EIO_STOP_LEN \
185 __einfo_error ( EINFO_EIO_STOP_LEN )
186 #define EINFO_EIO_STOP_LEN \
187 __einfo_uniqify ( EINFO_EIO, ( 27 - 0 ), \
188 "Stopped - length invalid" )
189 #define EIO_STOP_SHORT \
190 __einfo_error ( EINFO_EIO_STOP_SHORT )
191 #define EINFO_EIO_STOP_SHORT \
192 __einfo_uniqify ( EINFO_EIO, ( 28 - 0 ), \
193 "Stopped - short packet" )
194 #define EIO_LATENCY \
195 __einfo_error ( EINFO_EIO_LATENCY )
196 #define EINFO_EIO_LATENCY \
197 __einfo_uniqify ( EINFO_EIO, ( 29 - 0 ), \
198 "Maximum exit latency too large" )
199 #define EIO_ISOCH \
200 __einfo_error ( EINFO_EIO_ISOCH )
201 #define EINFO_EIO_ISOCH \
202 __einfo_uniqify ( EINFO_EIO, ( 31 - 0 ), \
203 "Isochronous buffer overrun" )
204 #define EPROTO_LOST \
205 __einfo_error ( EINFO_EPROTO_LOST )
206 #define EINFO_EPROTO_LOST \
207 __einfo_uniqify ( EINFO_EPROTO, ( 32 - 32 ), \
208 "Event lost" )
209 #define EPROTO_UNDEFINED \
210 __einfo_error ( EINFO_EPROTO_UNDEFINED )
211 #define EINFO_EPROTO_UNDEFINED \
212 __einfo_uniqify ( EINFO_EPROTO, ( 33 - 32 ), \
213 "Undefined error" )
214 #define EPROTO_STREAM_ID \
215 __einfo_error ( EINFO_EPROTO_STREAM_ID )
216 #define EINFO_EPROTO_STREAM_ID \
217 __einfo_uniqify ( EINFO_EPROTO, ( 34 - 32 ), \
218 "Invalid stream ID" )
219 #define EPROTO_SECONDARY \
220 __einfo_error ( EINFO_EPROTO_SECONDARY )
221 #define EINFO_EPROTO_SECONDARY \
222 __einfo_uniqify ( EINFO_EPROTO, ( 35 - 32 ), \
223 "Secondary bandwidth error" )
224 #define EPROTO_SPLIT \
225 __einfo_error ( EINFO_EPROTO_SPLIT )
226 #define EINFO_EPROTO_SPLIT \
227 __einfo_uniqify ( EINFO_EPROTO, ( 36 - 32 ), \
228 "Split transaction error" )
229 #define ECODE(code) \
230 ( ( (code) < 32 ) ? \
231 EUNIQ ( EINFO_EIO, ( (code) & 31 ), EIO_DATA, EIO_BABBLE, \
232 EIO_USB, EIO_TRB, EIO_STALL, EIO_RESOURCE, \
233 EIO_BANDWIDTH, EIO_NO_SLOTS, EIO_STREAM_TYPE, \
234 EIO_SLOT, EIO_ENDPOINT, EIO_SHORT, EIO_UNDERRUN, \
235 EIO_OVERRUN, EIO_VF_RING_FULL, EIO_PARAMETER, \
236 EIO_BANDWIDTH_OVERRUN, EIO_CONTEXT, EIO_NO_PING, \
237 EIO_RING_FULL, EIO_INCOMPATIBLE, EIO_MISSED, \
238 EIO_CMD_STOPPED, EIO_CMD_ABORTED, EIO_STOP, \
239 EIO_STOP_LEN, EIO_STOP_SHORT, EIO_LATENCY, \
240 EIO_ISOCH ) : \
241 ( (code) < 64 ) ? \
242 EUNIQ ( EINFO_EPROTO, ( (code) & 31 ), EPROTO_LOST, \
243 EPROTO_UNDEFINED, EPROTO_STREAM_ID, \
244 EPROTO_SECONDARY, EPROTO_SPLIT ) : \
245 EFAULT )
246
247 /******************************************************************************
248 *
249 * Register access
250 *
251 ******************************************************************************
252 */
253
254 /**
255 * Initialise device
256 *
257 * @v xhci xHCI device
258 * @v regs MMIO registers
259 */
260 static void xhci_init ( struct xhci_device *xhci, void *regs ) {
261 uint32_t hcsparams1;
262 uint32_t hcsparams2;
263 uint32_t hccparams1;
264 uint32_t pagesize;
265 size_t caplength;
266 size_t rtsoff;
267 size_t dboff;
268
269 /* Locate capability, operational, runtime, and doorbell registers */
270 xhci->cap = regs;
271 caplength = readb ( xhci->cap + XHCI_CAP_CAPLENGTH );
272 rtsoff = readl ( xhci->cap + XHCI_CAP_RTSOFF );
273 dboff = readl ( xhci->cap + XHCI_CAP_DBOFF );
274 xhci->op = ( xhci->cap + caplength );
275 xhci->run = ( xhci->cap + rtsoff );
276 xhci->db = ( xhci->cap + dboff );
277 DBGC2 ( xhci, "XHCI %p cap %08lx op %08lx run %08lx db %08lx\n",
278 xhci, virt_to_phys ( xhci->cap ), virt_to_phys ( xhci->op ),
279 virt_to_phys ( xhci->run ), virt_to_phys ( xhci->db ) );
280
281 /* Read structural parameters 1 */
282 hcsparams1 = readl ( xhci->cap + XHCI_CAP_HCSPARAMS1 );
283 xhci->slots = XHCI_HCSPARAMS1_SLOTS ( hcsparams1 );
284 xhci->intrs = XHCI_HCSPARAMS1_INTRS ( hcsparams1 );
285 xhci->ports = XHCI_HCSPARAMS1_PORTS ( hcsparams1 );
286 DBGC ( xhci, "XHCI %p has %d slots %d intrs %d ports\n",
287 xhci, xhci->slots, xhci->intrs, xhci->ports );
288
289 /* Read structural parameters 2 */
290 hcsparams2 = readl ( xhci->cap + XHCI_CAP_HCSPARAMS2 );
291 xhci->scratchpads = XHCI_HCSPARAMS2_SCRATCHPADS ( hcsparams2 );
292 DBGC2 ( xhci, "XHCI %p needs %d scratchpads\n",
293 xhci, xhci->scratchpads );
294
295 /* Read capability parameters 1 */
296 hccparams1 = readl ( xhci->cap + XHCI_CAP_HCCPARAMS1 );
297 xhci->addr64 = XHCI_HCCPARAMS1_ADDR64 ( hccparams1 );
298 xhci->csz_shift = XHCI_HCCPARAMS1_CSZ_SHIFT ( hccparams1 );
299 xhci->xecp = XHCI_HCCPARAMS1_XECP ( hccparams1 );
300
301 /* Read page size */
302 pagesize = readl ( xhci->op + XHCI_OP_PAGESIZE );
303 xhci->pagesize = XHCI_PAGESIZE ( pagesize );
304 assert ( xhci->pagesize != 0 );
305 assert ( ( ( xhci->pagesize ) & ( xhci->pagesize - 1 ) ) == 0 );
306 DBGC2 ( xhci, "XHCI %p page size %zd bytes\n",
307 xhci, xhci->pagesize );
308 }
309
310 /**
311 * Find extended capability
312 *
313 * @v xhci xHCI device
314 * @v id Capability ID
315 * @v offset Offset to previous extended capability instance, or zero
316 * @ret offset Offset to extended capability, or zero if not found
317 */
318 static unsigned int xhci_extended_capability ( struct xhci_device *xhci,
319 unsigned int id,
320 unsigned int offset ) {
321 uint32_t xecp;
322 unsigned int next;
323
324 /* Locate the extended capability */
325 while ( 1 ) {
326
327 /* Locate first or next capability as applicable */
328 if ( offset ) {
329 xecp = readl ( xhci->cap + offset );
330 next = XHCI_XECP_NEXT ( xecp );
331 } else {
332 next = xhci->xecp;
333 }
334 if ( ! next )
335 return 0;
336 offset += next;
337
338 /* Check if this is the requested capability */
339 xecp = readl ( xhci->cap + offset );
340 if ( XHCI_XECP_ID ( xecp ) == id )
341 return offset;
342 }
343 }
344
345 /**
346 * Write potentially 64-bit register
347 *
348 * @v xhci xHCI device
349 * @v value Value
350 * @v reg Register address
351 * @ret rc Return status code
352 */
353 static inline __attribute__ (( always_inline )) int
354 xhci_writeq ( struct xhci_device *xhci, physaddr_t value, void *reg ) {
355
356 /* If this is a 32-bit build, then this can never fail
357 * (allowing the compiler to optimise out the error path).
358 */
359 if ( sizeof ( value ) <= sizeof ( uint32_t ) ) {
360 writel ( value, reg );
361 writel ( 0, ( reg + sizeof ( uint32_t ) ) );
362 return 0;
363 }
364
365 /* If the device does not support 64-bit addresses and this
366 * address is outside the 32-bit address space, then fail.
367 */
368 if ( ( value & ~0xffffffffULL ) && ! xhci->addr64 ) {
369 DBGC ( xhci, "XHCI %p cannot access address %lx\n",
370 xhci, value );
371 return -ENOTSUP;
372 }
373
374 /* If this is a 64-bit build, then writeq() is available */
375 writeq ( value, reg );
376 return 0;
377 }
378
379 /**
380 * Calculate buffer alignment
381 *
382 * @v len Length
383 * @ret align Buffer alignment
384 *
385 * Determine alignment required for a buffer which must be aligned to
386 * at least XHCI_MIN_ALIGN and which must not cross a page boundary.
387 */
388 static inline size_t xhci_align ( size_t len ) {
389 size_t align;
390
391 /* Align to own length (rounded up to a power of two) */
392 align = ( 1 << fls ( len - 1 ) );
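/* (For example, a 24-byte allocation gives fls ( 23 ) = 5 and hence
 * a 32-byte alignment.  The resulting alignment is always a power of
 * two at least as large as the length, so buffers of the sizes used
 * here cannot cross a page boundary.)
 */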
393
394 /* Round up to XHCI_MIN_ALIGN if needed */
395 if ( align < XHCI_MIN_ALIGN )
396 align = XHCI_MIN_ALIGN;
397
398 return align;
399 }
400
401 /**
402 * Calculate device context offset
403 *
404 * @v xhci xHCI device
405 * @v ctx Context index
406 */
407 static inline size_t xhci_device_context_offset ( struct xhci_device *xhci,
408 unsigned int ctx ) {
409
410 return ( XHCI_DCI ( ctx ) << xhci->csz_shift );
411 }
412
413 /**
414 * Calculate input context offset
415 *
416 * @v xhci xHCI device
417 * @v ctx Context index
418 */
419 static inline size_t xhci_input_context_offset ( struct xhci_device *xhci,
420 unsigned int ctx ) {
421
422 return ( XHCI_ICI ( ctx ) << xhci->csz_shift );
423 }
424
425 /******************************************************************************
426 *
427 * Diagnostics
428 *
429 ******************************************************************************
430 */
431
432 /**
433 * Dump host controller registers
434 *
435 * @v xhci xHCI device
436 */
437 static inline void xhci_dump ( struct xhci_device *xhci ) {
438 uint32_t usbcmd;
439 uint32_t usbsts;
440 uint32_t pagesize;
441 uint32_t dnctrl;
442 uint32_t config;
443
444 /* Do nothing unless debugging is enabled */
445 if ( ! DBG_LOG )
446 return;
447
448 /* Dump USBCMD */
449 usbcmd = readl ( xhci->op + XHCI_OP_USBCMD );
450 DBGC ( xhci, "XHCI %p USBCMD %08x%s%s\n", xhci, usbcmd,
451 ( ( usbcmd & XHCI_USBCMD_RUN ) ? " run" : "" ),
452 ( ( usbcmd & XHCI_USBCMD_HCRST ) ? " hcrst" : "" ) );
453
454 /* Dump USBSTS */
455 usbsts = readl ( xhci->op + XHCI_OP_USBSTS );
456 DBGC ( xhci, "XHCI %p USBSTS %08x%s\n", xhci, usbsts,
457 ( ( usbsts & XHCI_USBSTS_HCH ) ? " hch" : "" ) );
458
459 /* Dump PAGESIZE */
460 pagesize = readl ( xhci->op + XHCI_OP_PAGESIZE );
461 DBGC ( xhci, "XHCI %p PAGESIZE %08x\n", xhci, pagesize );
462
463 /* Dump DNCTRL */
464 dnctrl = readl ( xhci->op + XHCI_OP_DNCTRL );
465 DBGC ( xhci, "XHCI %p DNCTRL %08x\n", xhci, dnctrl );
466
467 /* Dump CONFIG */
468 config = readl ( xhci->op + XHCI_OP_CONFIG );
469 DBGC ( xhci, "XHCI %p CONFIG %08x\n", xhci, config );
470 }
471
472 /**
473 * Dump port registers
474 *
475 * @v xhci xHCI device
476 * @v port Port number
477 */
478 static inline void xhci_dump_port ( struct xhci_device *xhci,
479 unsigned int port ) {
480 uint32_t portsc;
481 uint32_t portpmsc;
482 uint32_t portli;
483 uint32_t porthlpmc;
484
485 /* Do nothing unless debugging is enabled */
486 if ( ! DBG_LOG )
487 return;
488
489 /* Dump PORTSC */
490 portsc = readl ( xhci->op + XHCI_OP_PORTSC ( port ) );
491 DBGC ( xhci, "XHCI %p port %d PORTSC %08x%s%s%s%s psiv=%d\n",
492 xhci, port, portsc,
493 ( ( portsc & XHCI_PORTSC_CCS ) ? " ccs" : "" ),
494 ( ( portsc & XHCI_PORTSC_PED ) ? " ped" : "" ),
495 ( ( portsc & XHCI_PORTSC_PR ) ? " pr" : "" ),
496 ( ( portsc & XHCI_PORTSC_PP ) ? " pp" : "" ),
497 XHCI_PORTSC_PSIV ( portsc ) );
498
499 /* Dump PORTPMSC */
500 portpmsc = readl ( xhci->op + XHCI_OP_PORTPMSC ( port ) );
501 DBGC ( xhci, "XHCI %p port %d PORTPMSC %08x\n", xhci, port, portpmsc );
502
503 /* Dump PORTLI */
504 portli = readl ( xhci->op + XHCI_OP_PORTLI ( port ) );
505 DBGC ( xhci, "XHCI %p port %d PORTLI %08x\n", xhci, port, portli );
506
507 /* Dump PORTHLPMC */
508 porthlpmc = readl ( xhci->op + XHCI_OP_PORTHLPMC ( port ) );
509 DBGC ( xhci, "XHCI %p port %d PORTHLPMC %08x\n",
510 xhci, port, porthlpmc );
511 }
512
513 /******************************************************************************
514 *
515 * USB legacy support
516 *
517 ******************************************************************************
518 */
519
520 /**
521 * Initialise USB legacy support
522 *
523 * @v xhci xHCI device
524 */
525 static void xhci_legacy_init ( struct xhci_device *xhci ) {
526 unsigned int legacy;
527 uint8_t bios;
528
529 /* Locate USB legacy support capability (if present) */
530 legacy = xhci_extended_capability ( xhci, XHCI_XECP_ID_LEGACY, 0 );
531 if ( ! legacy ) {
532 /* Not an error; capability may not be present */
533 DBGC ( xhci, "XHCI %p has no USB legacy support capability\n",
534 xhci );
535 return;
536 }
537
538 /* Check if legacy USB support is enabled */
539 bios = readb ( xhci->cap + legacy + XHCI_USBLEGSUP_BIOS );
540 if ( ! ( bios & XHCI_USBLEGSUP_BIOS_OWNED ) ) {
541 /* Not an error; already owned by OS */
542 DBGC ( xhci, "XHCI %p USB legacy support already disabled\n",
543 xhci );
544 return;
545 }
546
547 /* Record presence of USB legacy support capability */
548 xhci->legacy = legacy;
549 }
550
551 /**
552 * Claim ownership from BIOS
553 *
554 * @v xhci xHCI device
555 * @ret rc Return status code
556 */
557 static int xhci_legacy_claim ( struct xhci_device *xhci ) {
558 uint32_t ctlsts;
559 uint8_t bios;
560 unsigned int i;
561
562 /* Do nothing unless legacy support capability is present */
563 if ( ! xhci->legacy )
564 return 0;
565
566 /* Claim ownership */
567 writeb ( XHCI_USBLEGSUP_OS_OWNED,
568 xhci->cap + xhci->legacy + XHCI_USBLEGSUP_OS );
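/* Setting the OS Owned semaphore requests a handoff; the BIOS is
 * expected to quiesce its SMI-driven emulation and then clear its
 * BIOS Owned semaphore to acknowledge, which is what the loop below
 * polls for.
 */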
569
570 /* Wait for BIOS to release ownership */
571 for ( i = 0 ; i < XHCI_USBLEGSUP_MAX_WAIT_MS ; i++ ) {
572
573 /* Check if BIOS has released ownership */
574 bios = readb ( xhci->cap + xhci->legacy + XHCI_USBLEGSUP_BIOS );
575 if ( ! ( bios & XHCI_USBLEGSUP_BIOS_OWNED ) ) {
576 DBGC ( xhci, "XHCI %p claimed ownership from BIOS\n",
577 xhci );
578 ctlsts = readl ( xhci->cap + xhci->legacy +
579 XHCI_USBLEGSUP_CTLSTS );
580 if ( ctlsts ) {
581 DBGC ( xhci, "XHCI %p warning: BIOS retained "
582 "SMIs: %08x\n", xhci, ctlsts );
583 }
584 return 0;
585 }
586
587 /* Delay */
588 mdelay ( 1 );
589 }
590
591 DBGC ( xhci, "XHCI %p timed out waiting for BIOS to release "
592 "ownership\n", xhci );
593 return -ETIMEDOUT;
594 }
595
596 /**
597 * Release ownership back to BIOS
598 *
599 * @v xhci xHCI device
600 */
601 static void xhci_legacy_release ( struct xhci_device *xhci ) {
602
603 /* Do nothing unless legacy support capability is present */
604 if ( ! xhci->legacy )
605 return;
606
607 /* Release ownership */
608 writeb ( 0, xhci->cap + xhci->legacy + XHCI_USBLEGSUP_OS );
609 DBGC ( xhci, "XHCI %p released ownership to BIOS\n", xhci );
610 }
611
612 /******************************************************************************
613 *
614 * Supported protocols
615 *
616 ******************************************************************************
617 */
618
619 /**
620 * Transcribe port speed (for debugging)
621 *
622 * @v psi Protocol speed ID
623 * @ret speed Transcribed speed
624 */
625 static inline const char * xhci_speed_name ( uint32_t psi ) {
626 static const char *exponents[4] = { "", "k", "M", "G" };
627 static char buf[ 10 /* "xxxxxXbps" + NUL */ ];
628 unsigned int mantissa;
629 unsigned int exponent;
630
631 /* Extract mantissa and exponent */
632 mantissa = XHCI_SUPPORTED_PSI_MANTISSA ( psi );
633 exponent = XHCI_SUPPORTED_PSI_EXPONENT ( psi );
634
635 /* Transcribe speed */
636 snprintf ( buf, sizeof ( buf ), "%d%sbps",
637 mantissa, exponents[exponent] );
638 return buf;
639 }
640
641 /**
642 * Find supported protocol extended capability for a port
643 *
644 * @v xhci xHCI device
645 * @v port Port number
646 * @ret supported Offset to extended capability, or zero if not found
647 */
648 static unsigned int xhci_supported_protocol ( struct xhci_device *xhci,
649 unsigned int port ) {
650 unsigned int supported = 0;
651 unsigned int offset;
652 unsigned int count;
653 uint32_t ports;
654
655 /* Iterate over all supported protocol structures */
656 while ( ( supported = xhci_extended_capability ( xhci,
657 XHCI_XECP_ID_SUPPORTED,
658 supported ) ) ) {
659
660 /* Determine port range */
661 ports = readl ( xhci->cap + supported + XHCI_SUPPORTED_PORTS );
662 offset = XHCI_SUPPORTED_PORTS_OFFSET ( ports );
663 count = XHCI_SUPPORTED_PORTS_COUNT ( ports );
664
665 /* Check if port lies within this range */
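/* (The subtraction is unsigned, so ports numbered below the range's
 * starting offset wrap to a large value and also fail this test.)
 */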
666 if ( ( port - offset ) < count )
667 return supported;
668 }
669
670 DBGC ( xhci, "XHCI %p port %d has no supported protocol\n",
671 xhci, port );
672 return 0;
673 }
674
675 /**
676 * Find port protocol
677 *
678 * @v xhci xHCI device
679 * @v port Port number
680 * @ret protocol USB protocol, or zero if not found
681 */
682 static unsigned int xhci_port_protocol ( struct xhci_device *xhci,
683 unsigned int port ) {
684 unsigned int supported = xhci_supported_protocol ( xhci, port );
685 union {
686 uint32_t raw;
687 char text[5];
688 } name;
689 unsigned int protocol;
690 unsigned int type;
691 unsigned int psic;
692 unsigned int psiv;
693 unsigned int i;
694 uint32_t revision;
695 uint32_t ports;
696 uint32_t slot;
697 uint32_t psi;
698
699 /* Fail if there is no supported protocol */
700 if ( ! supported )
701 return 0;
702
703 /* Determine protocol version */
704 revision = readl ( xhci->cap + supported + XHCI_SUPPORTED_REVISION );
705 protocol = XHCI_SUPPORTED_REVISION_VER ( revision );
706
707 /* Describe port protocol */
708 if ( DBG_EXTRA ) {
709 name.raw = cpu_to_le32 ( readl ( xhci->cap + supported +
710 XHCI_SUPPORTED_NAME ) );
711 name.text[4] = '\0';
712 slot = readl ( xhci->cap + supported + XHCI_SUPPORTED_SLOT );
713 type = XHCI_SUPPORTED_SLOT_TYPE ( slot );
714 DBGC2 ( xhci, "XHCI %p port %d %sv%04x type %d",
715 xhci, port, name.text, protocol, type );
716 ports = readl ( xhci->cap + supported + XHCI_SUPPORTED_PORTS );
717 psic = XHCI_SUPPORTED_PORTS_PSIC ( ports );
718 if ( psic ) {
719 DBGC2 ( xhci, " speeds" );
720 for ( i = 0 ; i < psic ; i++ ) {
721 psi = readl ( xhci->cap + supported +
722 XHCI_SUPPORTED_PSI ( i ) );
723 psiv = XHCI_SUPPORTED_PSI_VALUE ( psi );
724 DBGC2 ( xhci, " %d:%s", psiv,
725 xhci_speed_name ( psi ) );
726 }
727 }
728 DBGC2 ( xhci, "\n" );
729 }
730
731 return protocol;
732 }
733
734 /**
735 * Find port slot type
736 *
737 * @v xhci xHCI device
738 * @v port Port number
739 * @ret type Slot type, or negative error
740 */
741 static int xhci_port_slot_type ( struct xhci_device *xhci, unsigned int port ) {
742 unsigned int supported = xhci_supported_protocol ( xhci, port );
743 unsigned int type;
744 uint32_t slot;
745
746 /* Fail if there is no supported protocol */
747 if ( ! supported )
748 return -ENOTSUP;
749
750 /* Get slot type */
751 slot = readl ( xhci->cap + supported + XHCI_SUPPORTED_SLOT );
752 type = XHCI_SUPPORTED_SLOT_TYPE ( slot );
753
754 return type;
755 }
756
757 /**
758 * Find port speed
759 *
760 * @v xhci xHCI device
761 * @v port Port number
762 * @v psiv Protocol speed ID value
763 * @ret speed Port speed, or negative error
764 */
765 static int xhci_port_speed ( struct xhci_device *xhci, unsigned int port,
766 unsigned int psiv ) {
767 unsigned int supported = xhci_supported_protocol ( xhci, port );
768 unsigned int psic;
769 unsigned int mantissa;
770 unsigned int exponent;
771 unsigned int speed;
772 unsigned int i;
773 uint32_t ports;
774 uint32_t psi;
775
776 /* Fail if there is no supported protocol */
777 if ( ! supported )
778 return -ENOTSUP;
779
780 /* Get protocol speed ID count */
781 ports = readl ( xhci->cap + supported + XHCI_SUPPORTED_PORTS );
782 psic = XHCI_SUPPORTED_PORTS_PSIC ( ports );
783
784 /* Use the default mappings if applicable */
785 if ( ! psic ) {
786 switch ( psiv ) {
787 case XHCI_SPEED_LOW : return USB_SPEED_LOW;
788 case XHCI_SPEED_FULL : return USB_SPEED_FULL;
789 case XHCI_SPEED_HIGH : return USB_SPEED_HIGH;
790 case XHCI_SPEED_SUPER : return USB_SPEED_SUPER;
791 default:
792 DBGC ( xhci, "XHCI %p port %d non-standard PSI value "
793 "%d\n", xhci, port, psiv );
794 return -ENOTSUP;
795 }
796 }
797
798 /* Iterate over PSI dwords looking for a match */
799 for ( i = 0 ; i < psic ; i++ ) {
800 psi = readl ( xhci->cap + supported + XHCI_SUPPORTED_PSI ( i ));
801 if ( psiv == XHCI_SUPPORTED_PSI_VALUE ( psi ) ) {
802 mantissa = XHCI_SUPPORTED_PSI_MANTISSA ( psi );
803 exponent = XHCI_SUPPORTED_PSI_EXPONENT ( psi );
804 speed = USB_SPEED ( mantissa, exponent );
805 return speed;
806 }
807 }
808
809 DBGC ( xhci, "XHCI %p port %d spurious PSI value %d\n",
810 xhci, port, psiv );
811 return -ENOENT;
812 }
813
814 /**
815 * Find protocol speed ID value
816 *
817 * @v xhci xHCI device
818 * @v port Port number
819 * @v speed USB speed
820 * @ret psiv Protocol speed ID value, or negative error
821 */
822 static int xhci_port_psiv ( struct xhci_device *xhci, unsigned int port,
823 unsigned int speed ) {
824 unsigned int supported = xhci_supported_protocol ( xhci, port );
825 unsigned int psic;
826 unsigned int mantissa;
827 unsigned int exponent;
828 unsigned int psiv;
829 unsigned int i;
830 uint32_t ports;
831 uint32_t psi;
832
833 /* Fail if there is no supported protocol */
834 if ( ! supported )
835 return -ENOTSUP;
836
837 /* Get protocol speed ID count */
838 ports = readl ( xhci->cap + supported + XHCI_SUPPORTED_PORTS );
839 psic = XHCI_SUPPORTED_PORTS_PSIC ( ports );
840
841 /* Use the default mappings if applicable */
842 if ( ! psic ) {
843 switch ( speed ) {
844 case USB_SPEED_LOW : return XHCI_SPEED_LOW;
845 case USB_SPEED_FULL : return XHCI_SPEED_FULL;
846 case USB_SPEED_HIGH : return XHCI_SPEED_HIGH;
847 case USB_SPEED_SUPER : return XHCI_SPEED_SUPER;
848 default:
849 DBGC ( xhci, "XHCI %p port %d non-standard speed %d\n",
850 xhci, port, speed );
851 return -ENOTSUP;
852 }
853 }
854
855 /* Iterate over PSI dwords looking for a match */
856 for ( i = 0 ; i < psic ; i++ ) {
857 psi = readl ( xhci->cap + supported + XHCI_SUPPORTED_PSI ( i ));
858 mantissa = XHCI_SUPPORTED_PSI_MANTISSA ( psi );
859 exponent = XHCI_SUPPORTED_PSI_EXPONENT ( psi );
860 if ( speed == USB_SPEED ( mantissa, exponent ) ) {
861 psiv = XHCI_SUPPORTED_PSI_VALUE ( psi );
862 return psiv;
863 }
864 }
865
866 DBGC ( xhci, "XHCI %p port %d unrepresentable speed %#x\n",
867 xhci, port, speed );
868 return -ENOENT;
869 }
870
871 /******************************************************************************
872 *
873 * Device context base address array
874 *
875 ******************************************************************************
876 */
877
878 /**
879 * Allocate device context base address array
880 *
881 * @v xhci xHCI device
882 * @ret rc Return status code
883 */
884 static int xhci_dcbaa_alloc ( struct xhci_device *xhci ) {
885 size_t len;
886 physaddr_t dcbaap;
887 int rc;
888
889 /* Allocate and initialise structure. Must be at least
890 * 64-byte aligned and must not cross a page boundary, so
891 * align on its own size (rounded up to a power of two and
892 * with a minimum of 64 bytes).
893 */
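/* The array has ( slots + 1 ) entries: entry 0 is reserved for the
 * scratchpad buffer array pointer (filled in by
 * xhci_scratchpad_alloc()), and entries 1 to MaxSlots hold the
 * per-slot device context pointers.
 */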
894 len = ( ( xhci->slots + 1 ) * sizeof ( xhci->dcbaa[0] ) );
895 xhci->dcbaa = malloc_dma ( len, xhci_align ( len ) );
896 if ( ! xhci->dcbaa ) {
897 DBGC ( xhci, "XHCI %p could not allocate DCBAA\n", xhci );
898 rc = -ENOMEM;
899 goto err_alloc;
900 }
901 memset ( xhci->dcbaa, 0, len );
902
903 /* Program DCBAA pointer */
904 dcbaap = virt_to_phys ( xhci->dcbaa );
905 if ( ( rc = xhci_writeq ( xhci, dcbaap,
906 xhci->op + XHCI_OP_DCBAAP ) ) != 0 )
907 goto err_writeq;
908
909 DBGC2 ( xhci, "XHCI %p DCBAA at [%08lx,%08lx)\n",
910 xhci, dcbaap, ( dcbaap + len ) );
911 return 0;
912
913 err_writeq:
914 free_dma ( xhci->dcbaa, len );
915 err_alloc:
916 return rc;
917 }
918
919 /**
920 * Free device context base address array
921 *
922 * @v xhci xHCI device
923 */
924 static void xhci_dcbaa_free ( struct xhci_device *xhci ) {
925 size_t len;
926 unsigned int i;
927
928 /* Sanity check */
929 for ( i = 0 ; i <= xhci->slots ; i++ )
930 assert ( xhci->dcbaa[i] == 0 );
931
932 /* Clear DCBAA pointer */
933 xhci_writeq ( xhci, 0, xhci->op + XHCI_OP_DCBAAP );
934
935 /* Free DCBAA */
936 len = ( ( xhci->slots + 1 ) * sizeof ( xhci->dcbaa[0] ) );
937 free_dma ( xhci->dcbaa, len );
938 }
939
940 /******************************************************************************
941 *
942 * Scratchpad buffers
943 *
944 ******************************************************************************
945 */
946
947 /**
948 * Allocate scratchpad buffers
949 *
950 * @v xhci xHCI device
951 * @ret rc Return status code
952 */
953 static int xhci_scratchpad_alloc ( struct xhci_device *xhci ) {
954 size_t array_len;
955 size_t len;
956 physaddr_t phys;
957 unsigned int i;
958 int rc;
959
960 /* Do nothing if no scratchpad buffers are used */
961 if ( ! xhci->scratchpads )
962 return 0;
963
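/* Scratchpad buffers are whole pages lent to the controller for its
 * own internal use: the driver allocates them and records their
 * physical addresses in the scratchpad array (pointed to by DCBAA
 * entry 0), but never touches their contents.
 */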
964 /* Allocate scratchpads */
965 len = ( xhci->scratchpads * xhci->pagesize );
966 xhci->scratchpad = umalloc ( len );
967 if ( ! xhci->scratchpad ) {
968 DBGC ( xhci, "XHCI %p could not allocate scratchpad buffers\n",
969 xhci );
970 rc = -ENOMEM;
971 goto err_alloc;
972 }
973 memset_user ( xhci->scratchpad, 0, 0, len );
974
975 /* Allocate scratchpad array */
976 array_len = ( xhci->scratchpads * sizeof ( xhci->scratchpad_array[0] ));
977 xhci->scratchpad_array =
978 malloc_dma ( array_len, xhci_align ( array_len ) );
979 if ( ! xhci->scratchpad_array ) {
980 DBGC ( xhci, "XHCI %p could not allocate scratchpad buffer "
981 "array\n", xhci );
982 rc = -ENOMEM;
983 goto err_alloc_array;
984 }
985
986 /* Populate scratchpad array */
987 for ( i = 0 ; i < xhci->scratchpads ; i++ ) {
988 phys = user_to_phys ( xhci->scratchpad, ( i * xhci->pagesize ));
989 xhci->scratchpad_array[i] = phys;
990 }
991
992 /* Set scratchpad array pointer */
993 assert ( xhci->dcbaa != NULL );
994 xhci->dcbaa[0] = cpu_to_le64 ( virt_to_phys ( xhci->scratchpad_array ));
995
996 DBGC2 ( xhci, "XHCI %p scratchpad [%08lx,%08lx) array [%08lx,%08lx)\n",
997 xhci, user_to_phys ( xhci->scratchpad, 0 ),
998 user_to_phys ( xhci->scratchpad, len ),
999 virt_to_phys ( xhci->scratchpad_array ),
1000 ( virt_to_phys ( xhci->scratchpad_array ) + array_len ) );
1001 return 0;
1002
1003 free_dma ( xhci->scratchpad_array, array_len );
1004 err_alloc_array:
1005 ufree ( xhci->scratchpad );
1006 err_alloc:
1007 return rc;
1008 }
1009
1010 /**
1011 * Free scratchpad buffers
1012 *
1013 * @v xhci xHCI device
1014 */
1015 static void xhci_scratchpad_free ( struct xhci_device *xhci ) {
1016 size_t array_len;
1017
1018 /* Do nothing if no scratchpad buffers are used */
1019 if ( ! xhci->scratchpads )
1020 return;
1021
1022 /* Clear scratchpad array pointer */
1023 assert ( xhci->dcbaa != NULL );
1024 xhci->dcbaa[0] = 0;
1025
1026 /* Free scratchpad array */
1027 array_len = ( xhci->scratchpads * sizeof ( xhci->scratchpad_array[0] ));
1028 free_dma ( xhci->scratchpad_array, array_len );
1029
1030 /* Free scratchpads */
1031 ufree ( xhci->scratchpad );
1032 }
1033
1034 /******************************************************************************
1035 *
1036 * Run / stop / reset
1037 *
1038 ******************************************************************************
1039 */
1040
1041 /**
1042 * Start xHCI device
1043 *
1044 * @v xhci xHCI device
1045 */
1046 static void xhci_run ( struct xhci_device *xhci ) {
1047 uint32_t config;
1048 uint32_t usbcmd;
1049
1050 /* Configure number of device slots */
1051 config = readl ( xhci->op + XHCI_OP_CONFIG );
1052 config &= ~XHCI_CONFIG_MAX_SLOTS_EN_MASK;
1053 config |= XHCI_CONFIG_MAX_SLOTS_EN ( xhci->slots );
1054 writel ( config, xhci->op + XHCI_OP_CONFIG );
1055
1056 /* Set run/stop bit */
1057 usbcmd = readl ( xhci->op + XHCI_OP_USBCMD );
1058 usbcmd |= XHCI_USBCMD_RUN;
1059 writel ( usbcmd, xhci->op + XHCI_OP_USBCMD );
1060 }
1061
1062 /**
1063 * Stop xHCI device
1064 *
1065 * @v xhci xHCI device
1066 * @ret rc Return status code
1067 */
1068 static int xhci_stop ( struct xhci_device *xhci ) {
1069 uint32_t usbcmd;
1070 uint32_t usbsts;
1071 unsigned int i;
1072
1073 /* Clear run/stop bit */
1074 usbcmd = readl ( xhci->op + XHCI_OP_USBCMD );
1075 usbcmd &= ~XHCI_USBCMD_RUN;
1076 writel ( usbcmd, xhci->op + XHCI_OP_USBCMD );
1077
1078 /* Wait for device to stop */
1079 for ( i = 0 ; i < XHCI_STOP_MAX_WAIT_MS ; i++ ) {
1080
1081 /* Check if device is stopped */
1082 usbsts = readl ( xhci->op + XHCI_OP_USBSTS );
1083 if ( usbsts & XHCI_USBSTS_HCH )
1084 return 0;
1085
1086 /* Delay */
1087 mdelay ( 1 );
1088 }
1089
1090 DBGC ( xhci, "XHCI %p timed out waiting for stop\n", xhci );
1091 return -ETIMEDOUT;
1092 }
1093
1094 /**
1095 * Reset xHCI device
1096 *
1097 * @v xhci xHCI device
1098 * @ret rc Return status code
1099 */
1100 static int xhci_reset ( struct xhci_device *xhci ) {
1101 uint32_t usbcmd;
1102 unsigned int i;
1103 int rc;
1104
1105 /* The xHCI specification states that resetting a running
1106 * device may result in undefined behaviour, so try stopping
1107 * it first.
1108 */
1109 if ( ( rc = xhci_stop ( xhci ) ) != 0 ) {
1110 /* Ignore errors and attempt to reset the device anyway */
1111 }
1112
1113 /* Reset device */
1114 writel ( XHCI_USBCMD_HCRST, xhci->op + XHCI_OP_USBCMD );
1115
1116 /* Wait for reset to complete */
1117 for ( i = 0 ; i < XHCI_RESET_MAX_WAIT_MS ; i++ ) {
1118
1119 /* Check if reset is complete */
1120 usbcmd = readl ( xhci->op + XHCI_OP_USBCMD );
1121 if ( ! ( usbcmd & XHCI_USBCMD_HCRST ) )
1122 return 0;
1123
1124 /* Delay */
1125 mdelay ( 1 );
1126 }
1127
1128 DBGC ( xhci, "XHCI %p timed out waiting for reset\n", xhci );
1129 return -ETIMEDOUT;
1130 }
1131
1132 /******************************************************************************
1133 *
1134 * Transfer request blocks
1135 *
1136 ******************************************************************************
1137 */
1138
1139 /**
1140 * Allocate transfer request block ring
1141 *
1142 * @v xhci xHCI device
1143 * @v ring TRB ring
1144 * @v shift Ring size (log2)
1145 * @v slot Device slot
1146 * @v target Doorbell target
1147 * @v stream Doorbell stream ID
1148 * @ret rc Return status code
1149 */
1150 static int xhci_ring_alloc ( struct xhci_device *xhci,
1151 struct xhci_trb_ring *ring,
1152 unsigned int shift, unsigned int slot,
1153 unsigned int target, unsigned int stream ) {
1154 struct xhci_trb_link *link;
1155 unsigned int count;
1156 int rc;
1157
1158 /* Sanity check */
1159 assert ( shift > 0 );
1160
1161 /* Initialise structure */
1162 memset ( ring, 0, sizeof ( *ring ) );
1163 ring->shift = shift;
1164 count = ( 1U << shift );
1165 ring->mask = ( count - 1 );
1166 ring->len = ( ( count + 1 /* Link TRB */ ) * sizeof ( ring->trb[0] ) );
1167 ring->db = ( xhci->db + ( slot * sizeof ( ring->dbval ) ) );
1168 ring->dbval = XHCI_DBVAL ( target, stream );
1169
1170 /* Allocate I/O buffers */
1171 ring->iobuf = zalloc ( count * sizeof ( ring->iobuf[0] ) );
1172 if ( ! ring->iobuf ) {
1173 rc = -ENOMEM;
1174 goto err_alloc_iobuf;
1175 }
1176
1177 /* Allocate TRBs */
1178 ring->trb = malloc_dma ( ring->len, xhci_align ( ring->len ) );
1179 if ( ! ring->trb ) {
1180 rc = -ENOMEM;
1181 goto err_alloc_trb;
1182 }
1183 memset ( ring->trb, 0, ring->len );
1184
1185 /* Initialise Link TRB */
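/* The final ring entry is a Link TRB pointing back to the start of
 * the ring; its Toggle Cycle flag causes the controller to invert
 * its consumer cycle state each time it follows the link, matching
 * the producer cycle calculation in xhci_enqueue().
 */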
1186 link = &ring->trb[count].link;
1187 link->next = cpu_to_le64 ( virt_to_phys ( ring->trb ) );
1188 link->flags = XHCI_TRB_TC;
1189 link->type = XHCI_TRB_LINK;
1190 ring->link = link;
1191
1192 return 0;
1193
1194 free_dma ( ring->trb, ring->len );
1195 err_alloc_trb:
1196 free ( ring->iobuf );
1197 err_alloc_iobuf:
1198 return rc;
1199 }
1200
1201 /**
1202 * Free transfer request block ring
1203 *
1204 * @v ring TRB ring
1205 */
1206 static void xhci_ring_free ( struct xhci_trb_ring *ring ) {
1207 unsigned int count = ( 1U << ring->shift );
1208 unsigned int i;
1209
1210 /* Sanity checks */
1211 assert ( ring->cons == ring->prod );
1212 for ( i = 0 ; i < count ; i++ )
1213 assert ( ring->iobuf[i] == NULL );
1214
1215 /* Free TRBs */
1216 free_dma ( ring->trb, ring->len );
1217
1218 /* Free I/O buffers */
1219 free ( ring->iobuf );
1220 }
1221
1222 /**
1223 * Enqueue a transfer request block
1224 *
1225 * @v ring TRB ring
1226 * @v iobuf I/O buffer (if any)
1227 * @v trb Transfer request block (with empty Cycle flag)
1228 * @ret rc Return status code
1229 *
1230 * This operation does not implicitly ring the doorbell register.
1231 */
1232 static int xhci_enqueue ( struct xhci_trb_ring *ring, struct io_buffer *iobuf,
1233 const union xhci_trb *trb ) {
1234 union xhci_trb *dest;
1235 unsigned int prod;
1236 unsigned int mask;
1237 unsigned int index;
1238 unsigned int cycle;
1239
1240 /* Sanity check */
1241 assert ( ! ( trb->common.flags & XHCI_TRB_C ) );
1242
1243 /* Fail if ring is full */
1244 if ( ! xhci_ring_remaining ( ring ) )
1245 return -ENOBUFS;
1246
1247 /* Update producer counter (and link TRB, if applicable) */
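/* The producer counter counts every TRB ever enqueued, so its high
 * bits count complete passes around the ring: an even pass count
 * corresponds to a producer cycle state of 1 (the initial state
 * established via CRCR.RCS or the endpoint context DCS bit).  When
 * wrapping to index zero, the link TRB is stamped with the previous
 * pass's cycle value so that the consumer will still follow it.
 */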
1248 prod = ring->prod++;
1249 mask = ring->mask;
1250 cycle = ( ( ~( prod >> ring->shift ) ) & XHCI_TRB_C );
1251 index = ( prod & mask );
1252 if ( index == 0 )
1253 ring->link->flags = ( XHCI_TRB_TC | ( cycle ^ XHCI_TRB_C ) );
1254
1255 /* Record I/O buffer */
1256 ring->iobuf[index] = iobuf;
1257
1258 /* Enqueue TRB */
1259 dest = &ring->trb[index];
1260 dest->template.parameter = trb->template.parameter;
1261 dest->template.status = trb->template.status;
1262 wmb();
1263 dest->template.control = ( trb->template.control |
1264 cpu_to_le32 ( cycle ) );
1265
1266 return 0;
1267 }
1268
1269 /**
1270 * Dequeue a transfer request block
1271 *
1272 * @v ring TRB ring
1273 * @ret iobuf I/O buffer
1274 */
1275 static struct io_buffer * xhci_dequeue ( struct xhci_trb_ring *ring ) {
1276 struct io_buffer *iobuf;
1277 unsigned int cons;
1278 unsigned int mask;
1279 unsigned int index;
1280
1281 /* Sanity check */
1282 assert ( xhci_ring_fill ( ring ) != 0 );
1283
1284 /* Update consumer counter */
1285 cons = ring->cons++;
1286 mask = ring->mask;
1287 index = ( cons & mask );
1288
1289 /* Retrieve I/O buffer */
1290 iobuf = ring->iobuf[index];
1291 ring->iobuf[index] = NULL;
1292
1293 return iobuf;
1294 }
1295
1296 /**
1297 * Enqueue multiple transfer request blocks
1298 *
1299 * @v ring TRB ring
1300 * @v iobuf I/O buffer
1301 * @v trbs Transfer request blocks (with empty Cycle flag)
1302 * @v count Number of transfer request blocks
1303 * @ret rc Return status code
1304 *
1305 * This operation does not implicitly ring the doorbell register.
1306 */
1307 static int xhci_enqueue_multi ( struct xhci_trb_ring *ring,
1308 struct io_buffer *iobuf,
1309 const union xhci_trb *trbs,
1310 unsigned int count ) {
1311 const union xhci_trb *trb = trbs;
1312 int rc;
1313
1314 /* Sanity check */
1315 assert ( iobuf != NULL );
1316
1317 /* Fail if ring does not have sufficient space */
1318 if ( xhci_ring_remaining ( ring ) < count )
1319 return -ENOBUFS;
1320
1321 /* Enqueue each TRB, recording the I/O buffer with the final TRB */
1322 while ( count-- ) {
1323 rc = xhci_enqueue ( ring, ( count ? NULL : iobuf ), trb++ );
1324 assert ( rc == 0 ); /* Should never be able to fail */
1325 }
1326
1327 return 0;
1328 }
1329
1330 /**
1331 * Dequeue multiple transfer request blocks
1332 *
1333 * @v ring TRB ring
1334 * @ret iobuf I/O buffer
1335 */
1336 static struct io_buffer * xhci_dequeue_multi ( struct xhci_trb_ring *ring ) {
1337 struct io_buffer *iobuf;
1338
1339 /* Dequeue TRBs until we reach the final TRB for an I/O buffer */
1340 do {
1341 iobuf = xhci_dequeue ( ring );
1342 } while ( iobuf == NULL );
1343
1344 return iobuf;
1345 }
1346
1347 /**
1348 * Ring doorbell register
1349 *
1350 * @v ring TRB ring
1351 */
1352 static inline __attribute__ (( always_inline )) void
1353 xhci_doorbell ( struct xhci_trb_ring *ring ) {
1354
1355 wmb();
1356 writel ( ring->dbval, ring->db );
1357 }
1358
1359 /******************************************************************************
1360 *
1361 * Command and event rings
1362 *
1363 ******************************************************************************
1364 */
1365
1366 /**
1367 * Allocate command ring
1368 *
1369 * @v xhci xHCI device
1370 * @ret rc Return status code
1371 */
1372 static int xhci_command_alloc ( struct xhci_device *xhci ) {
1373 physaddr_t crp;
1374 int rc;
1375
1376 /* Allocate TRB ring */
1377 if ( ( rc = xhci_ring_alloc ( xhci, &xhci->command, XHCI_CMD_TRBS_LOG2,
1378 0, 0, 0 ) ) != 0 )
1379 goto err_ring_alloc;
1380
1381 /* Program command ring control register */
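/* XHCI_CRCR_RCS sets the controller's initial consumer cycle state
 * for the command ring to 1, matching the producer cycle state used
 * by xhci_enqueue() on the first pass around the ring.
 */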
1382 crp = virt_to_phys ( xhci->command.trb );
1383 if ( ( rc = xhci_writeq ( xhci, ( crp | XHCI_CRCR_RCS ),
1384 xhci->op + XHCI_OP_CRCR ) ) != 0 )
1385 goto err_writeq;
1386
1387 DBGC2 ( xhci, "XHCI %p CRCR at [%08lx,%08lx)\n",
1388 xhci, crp, ( crp + xhci->command.len ) );
1389 return 0;
1390
1391 err_writeq:
1392 xhci_ring_free ( &xhci->command );
1393 err_ring_alloc:
1394 return rc;
1395 }
1396
1397 /**
1398 * Free command ring
1399 *
1400 * @v xhci xHCI device
1401 */
1402 static void xhci_command_free ( struct xhci_device *xhci ) {
1403
1404 /* Sanity check */
1405 assert ( ( readl ( xhci->op + XHCI_OP_CRCR ) & XHCI_CRCR_CRR ) == 0 );
1406
1407 /* Clear command ring control register */
1408 xhci_writeq ( xhci, 0, xhci->op + XHCI_OP_CRCR );
1409
1410 /* Free TRB ring */
1411 xhci_ring_free ( &xhci->command );
1412 }
1413
1414 /**
1415 * Allocate event ring
1416 *
1417 * @v xhci xHCI device
1418 * @ret rc Return status code
1419 */
1420 static int xhci_event_alloc ( struct xhci_device *xhci ) {
1421 struct xhci_event_ring *event = &xhci->event;
1422 unsigned int count;
1423 size_t len;
1424 int rc;
1425
1426 /* Allocate event ring */
1427 count = ( 1 << XHCI_EVENT_TRBS_LOG2 );
1428 len = ( count * sizeof ( event->trb[0] ) );
1429 event->trb = malloc_dma ( len, xhci_align ( len ) );
1430 if ( ! event->trb ) {
1431 rc = -ENOMEM;
1432 goto err_alloc_trb;
1433 }
1434 memset ( event->trb, 0, len );
1435
1436 /* Allocate event ring segment table */
1437 event->segment = malloc_dma ( sizeof ( event->segment[0] ),
1438 xhci_align ( sizeof (event->segment[0])));
1439 if ( ! event->segment ) {
1440 rc = -ENOMEM;
1441 goto err_alloc_segment;
1442 }
1443 memset ( event->segment, 0, sizeof ( event->segment[0] ) );
1444 event->segment[0].base = cpu_to_le64 ( virt_to_phys ( event->trb ) );
1445 event->segment[0].count = cpu_to_le32 ( count );
1446
1447 /* Program event ring registers */
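/* A single-segment event ring is used, so ERSTSZ is simply 1; ERDP
 * starts at the beginning of that segment, and ERSTBA points at the
 * one-entry segment table populated above.
 */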
1448 writel ( 1, xhci->run + XHCI_RUN_ERSTSZ ( 0 ) );
1449 if ( ( rc = xhci_writeq ( xhci, virt_to_phys ( event->trb ),
1450 xhci->run + XHCI_RUN_ERDP ( 0 ) ) ) != 0 )
1451 goto err_writeq_erdp;
1452 if ( ( rc = xhci_writeq ( xhci, virt_to_phys ( event->segment ),
1453 xhci->run + XHCI_RUN_ERSTBA ( 0 ) ) ) != 0 )
1454 goto err_writeq_erstba;
1455
1456 DBGC2 ( xhci, "XHCI %p event ring [%08lx,%08lx) table [%08lx,%08lx)\n",
1457 xhci, virt_to_phys ( event->trb ),
1458 ( virt_to_phys ( event->trb ) + len ),
1459 virt_to_phys ( event->segment ),
1460 ( virt_to_phys ( event->segment ) +
1461 sizeof (event->segment[0] ) ) );
1462 return 0;
1463
1464 xhci_writeq ( xhci, 0, xhci->run + XHCI_RUN_ERSTBA ( 0 ) );
1465 err_writeq_erstba:
1466 xhci_writeq ( xhci, 0, xhci->run + XHCI_RUN_ERDP ( 0 ) );
1467 err_writeq_erdp:
1468 free_dma ( event->segment, sizeof ( event->segment[0] ) );
1469 err_alloc_segment:
1470 free_dma ( event->trb, len );
1471 err_alloc_trb:
1472 return rc;
1473 }
1474
1475 /**
1476 * Free event ring
1477 *
1478 * @v xhci xHCI device
1479 */
1480 static void xhci_event_free ( struct xhci_device *xhci ) {
1481 struct xhci_event_ring *event = &xhci->event;
1482 unsigned int count;
1483 size_t len;
1484
1485 /* Clear event ring registers */
1486 writel ( 0, xhci->run + XHCI_RUN_ERSTSZ ( 0 ) );
1487 xhci_writeq ( xhci, 0, xhci->run + XHCI_RUN_ERSTBA ( 0 ) );
1488 xhci_writeq ( xhci, 0, xhci->run + XHCI_RUN_ERDP ( 0 ) );
1489
1490 /* Free event ring segment table */
1491 free_dma ( event->segment, sizeof ( event->segment[0] ) );
1492
1493 /* Free event ring */
1494 count = ( 1 << XHCI_EVENT_TRBS_LOG2 );
1495 len = ( count * sizeof ( event->trb[0] ) );
1496 free_dma ( event->trb, len );
1497 }
1498
1499 /**
1500 * Handle transfer event
1501 *
1502 * @v xhci xHCI device
1503 * @v transfer Transfer event TRB
1504 */
1505 static void xhci_transfer ( struct xhci_device *xhci,
1506 struct xhci_trb_transfer *transfer ) {
1507 struct xhci_slot *slot;
1508 struct xhci_endpoint *endpoint;
1509 struct io_buffer *iobuf;
1510 int rc;
1511
1512 /* Profile transfer events */
1513 profile_start ( &xhci_transfer_profiler );
1514
1515 /* Identify slot */
1516 if ( ( transfer->slot > xhci->slots ) ||
1517 ( ( slot = xhci->slot[transfer->slot] ) == NULL ) ) {
1518 DBGC ( xhci, "XHCI %p transfer event invalid slot %d:\n",
1519 xhci, transfer->slot );
1520 DBGC_HDA ( xhci, 0, transfer, sizeof ( *transfer ) );
1521 return;
1522 }
1523
1524 /* Identify endpoint */
1525 if ( ( transfer->endpoint > XHCI_CTX_END ) ||
1526 ( ( endpoint = slot->endpoint[transfer->endpoint] ) == NULL ) ) {
1527 DBGC ( xhci, "XHCI %p slot %d transfer event invalid epid "
1528 "%d:\n", xhci, slot->id, transfer->endpoint );
1529 DBGC_HDA ( xhci, 0, transfer, sizeof ( *transfer ) );
1530 return;
1531 }
1532
1533 /* Dequeue TRB(s) */
1534 iobuf = xhci_dequeue_multi ( &endpoint->ring );
1535 assert ( iobuf != NULL );
1536
1537 /* Check for errors */
1538 if ( ! ( ( transfer->code == XHCI_CMPLT_SUCCESS ) ||
1539 ( transfer->code == XHCI_CMPLT_SHORT ) ) ) {
1540
1541 /* Construct error */
1542 rc = -ECODE ( transfer->code );
1543 DBGC ( xhci, "XHCI %p slot %d ctx %d failed (code %d): %s\n",
1544 xhci, slot->id, endpoint->ctx, transfer->code,
1545 strerror ( rc ) );
1546 DBGC_HDA ( xhci, 0, transfer, sizeof ( *transfer ) );
1547
1548 /* Sanity check */
1549 assert ( ( endpoint->context->state & XHCI_ENDPOINT_STATE_MASK )
1550 != XHCI_ENDPOINT_RUNNING );
1551
1552 /* Report failure to USB core */
1553 usb_complete_err ( endpoint->ep, iobuf, rc );
1554 return;
1555 }
1556
1557 /* Record actual transfer size */
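/* The transfer event reports the number of untransferred bytes in
 * its residual field; trimming that amount from the I/O buffer
 * leaves its length equal to the number of bytes actually
 * transferred.
 */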
1558 iob_unput ( iobuf, le16_to_cpu ( transfer->residual ) );
1559
1560 /* Sanity check (for successful completions only) */
1561 assert ( xhci_ring_consumed ( &endpoint->ring ) ==
1562 le64_to_cpu ( transfer->transfer ) );
1563
1564 /* Report completion to USB core */
1565 usb_complete ( endpoint->ep, iobuf );
1566 profile_stop ( &xhci_transfer_profiler );
1567 }
1568
1569 /**
1570 * Handle command completion event
1571 *
1572 * @v xhci xHCI device
1573 * @v complete Command completion event
1574 */
1575 static void xhci_complete ( struct xhci_device *xhci,
1576 struct xhci_trb_complete *complete ) {
1577
1578 /* Dequeue command TRB */
1579 xhci_dequeue ( &xhci->command );
1580
1581 /* Sanity check */
1582 assert ( xhci_ring_consumed ( &xhci->command ) ==
1583 le64_to_cpu ( complete->command ) );
1584
1585 /* Record completion if applicable */
1586 if ( xhci->completion ) {
1587 memcpy ( xhci->completion, complete,
1588 sizeof ( *xhci->completion ) );
1589 xhci->completion = NULL;
1590 } else {
1591 DBGC ( xhci, "XHCI %p unexpected completion:\n", xhci );
1592 DBGC_HDA ( xhci, 0, complete, sizeof ( *complete ) );
1593 }
1594 }
1595
1596 /**
1597 * Handle port status event
1598 *
1599 * @v xhci xHCI device
1600 * @v port Port status event
1601 */
1602 static void xhci_port_status ( struct xhci_device *xhci,
1603 struct xhci_trb_port_status *port ) {
1604 uint32_t portsc;
1605
1606 /* Sanity check */
1607 assert ( ( port->port > 0 ) && ( port->port <= xhci->ports ) );
1608
1609 /* Clear port status change bits */
1610 portsc = readl ( xhci->op + XHCI_OP_PORTSC ( port->port ) );
1611 portsc &= ( XHCI_PORTSC_PRESERVE | XHCI_PORTSC_CHANGE );
1612 writel ( portsc, xhci->op + XHCI_OP_PORTSC ( port->port ) );
1613
1614 /* Report port status change */
1615 usb_port_changed ( usb_port ( xhci->bus->hub, port->port ) );
1616 }
1617
1618 /**
1619 * Handle host controller event
1620 *
1621 * @v xhci xHCI device
1622 * @v host Host controller event
1623 */
1624 static void xhci_host_controller ( struct xhci_device *xhci,
1625 struct xhci_trb_host_controller *host ) {
1626 int rc;
1627
1628 /* Construct error */
1629 rc = -ECODE ( host->code );
1630 DBGC ( xhci, "XHCI %p host controller event (code %d): %s\n",
1631 xhci, host->code, strerror ( rc ) );
1632 }
1633
1634 /**
1635 * Poll event ring
1636 *
1637 * @v xhci xHCI device
1638 */
1639 static void xhci_event_poll ( struct xhci_device *xhci ) {
1640 struct xhci_event_ring *event = &xhci->event;
1641 union xhci_trb *trb;
1642 unsigned int shift = XHCI_EVENT_TRBS_LOG2;
1643 unsigned int count = ( 1 << shift );
1644 unsigned int mask = ( count - 1 );
1645 unsigned int consumed;
1646 unsigned int type;
1647
1648 /* Poll for events */
1649 profile_start ( &xhci_event_profiler );
1650 for ( consumed = 0 ; ; consumed++ ) {
1651
1652 /* Stop if we reach an empty TRB */
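/* A TRB has been written by the controller only if its cycle bit
 * differs from the low bit of the consumer's pass count (i.e.
 * matches the current consumer cycle state); otherwise the entry has
 * not yet been written on this pass and the ring is empty.
 */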
1653 rmb();
1654 trb = &event->trb[ event->cons & mask ];
1655 if ( ! ( ( trb->common.flags ^
1656 ( event->cons >> shift ) ) & XHCI_TRB_C ) )
1657 break;
1658
1659 /* Handle TRB */
1660 type = ( trb->common.type & XHCI_TRB_TYPE_MASK );
1661 switch ( type ) {
1662
1663 case XHCI_TRB_TRANSFER :
1664 xhci_transfer ( xhci, &trb->transfer );
1665 break;
1666
1667 case XHCI_TRB_COMPLETE :
1668 xhci_complete ( xhci, &trb->complete );
1669 break;
1670
1671 case XHCI_TRB_PORT_STATUS:
1672 xhci_port_status ( xhci, &trb->port );
1673 break;
1674
1675 case XHCI_TRB_HOST_CONTROLLER:
1676 xhci_host_controller ( xhci, &trb->host );
1677 break;
1678
1679 default:
1680 DBGC ( xhci, "XHCI %p unrecognised event %#x:\n",
1681 xhci, event->cons );
1682 DBGC_HDA ( xhci, virt_to_phys ( trb ),
1683 trb, sizeof ( *trb ) );
1684 break;
1685 }
1686
1687 /* Consume this TRB */
1688 event->cons++;
1689 }
1690
1691 /* Update dequeue pointer if applicable */
1692 if ( consumed ) {
1693 xhci_writeq ( xhci, virt_to_phys ( trb ),
1694 xhci->run + XHCI_RUN_ERDP ( 0 ) );
1695 profile_stop ( &xhci_event_profiler );
1696 }
1697 }
1698
1699 /**
1700 * Issue command and wait for completion
1701 *
1702 * @v xhci xHCI device
1703 * @v trb Transfer request block (with empty Cycle flag)
1704 * @ret rc Return status code
1705 *
1706 * On a successful completion, the TRB will be overwritten with the
1707 * completion.
1708 */
1709 static int xhci_command ( struct xhci_device *xhci, union xhci_trb *trb ) {
1710 struct xhci_trb_complete *complete = &trb->complete;
1711 unsigned int i;
1712 int rc;
1713
1714 /* Record the completion buffer */
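/* xhci_complete() will copy the completion event into this buffer
 * and clear the pointer, which is how the polling loop below detects
 * that the command has finished.
 */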
1715 xhci->completion = trb;
1716
1717 /* Enqueue the command */
1718 if ( ( rc = xhci_enqueue ( &xhci->command, NULL, trb ) ) != 0 )
1719 goto err_enqueue;
1720
1721 /* Ring the command doorbell */
1722 xhci_doorbell ( &xhci->command );
1723
1724 /* Wait for the command to complete */
1725 for ( i = 0 ; i < XHCI_COMMAND_MAX_WAIT_MS ; i++ ) {
1726
1727 /* Poll event ring */
1728 xhci_event_poll ( xhci );
1729
1730 /* Check for completion */
1731 if ( ! xhci->completion ) {
1732 if ( complete->code != XHCI_CMPLT_SUCCESS ) {
1733 rc = -ECODE ( complete->code );
1734 DBGC ( xhci, "XHCI %p command failed (code "
1735 "%d): %s\n", xhci, complete->code,
1736 strerror ( rc ) );
1737 DBGC_HDA ( xhci, 0, trb, sizeof ( *trb ) );
1738 return rc;
1739 }
1740 return 0;
1741 }
1742
1743 /* Delay */
1744 mdelay ( 1 );
1745 }
1746
1747 /* Timeout */
1748 DBGC ( xhci, "XHCI %p timed out waiting for completion\n", xhci );
1749 rc = -ETIMEDOUT;
1750
1751 err_enqueue:
1752 xhci->completion = NULL;
1753 return rc;
1754 }
1755
1756 /**
1757 * Issue NOP and wait for completion
1758 *
1759 * @v xhci xHCI device
1760 * @ret rc Return status code
1761 */
1762 static inline int xhci_nop ( struct xhci_device *xhci ) {
1763 union xhci_trb trb;
1764 struct xhci_trb_common *nop = &trb.common;
1765 int rc;
1766
1767 /* Construct command */
1768 memset ( nop, 0, sizeof ( *nop ) );
1769 nop->flags = XHCI_TRB_IOC;
1770 nop->type = XHCI_TRB_NOP_CMD;
1771
1772 /* Issue command and wait for completion */
1773 if ( ( rc = xhci_command ( xhci, &trb ) ) != 0 )
1774 return rc;
1775
1776 return 0;
1777 }
1778
1779 /**
1780 * Enable slot
1781 *
1782 * @v xhci xHCI device
1783 * @v type Slot type
1784 * @ret slot Device slot ID, or negative error
1785 */
1786 static inline int xhci_enable_slot ( struct xhci_device *xhci,
1787 unsigned int type ) {
1788 union xhci_trb trb;
1789 struct xhci_trb_enable_slot *enable = &trb.enable;
1790 struct xhci_trb_complete *enabled = &trb.complete;
1791 unsigned int slot;
1792 int rc;
1793
1794 /* Construct command */
1795 memset ( enable, 0, sizeof ( *enable ) );
1796 enable->slot = type;
1797 enable->type = XHCI_TRB_ENABLE_SLOT;
1798
1799 /* Issue command and wait for completion */
1800 if ( ( rc = xhci_command ( xhci, &trb ) ) != 0 ) {
1801 DBGC ( xhci, "XHCI %p could not enable new slot: %s\n",
1802 xhci, strerror ( rc ) );
1803 return rc;
1804 }
1805
1806 /* Extract slot number */
1807 slot = enabled->slot;
1808
1809 DBGC2 ( xhci, "XHCI %p slot %d enabled\n", xhci, slot );
1810 return slot;
1811 }
1812
1813 /**
1814 * Disable slot
1815 *
1816 * @v xhci xHCI device
1817 * @v slot Device slot
1818 * @ret rc Return status code
1819 */
1820 static inline int xhci_disable_slot ( struct xhci_device *xhci,
1821 unsigned int slot ) {
1822 union xhci_trb trb;
1823 struct xhci_trb_disable_slot *disable = &trb.disable;
1824 int rc;
1825
1826 /* Construct command */
1827 memset ( disable, 0, sizeof ( *disable ) );
1828 disable->type = XHCI_TRB_DISABLE_SLOT;
1829 disable->slot = slot;
1830
1831 /* Issue command and wait for completion */
1832 if ( ( rc = xhci_command ( xhci, &trb ) ) != 0 ) {
1833 DBGC ( xhci, "XHCI %p could not disable slot %d: %s\n",
1834 xhci, slot, strerror ( rc ) );
1835 return rc;
1836 }
1837
1838 DBGC2 ( xhci, "XHCI %p slot %d disabled\n", xhci, slot );
1839 return 0;
1840 }
1841
1842 /**
1843 * Issue context-based command and wait for completion
1844 *
1845 * @v xhci xHCI device
1846 * @v slot Device slot
1847 * @v endpoint Endpoint
1848 * @v type TRB type
1849 * @v populate Input context populator
1850 * @ret rc Return status code
1851 */
1852 static int xhci_context ( struct xhci_device *xhci, struct xhci_slot *slot,
1853 struct xhci_endpoint *endpoint, unsigned int type,
1854 void ( * populate ) ( struct xhci_device *xhci,
1855 struct xhci_slot *slot,
1856 struct xhci_endpoint *endpoint,
1857 void *input ) ) {
1858 union xhci_trb trb;
1859 struct xhci_trb_context *context = &trb.context;
1860 size_t len;
1861 void *input;
1862 int rc;
1863
1864 /* Allocate an input context */
1865 len = xhci_input_context_offset ( xhci, XHCI_CTX_END );
1866 input = malloc_dma ( len, xhci_align ( len ) );
1867 if ( ! input ) {
1868 rc = -ENOMEM;
1869 goto err_alloc;
1870 }
1871 memset ( input, 0, len );
1872
1873 /* Populate input context */
1874 populate ( xhci, slot, endpoint, input );
1875
1876 /* Construct command */
1877 memset ( context, 0, sizeof ( *context ) );
1878 context->type = type;
1879 context->input = cpu_to_le64 ( virt_to_phys ( input ) );
1880 context->slot = slot->id;
1881
1882 /* Issue command and wait for completion */
1883 if ( ( rc = xhci_command ( xhci, &trb ) ) != 0 )
1884 goto err_command;
1885
1886 err_command:
1887 free_dma ( input, len );
1888 err_alloc:
1889 return rc;
1890 }
1891
1892 /**
1893 * Populate address device input context
1894 *
1895 * @v xhci xHCI device
1896 * @v slot Device slot
1897 * @v endpoint Endpoint
1898 * @v input Input context
1899 */
1900 static void xhci_address_device_input ( struct xhci_device *xhci,
1901 struct xhci_slot *slot,
1902 struct xhci_endpoint *endpoint,
1903 void *input ) {
1904 struct xhci_control_context *control_ctx;
1905 struct xhci_slot_context *slot_ctx;
1906 struct xhci_endpoint_context *ep_ctx;
1907
1908 /* Sanity checks */
1909 assert ( endpoint->ctx == XHCI_CTX_EP0 );
1910
1911 /* Populate control context */
1912 control_ctx = input;
1913 control_ctx->add = cpu_to_le32 ( ( 1 << XHCI_CTX_SLOT ) |
1914 ( 1 << XHCI_CTX_EP0 ) );
1915
1916 /* Populate slot context */
1917 slot_ctx = ( input + xhci_input_context_offset ( xhci, XHCI_CTX_SLOT ));
1918 slot_ctx->info = cpu_to_le32 ( XHCI_SLOT_INFO ( 1, 0, slot->psiv,
1919 slot->route ) );
1920 slot_ctx->port = slot->port;
1921
1922 /* Populate control endpoint context */
1923 ep_ctx = ( input + xhci_input_context_offset ( xhci, XHCI_CTX_EP0 ) );
1924 ep_ctx->type = XHCI_EP_TYPE_CONTROL;
1925 ep_ctx->burst = endpoint->ep->burst;
1926 ep_ctx->mtu = cpu_to_le16 ( endpoint->ep->mtu );
1927 ep_ctx->dequeue = cpu_to_le64 ( virt_to_phys ( endpoint->ring.trb ) |
1928 XHCI_EP_DCS );
1929 ep_ctx->trb_len = cpu_to_le16 ( XHCI_EP0_TRB_LEN );
1930 }
1931
1932 /**
1933 * Address device
1934 *
1935 * @v xhci xHCI device
1936 * @v slot Device slot
1937 * @ret rc Return status code
1938 */
1939 static inline int xhci_address_device ( struct xhci_device *xhci,
1940 struct xhci_slot *slot ) {
1941 struct usb_device *usb = slot->usb;
1942 struct xhci_slot_context *slot_ctx;
1943 int rc;
1944
1945 /* Assign device address */
1946 if ( ( rc = xhci_context ( xhci, slot, slot->endpoint[XHCI_CTX_EP0],
1947 XHCI_TRB_ADDRESS_DEVICE,
1948 xhci_address_device_input ) ) != 0 )
1949 return rc;
1950
1951 /* Get assigned address */
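/* The controller records the address that it assigned in the
 * USB Device Address field of the output slot context.
 */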
1952 slot_ctx = ( slot->context +
1953 xhci_device_context_offset ( xhci, XHCI_CTX_SLOT ) );
1954 usb->address = slot_ctx->address;
1955 DBGC2 ( xhci, "XHCI %p assigned address %d to %s\n",
1956 xhci, usb->address, usb->name );
1957
1958 return 0;
1959 }
1960
1961 /**
1962 * Populate configure endpoint input context
1963 *
1964 * @v xhci xHCI device
1965 * @v slot Device slot
1966 * @v endpoint Endpoint
1967 * @v input Input context
1968 */
1969 static void xhci_configure_endpoint_input ( struct xhci_device *xhci,
1970 struct xhci_slot *slot __unused,
1971 struct xhci_endpoint *endpoint,
1972 void *input ) {
1973 struct xhci_control_context *control_ctx;
1974 struct xhci_slot_context *slot_ctx;
1975 struct xhci_endpoint_context *ep_ctx;
1976
1977 /* Populate control context */
1978 control_ctx = input;
1979 control_ctx->add = cpu_to_le32 ( ( 1 << XHCI_CTX_SLOT ) |
1980 ( 1 << endpoint->ctx ) );
1981
1982 /* Populate slot context */
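/* Use the maximum possible Context Entries value so that the
 * slot context always covers the highest endpoint context.
 */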
1983 slot_ctx = ( input + xhci_input_context_offset ( xhci, XHCI_CTX_SLOT ));
1984 slot_ctx->info = cpu_to_le32 ( XHCI_SLOT_INFO ( ( XHCI_CTX_END - 1 ),
1985 0, 0, 0 ) );
1986
1987 /* Populate endpoint context */
1988 ep_ctx = ( input + xhci_input_context_offset ( xhci, endpoint->ctx ) );
1989 ep_ctx->interval = endpoint->interval;
1990 ep_ctx->type = endpoint->type;
1991 ep_ctx->burst = endpoint->ep->burst;
1992 ep_ctx->mtu = cpu_to_le16 ( endpoint->ep->mtu );
1993 ep_ctx->dequeue = cpu_to_le64 ( virt_to_phys ( endpoint->ring.trb ) |
1994 XHCI_EP_DCS );
1995 ep_ctx->trb_len = cpu_to_le16 ( endpoint->ep->mtu ); /* best guess */
1996 }
1997
1998 /**
1999 * Configure endpoint
2000 *
2001 * @v xhci xHCI device
2002 * @v slot Device slot
2003 * @v endpoint Endpoint
2004 * @ret rc Return status code
2005 */
2006 static inline int xhci_configure_endpoint ( struct xhci_device *xhci,
2007 struct xhci_slot *slot,
2008 struct xhci_endpoint *endpoint ) {
2009 int rc;
2010
2011 /* Configure endpoint */
2012 if ( ( rc = xhci_context ( xhci, slot, endpoint,
2013 XHCI_TRB_CONFIGURE_ENDPOINT,
2014 xhci_configure_endpoint_input ) ) != 0 )
2015 return rc;
2016
2017 DBGC2 ( xhci, "XHCI %p slot %d ctx %d configured\n",
2018 xhci, slot->id, endpoint->ctx );
2019 return 0;
2020 }
2021
2022 /**
2023 * Populate deconfigure endpoint input context
2024 *
2025 * @v xhci xHCI device
2026 * @v slot Device slot
2027 * @v endpoint Endpoint
2028 * @v input Input context
2029 */
2030 static void
2031 xhci_deconfigure_endpoint_input ( struct xhci_device *xhci __unused,
2032 struct xhci_slot *slot __unused,
2033 struct xhci_endpoint *endpoint,
2034 void *input ) {
2035 struct xhci_control_context *control_ctx;
2036 struct xhci_slot_context *slot_ctx;
2037
2038 /* Populate control context */
2039 control_ctx = input;
2040 control_ctx->add = cpu_to_le32 ( 1 << XHCI_CTX_SLOT );
2041 control_ctx->drop = cpu_to_le32 ( 1 << endpoint->ctx );
2042
2043 /* Populate slot context */
2044 slot_ctx = ( input + xhci_input_context_offset ( xhci, XHCI_CTX_SLOT ));
2045 slot_ctx->info = cpu_to_le32 ( XHCI_SLOT_INFO ( ( XHCI_CTX_END - 1 ),
2046 0, 0, 0 ) );
2047 }
2048
2049 /**
2050 * Deconfigure endpoint
2051 *
2052 * @v xhci xHCI device
2053 * @v slot Device slot
2054 * @v endpoint Endpoint
2055 * @ret rc Return status code
2056 */
2057 static inline int xhci_deconfigure_endpoint ( struct xhci_device *xhci,
2058 struct xhci_slot *slot,
2059 struct xhci_endpoint *endpoint ) {
2060 int rc;
2061
2062 /* Deconfigure endpoint */
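/* There is no dedicated "deconfigure endpoint" command: the
 * endpoint is removed via a Configure Endpoint command whose
 * input context sets the corresponding drop flag.
 */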
2063 if ( ( rc = xhci_context ( xhci, slot, endpoint,
2064 XHCI_TRB_CONFIGURE_ENDPOINT,
2065 xhci_deconfigure_endpoint_input ) ) != 0 )
2066 return rc;
2067
2068 DBGC2 ( xhci, "XHCI %p slot %d ctx %d deconfigured\n",
2069 xhci, slot->id, endpoint->ctx );
2070 return 0;
2071 }
2072
2073 /**
2074 * Populate evaluate context input context
2075 *
2076 * @v xhci xHCI device
2077 * @v slot Device slot
2078 * @v endpoint Endpoint
2079 * @v input Input context
2080 */
2081 static void xhci_evaluate_context_input ( struct xhci_device *xhci,
2082 struct xhci_slot *slot __unused,
2083 struct xhci_endpoint *endpoint,
2084 void *input ) {
2085 struct xhci_control_context *control_ctx;
2086 struct xhci_slot_context *slot_ctx;
2087 struct xhci_endpoint_context *ep_ctx;
2088
2089 /* Populate control context */
2090 control_ctx = input;
2091 control_ctx->add = cpu_to_le32 ( ( 1 << XHCI_CTX_SLOT ) |
2092 ( 1 << endpoint->ctx ) );
2093
2094 /* Populate slot context */
2095 slot_ctx = ( input + xhci_input_context_offset ( xhci, XHCI_CTX_SLOT ));
2096 slot_ctx->info = cpu_to_le32 ( XHCI_SLOT_INFO ( ( XHCI_CTX_END - 1 ),
2097 0, 0, 0 ) );
2098
2099 /* Populate endpoint context */
2100 ep_ctx = ( input + xhci_input_context_offset ( xhci, endpoint->ctx ) );
2101 ep_ctx->mtu = cpu_to_le16 ( endpoint->ep->mtu );
2102 }
2103
2104 /**
2105 * Evaluate context
2106 *
2107 * @v xhci xHCI device
2108 * @v slot Device slot
2109 * @v endpoint Endpoint
2110 * @ret rc Return status code
2111 */
2112 static inline int xhci_evaluate_context ( struct xhci_device *xhci,
2113 struct xhci_slot *slot,
2114 struct xhci_endpoint *endpoint ) {
2115 int rc;
2116
2117 /* Evaluate context */
2118 if ( ( rc = xhci_context ( xhci, slot, endpoint,
2119 XHCI_TRB_EVALUATE_CONTEXT,
2120 xhci_evaluate_context_input ) ) != 0 )
2121 return rc;
2122
2123 DBGC2 ( xhci, "XHCI %p slot %d ctx %d (re-)evaluated\n",
2124 xhci, slot->id, endpoint->ctx );
2125 return 0;
2126 }
2127
2128 /**
2129 * Reset endpoint
2130 *
2131 * @v xhci xHCI device
2132 * @v slot Device slot
2133 * @v endpoint Endpoint
2134 * @ret rc Return status code
2135 */
2136 static inline int xhci_reset_endpoint ( struct xhci_device *xhci,
2137 struct xhci_slot *slot,
2138 struct xhci_endpoint *endpoint ) {
2139 union xhci_trb trb;
2140 struct xhci_trb_reset_endpoint *reset = &trb.reset;
2141 int rc;
2142
2143 /* Construct command */
2144 memset ( reset, 0, sizeof ( *reset ) );
2145 reset->slot = slot->id;
2146 reset->endpoint = endpoint->ctx;
2147 reset->type = XHCI_TRB_RESET_ENDPOINT;
2148
2149 /* Issue command and wait for completion */
2150 if ( ( rc = xhci_command ( xhci, &trb ) ) != 0 ) {
2151 DBGC ( xhci, "XHCI %p slot %d ctx %d could not reset endpoint "
2152 "in state %d: %s\n", xhci, slot->id, endpoint->ctx,
2153 endpoint->context->state, strerror ( rc ) );
2154 return rc;
2155 }
2156
2157 return 0;
2158 }
2159
2160 /**
2161 * Stop endpoint
2162 *
2163 * @v xhci xHCI device
2164 * @v slot Device slot
2165 * @v endpoint Endpoint
2166 * @ret rc Return status code
2167 */
2168 static inline int xhci_stop_endpoint ( struct xhci_device *xhci,
2169 struct xhci_slot *slot,
2170 struct xhci_endpoint *endpoint ) {
2171 union xhci_trb trb;
2172 struct xhci_trb_stop_endpoint *stop = &trb.stop;
2173 int rc;
2174
2175 /* Construct command */
2176 memset ( stop, 0, sizeof ( *stop ) );
2177 stop->slot = slot->id;
2178 stop->endpoint = endpoint->ctx;
2179 stop->type = XHCI_TRB_STOP_ENDPOINT;
2180
2181 /* Issue command and wait for completion */
2182 if ( ( rc = xhci_command ( xhci, &trb ) ) != 0 ) {
2183 DBGC ( xhci, "XHCI %p slot %d ctx %d could not stop endpoint "
2184 "in state %d: %s\n", xhci, slot->id, endpoint->ctx,
2185 endpoint->context->state, strerror ( rc ) );
2186 return rc;
2187 }
2188
2189 return 0;
2190 }
2191
2192 /**
2193 * Set transfer ring dequeue pointer
2194 *
2195 * @v xhci xHCI device
2196 * @v slot Device slot
2197 * @v endpoint Endpoint
2198 * @ret rc Return status code
2199 */
2200 static inline int
2201 xhci_set_tr_dequeue_pointer ( struct xhci_device *xhci,
2202 struct xhci_slot *slot,
2203 struct xhci_endpoint *endpoint ) {
2204 union xhci_trb trb;
2205 struct xhci_trb_set_tr_dequeue_pointer *dequeue = &trb.dequeue;
2206 struct xhci_trb_ring *ring = &endpoint->ring;
2207 unsigned int cons;
2208 unsigned int mask;
2209 unsigned int index;
2210 unsigned int dcs;
2211 int rc;
2212
2213 /* Construct command */
2214 memset ( dequeue, 0, sizeof ( *dequeue ) );
2215 cons = ring->cons;
2216 mask = ring->mask;
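/* The dequeue cycle state must match the cycle bit used by the
 * producer for the TRB at the consumer index: it starts at 1 and
 * toggles on each complete pass around the ring, hence the
 * inverted wrap count.
 */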
2217 dcs = ( ( ~( cons >> ring->shift ) ) & XHCI_EP_DCS );
2218 index = ( cons & mask );
2219 dequeue->dequeue =
2220 cpu_to_le64 ( virt_to_phys ( &ring->trb[index] ) | dcs );
2221 dequeue->slot = slot->id;
2222 dequeue->endpoint = endpoint->ctx;
2223 dequeue->type = XHCI_TRB_SET_TR_DEQUEUE_POINTER;
2224
2225 /* Issue command and wait for completion */
2226 if ( ( rc = xhci_command ( xhci, &trb ) ) != 0 ) {
2227 DBGC ( xhci, "XHCI %p slot %d ctx %d could not set TR dequeue "
2228 "pointer in state %d: %s\n", xhci, slot->id,
2229 endpoint->ctx, endpoint->context->state, strerror ( rc));
2230 return rc;
2231 }
2232
2233 return 0;
2234 }
2235
2236 /******************************************************************************
2237 *
2238 * Endpoint operations
2239 *
2240 ******************************************************************************
2241 */
2242
2243 /**
2244 * Open endpoint
2245 *
2246 * @v ep USB endpoint
2247 * @ret rc Return status code
2248 */
2249 static int xhci_endpoint_open ( struct usb_endpoint *ep ) {
2250 struct usb_device *usb = ep->usb;
2251 struct xhci_slot *slot = usb_get_hostdata ( usb );
2252 struct xhci_device *xhci = slot->xhci;
2253 struct xhci_endpoint *endpoint;
2254 unsigned int ctx;
2255 unsigned int type;
2256 unsigned int interval;
2257 int rc;
2258
2259 /* Calculate context index */
2260 ctx = XHCI_CTX ( ep->address );
2261 assert ( slot->endpoint[ctx] == NULL );
2262
2263 /* Calculate endpoint type */
2264 type = XHCI_EP_TYPE ( ep->attributes & USB_ENDPOINT_ATTR_TYPE_MASK );
2265 if ( type == XHCI_EP_TYPE ( USB_ENDPOINT_ATTR_CONTROL ) )
2266 type = XHCI_EP_TYPE_CONTROL;
2267 if ( ep->address & USB_DIR_IN )
2268 type |= XHCI_EP_TYPE_IN;
2269
2270 /* Calculate interval */
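/* The xHCI interval field for periodic endpoints is expressed
 * as a log2 value; fls()-1 gives the rounded-down base-2
 * logarithm of the descriptor interval.
 */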
2271 if ( type & XHCI_EP_TYPE_PERIODIC ) {
2272 interval = ( fls ( ep->interval ) - 1 );
2273 } else {
2274 interval = ep->interval;
2275 }
2276
2277 /* Allocate and initialise structure */
2278 endpoint = zalloc ( sizeof ( *endpoint ) );
2279 if ( ! endpoint ) {
2280 rc = -ENOMEM;
2281 goto err_alloc;
2282 }
2283 usb_endpoint_set_hostdata ( ep, endpoint );
2284 slot->endpoint[ctx] = endpoint;
2285 endpoint->xhci = xhci;
2286 endpoint->slot = slot;
2287 endpoint->ep = ep;
2288 endpoint->ctx = ctx;
2289 endpoint->type = type;
2290 endpoint->interval = interval;
2291 endpoint->context = ( ( ( void * ) slot->context ) +
2292 xhci_device_context_offset ( xhci, ctx ) );
2293
2294 /* Allocate transfer ring */
2295 if ( ( rc = xhci_ring_alloc ( xhci, &endpoint->ring,
2296 XHCI_TRANSFER_TRBS_LOG2,
2297 slot->id, ctx, 0 ) ) != 0 )
2298 goto err_ring_alloc;
2299
2300 /* Configure endpoint, if applicable */
2301 if ( ( ctx != XHCI_CTX_EP0 ) &&
2302 ( ( rc = xhci_configure_endpoint ( xhci, slot, endpoint ) ) != 0 ))
2303 goto err_configure_endpoint;
2304
2305 DBGC2 ( xhci, "XHCI %p slot %d ctx %d ring [%08lx,%08lx)\n",
2306 xhci, slot->id, ctx, virt_to_phys ( endpoint->ring.trb ),
2307 ( virt_to_phys ( endpoint->ring.trb ) + endpoint->ring.len ) );
2308 return 0;
2309
2310 xhci_deconfigure_endpoint ( xhci, slot, endpoint );
2311 err_configure_endpoint:
2312 xhci_ring_free ( &endpoint->ring );
2313 err_ring_alloc:
2314 slot->endpoint[ctx] = NULL;
2315 free ( endpoint );
2316 err_alloc:
2317 return rc;
2318 }
2319
2320 /**
2321 * Close endpoint
2322 *
2323 * @v ep USB endpoint
2324 */
2325 static void xhci_endpoint_close ( struct usb_endpoint *ep ) {
2326 struct xhci_endpoint *endpoint = usb_endpoint_get_hostdata ( ep );
2327 struct xhci_slot *slot = endpoint->slot;
2328 struct xhci_device *xhci = slot->xhci;
2329 struct io_buffer *iobuf;
2330 unsigned int ctx = endpoint->ctx;
2331
2332 /* Deconfigure endpoint, if applicable */
2333 if ( ctx != XHCI_CTX_EP0 )
2334 xhci_deconfigure_endpoint ( xhci, slot, endpoint );
2335
2336 /* Cancel any incomplete transfers */
2337 while ( xhci_ring_fill ( &endpoint->ring ) ) {
2338 iobuf = xhci_dequeue_multi ( &endpoint->ring );
2339 usb_complete_err ( ep, iobuf, -ECANCELED );
2340 }
2341
2342 /* Free endpoint */
2343 xhci_ring_free ( &endpoint->ring );
2344 slot->endpoint[ctx] = NULL;
2345 free ( endpoint );
2346 }
2347
2348 /**
2349 * Reset endpoint
2350 *
2351 * @v ep USB endpoint
2352 * @ret rc Return status code
2353 */
2354 static int xhci_endpoint_reset ( struct usb_endpoint *ep ) {
2355 struct xhci_endpoint *endpoint = usb_endpoint_get_hostdata ( ep );
2356 struct xhci_slot *slot = endpoint->slot;
2357 struct xhci_device *xhci = slot->xhci;
2358 int rc;
2359
2360 /* Reset endpoint context */
2361 if ( ( rc = xhci_reset_endpoint ( xhci, slot, endpoint ) ) != 0 )
2362 return rc;
2363
2364 /* Set transfer ring dequeue pointer */
2365 if ( ( rc = xhci_set_tr_dequeue_pointer ( xhci, slot, endpoint ) ) != 0)
2366 return rc;
2367
2368 DBGC ( xhci, "XHCI %p slot %d ctx %d reset\n",
2369 xhci, slot->id, endpoint->ctx );
2370 return 0;
2371 }
2372
2373 /**
2374 * Update MTU
2375 *
2376 * @v ep USB endpoint
2377 * @ret rc Return status code
2378 */
2379 static int xhci_endpoint_mtu ( struct usb_endpoint *ep ) {
2380 struct xhci_endpoint *endpoint = usb_endpoint_get_hostdata ( ep );
2381 struct xhci_slot *slot = endpoint->slot;
2382 struct xhci_device *xhci = slot->xhci;
2383 int rc;
2384
2385 /* Evaluate context */
2386 if ( ( rc = xhci_evaluate_context ( xhci, slot, endpoint ) ) != 0 )
2387 return rc;
2388
2389 return 0;
2390 }
2391
2392 /**
2393 * Enqueue message transfer
2394 *
2395 * @v ep USB endpoint
2396 * @v packet Setup packet
2397 * @v iobuf I/O buffer
2398 * @ret rc Return status code
2399 */
2400 static int xhci_endpoint_message ( struct usb_endpoint *ep,
2401 struct usb_setup_packet *packet,
2402 struct io_buffer *iobuf ) {
2403 struct xhci_endpoint *endpoint = usb_endpoint_get_hostdata ( ep );
2404 unsigned int input = ( le16_to_cpu ( packet->request ) & USB_DIR_IN );
2405 size_t len = iob_len ( iobuf );
2406 union xhci_trb trbs[ 1 /* setup */ + 1 /* possible data */ +
2407 1 /* status */ ];
2408 union xhci_trb *trb = trbs;
2409 struct xhci_trb_setup *setup;
2410 struct xhci_trb_data *data;
2411 struct xhci_trb_status *status;
2412 int rc;
2413
2414 /* Profile message transfers */
2415 profile_start ( &xhci_message_profiler );
2416
2417 /* Construct setup stage TRB */
2418 memset ( trbs, 0, sizeof ( trbs ) );
2419 setup = &(trb++)->setup;
2420 memcpy ( &setup->packet, packet, sizeof ( setup->packet ) );
2421 setup->len = cpu_to_le32 ( sizeof ( *packet ) );
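/* The IDT (Immediate Data) flag indicates that the setup packet
 * is carried within the TRB itself rather than referenced via a
 * data buffer pointer.
 */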
2422 setup->flags = XHCI_TRB_IDT;
2423 setup->type = XHCI_TRB_SETUP;
2424 if ( len )
2425 setup->direction = ( input ? XHCI_SETUP_IN : XHCI_SETUP_OUT );
2426
2427 /* Construct data stage TRB, if applicable */
2428 if ( len ) {
2429 data = &(trb++)->data;
2430 data->data = cpu_to_le64 ( virt_to_phys ( iobuf->data ) );
2431 data->len = cpu_to_le32 ( len );
2432 data->type = XHCI_TRB_DATA;
2433 data->direction = ( input ? XHCI_DATA_IN : XHCI_DATA_OUT );
2434 }
2435
2436 /* Construct status stage TRB */
2437 status = &(trb++)->status;
2438 status->flags = XHCI_TRB_IOC;
2439 status->type = XHCI_TRB_STATUS;
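/* The status stage always uses the opposite direction to the
 * data stage, or IN when there is no data stage.
 */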
2440 status->direction =
2441 ( ( len && input ) ? XHCI_STATUS_OUT : XHCI_STATUS_IN );
2442
2443 /* Enqueue TRBs */
2444 if ( ( rc = xhci_enqueue_multi ( &endpoint->ring, iobuf, trbs,
2445 ( trb - trbs ) ) ) != 0 )
2446 return rc;
2447
2448 /* Ring the doorbell */
2449 xhci_doorbell ( &endpoint->ring );
2450
2451 profile_stop ( &xhci_message_profiler );
2452 return 0;
2453 }
2454
2455 /**
2456 * Enqueue stream transfer
2457 *
2458 * @v ep USB endpoint
2459 * @v iobuf I/O buffer
2460 * @v terminate Terminate using a short packet
2461 * @ret rc Return status code
2462 */
2463 static int xhci_endpoint_stream ( struct usb_endpoint *ep,
2464 struct io_buffer *iobuf, int terminate ) {
2465 struct xhci_endpoint *endpoint = usb_endpoint_get_hostdata ( ep );
2466 union xhci_trb trbs[ 1 /* Normal */ + 1 /* Possible zero-length */ ];
2467 union xhci_trb *trb = trbs;
2468 struct xhci_trb_normal *normal;
2469 size_t len = iob_len ( iobuf );
2470 int rc;
2471
2472 /* Profile stream transfers */
2473 profile_start ( &xhci_stream_profiler );
2474
2475 /* Construct normal TRBs */
2476 memset ( &trbs, 0, sizeof ( trbs ) );
2477 normal = &(trb++)->normal;
2478 normal->data = cpu_to_le64 ( virt_to_phys ( iobuf->data ) );
2479 normal->len = cpu_to_le32 ( len );
2480 normal->type = XHCI_TRB_NORMAL;
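/* A transfer whose length is an exact multiple of the endpoint
 * MTU would not otherwise end with a short packet, so chain an
 * extra zero-length TRB to provide the terminating short packet.
 */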
2481 if ( terminate && ( ( len & ( ep->mtu - 1 ) ) == 0 ) ) {
2482 normal->flags = XHCI_TRB_CH;
2483 normal = &(trb++)->normal;
2484 normal->type = XHCI_TRB_NORMAL;
2485 }
2486 normal->flags = XHCI_TRB_IOC;
2487
2488 /* Enqueue TRBs */
2489 if ( ( rc = xhci_enqueue_multi ( &endpoint->ring, iobuf, trbs,
2490 ( trb - trbs ) ) ) != 0 )
2491 return rc;
2492
2493 /* Ring the doorbell */
2494 xhci_doorbell ( &endpoint->ring );
2495
2496 profile_stop ( &xhci_stream_profiler );
2497 return 0;
2498 }
2499
2500 /******************************************************************************
2501 *
2502 * Device operations
2503 *
2504 ******************************************************************************
2505 */
2506
2507 /**
2508 * Open device
2509 *
2510 * @v usb USB device
2511 * @ret rc Return status code
2512 */
2513 static int xhci_device_open ( struct usb_device *usb ) {
2514 struct xhci_device *xhci = usb_bus_get_hostdata ( usb->port->hub->bus );
2515 struct xhci_slot *slot;
2516 size_t len;
2517 int type;
2518 int id;
2519 int rc;
2520
2521 /* Determine applicable slot type */
2522 type = xhci_port_slot_type ( xhci, usb->port->address );
2523 if ( type < 0 ) {
2524 rc = type;
2525 DBGC ( xhci, "XHCI %p port %d has no slot type\n",
2526 xhci, usb->port->address );
2527 goto err_type;
2528 }
2529
2530 /* Allocate a device slot number */
2531 id = xhci_enable_slot ( xhci, type );
2532 if ( id < 0 ) {
2533 rc = id;
2534 goto err_enable_slot;
2535 }
2536 assert ( xhci->slot[id] == NULL );
2537
2538 /* Allocate and initialise structure */
2539 slot = zalloc ( sizeof ( *slot ) );
2540 if ( ! slot ) {
2541 rc = -ENOMEM;
2542 goto err_alloc;
2543 }
2544 usb_set_hostdata ( usb, slot );
2545 xhci->slot[id] = slot;
2546 slot->xhci = xhci;
2547 slot->usb = usb;
2548 slot->id = id;
2549
2550 /* Allocate a device context */
2551 len = xhci_device_context_offset ( xhci, XHCI_CTX_END );
2552 slot->context = malloc_dma ( len, xhci_align ( len ) );
2553 if ( ! slot->context ) {
2554 rc = -ENOMEM;
2555 goto err_alloc_context;
2556 }
2557 memset ( slot->context, 0, len );
2558
2559 /* Set device context base address */
2560 assert ( xhci->dcbaa[id] == 0 );
2561 xhci->dcbaa[id] = cpu_to_le64 ( virt_to_phys ( slot->context ) );
2562
2563 DBGC2 ( xhci, "XHCI %p slot %d device context [%08lx,%08lx) for %s\n",
2564 xhci, slot->id, virt_to_phys ( slot->context ),
2565 ( virt_to_phys ( slot->context ) + len ), usb->name );
2566 return 0;
2567
2568 xhci->dcbaa[id] = 0;
2569 free_dma ( slot->context, len );
2570 err_alloc_context:
2571 xhci->slot[id] = NULL;
2572 free ( slot );
2573 err_alloc:
2574 xhci_disable_slot ( xhci, id );
2575 err_enable_slot:
2576 err_type:
2577 return rc;
2578 }
2579
2580 /**
2581 * Close device
2582 *
2583 * @v usb USB device
2584 */
2585 static void xhci_device_close ( struct usb_device *usb ) {
2586 struct xhci_slot *slot = usb_get_hostdata ( usb );
2587 struct xhci_device *xhci = slot->xhci;
2588 size_t len = xhci_device_context_offset ( xhci, XHCI_CTX_END );
2589 unsigned int id = slot->id;
2590 int rc;
2591
2592 /* Disable slot */
2593 if ( ( rc = xhci_disable_slot ( xhci, id ) ) != 0 ) {
2594 /* Slot is still enabled. Leak the slot context,
2595 * since the controller may still write to this
2596 * memory, and leave the DCBAA entry intact.
2597 *
2598 * If the controller later reports that this same slot
2599 * has been re-enabled, then some assertions will be
2600 * triggered.
2601 */
2602 DBGC ( xhci, "XHCI %p slot %d leaking context memory\n",
2603 xhci, slot->id );
2604 slot->context = NULL;
2605 }
2606
2607 /* Free slot */
2608 if ( slot->context ) {
2609 free_dma ( slot->context, len );
2610 xhci->dcbaa[id] = 0;
2611 }
2612 xhci->slot[id] = NULL;
2613 free ( slot );
2614 }
2615
2616 /**
2617 * Assign device address
2618 *
2619 * @v usb USB device
2620 * @ret rc Return status code
2621 */
2622 static int xhci_device_address ( struct usb_device *usb ) {
2623 struct xhci_slot *slot = usb_get_hostdata ( usb );
2624 struct xhci_device *xhci = slot->xhci;
2625 struct usb_port *port = usb->port;
2626 struct usb_port *root_port;
2627 int psiv;
2628 int rc;
2629
2630 /* Calculate route string */
2631 slot->route = usb_route_string ( usb );
2632
2633 /* Calculate root hub port number */
2634 root_port = usb_root_hub_port ( usb );
2635 slot->port = root_port->address;
2636
2637 /* Calculate protocol speed ID */
2638 psiv = xhci_port_psiv ( xhci, slot->port, port->speed );
2639 if ( psiv < 0 ) {
2640 rc = psiv;
2641 return rc;
2642 }
2643 slot->psiv = psiv;
2644
2645 /* Address device */
2646 if ( ( rc = xhci_address_device ( xhci, slot ) ) != 0 )
2647 return rc;
2648
2649 return 0;
2650 }
2651
2652 /******************************************************************************
2653 *
2654 * Bus operations
2655 *
2656 ******************************************************************************
2657 */
2658
2659 /**
2660 * Open USB bus
2661 *
2662 * @v bus USB bus
2663 * @ret rc Return status code
2664 */
2665 static int xhci_bus_open ( struct usb_bus *bus ) {
2666 struct xhci_device *xhci = usb_bus_get_hostdata ( bus );
2667 int rc;
2668
2669 /* Allocate device slot array (slot IDs are one-based) */
2670 xhci->slot = zalloc ( ( xhci->slots + 1 ) * sizeof ( xhci->slot[0] ) );
2671 if ( ! xhci->slot ) {
2672 rc = -ENOMEM;
2673 goto err_slot_alloc;
2674 }
2675
2676 /* Allocate device context base address array */
2677 if ( ( rc = xhci_dcbaa_alloc ( xhci ) ) != 0 )
2678 goto err_dcbaa_alloc;
2679
2680 /* Allocate scratchpad buffers */
2681 if ( ( rc = xhci_scratchpad_alloc ( xhci ) ) != 0 )
2682 goto err_scratchpad_alloc;
2683
2684 /* Allocate command ring */
2685 if ( ( rc = xhci_command_alloc ( xhci ) ) != 0 )
2686 goto err_command_alloc;
2687
2688 /* Allocate event ring */
2689 if ( ( rc = xhci_event_alloc ( xhci ) ) != 0 )
2690 goto err_event_alloc;
2691
2692 /* Start controller */
2693 xhci_run ( xhci );
2694
2695 return 0;
2696
2697 xhci_stop ( xhci );
2698 xhci_event_free ( xhci );
2699 err_event_alloc:
2700 xhci_command_free ( xhci );
2701 err_command_alloc:
2702 xhci_scratchpad_free ( xhci );
2703 err_scratchpad_alloc:
2704 xhci_dcbaa_free ( xhci );
2705 err_dcbaa_alloc:
2706 free ( xhci->slot );
2707 err_slot_alloc:
2708 return rc;
2709 }
2710
2711 /**
2712 * Close USB bus
2713 *
2714 * @v bus USB bus
2715 */
2716 static void xhci_bus_close ( struct usb_bus *bus ) {
2717 struct xhci_device *xhci = usb_bus_get_hostdata ( bus );
2718 unsigned int i;
2719
2720 /* Sanity checks */
2721 assert ( xhci->slot != NULL );
2722 for ( i = 0 ; i <= xhci->slots ; i++ )
2723 assert ( xhci->slot[i] == NULL );
2724
2725 xhci_stop ( xhci );
2726 xhci_event_free ( xhci );
2727 xhci_command_free ( xhci );
2728 xhci_scratchpad_free ( xhci );
2729 xhci_dcbaa_free ( xhci );
2730 free ( xhci->slot );
2731 }
2732
2733 /**
2734 * Poll USB bus
2735 *
2736 * @v bus USB bus
2737 */
2738 static void xhci_bus_poll ( struct usb_bus *bus ) {
2739 struct xhci_device *xhci = usb_bus_get_hostdata ( bus );
2740
2741 /* Poll event ring */
2742 xhci_event_poll ( xhci );
2743 }
2744
2745 /******************************************************************************
2746 *
2747 * Root hub operations
2748 *
2749 ******************************************************************************
2750 */
2751
2752 /**
2753 * Open root hub
2754 *
2755 * @v hub USB hub
2756 * @ret rc Return status code
2757 */
2758 static int xhci_hub_open ( struct usb_hub *hub ) {
2759 struct usb_bus *bus = hub->bus;
2760 struct xhci_device *xhci = usb_bus_get_hostdata ( bus );
2761 struct usb_port *port;
2762 uint32_t portsc;
2763 unsigned int i;
2764
2765 /* Enable power to all ports */
2766 for ( i = 1 ; i <= xhci->ports ; i++ ) {
2767 portsc = readl ( xhci->op + XHCI_OP_PORTSC ( i ) );
2768 portsc &= XHCI_PORTSC_PRESERVE;
2769 portsc |= XHCI_PORTSC_PP;
2770 writel ( portsc, xhci->op + XHCI_OP_PORTSC ( i ) );
2771 }
2772
2773 /* xHCI spec requires us to wait 20ms after potentially
2774 * enabling power to a port.
2775 */
2776 mdelay ( XHCI_PORT_POWER_DELAY_MS );
2777
2778 /* USB3 ports may power up as Disabled */
2779 for ( i = 1 ; i <= xhci->ports ; i++ ) {
2780 portsc = readl ( xhci->op + XHCI_OP_PORTSC ( i ) );
2781 port = usb_port ( hub, i );
2782 if ( ( port->protocol >= USB_PROTO_3_0 ) &&
2783 ( ( portsc & XHCI_PORTSC_PLS_MASK ) ==
2784 XHCI_PORTSC_PLS_DISABLED ) ) {
2785 /* Force link state to RxDetect */
2786 portsc &= XHCI_PORTSC_PRESERVE;
2787 portsc |= ( XHCI_PORTSC_PLS_RXDETECT | XHCI_PORTSC_LWS);
2788 writel ( portsc, xhci->op + XHCI_OP_PORTSC ( i ) );
2789 }
2790 }
2791
2792 /* Some xHCI cards seem to require an additional delay after
2793 * setting the link state to RxDetect.
2794 */
2795 mdelay ( XHCI_LINK_STATE_DELAY_MS );
2796
2797 /* Record hub driver private data */
2798 usb_hub_set_drvdata ( hub, xhci );
2799
2800 return 0;
2801 }
2802
2803 /**
2804 * Close root hub
2805 *
2806 * @v hub USB hub
2807 */
2808 static void xhci_hub_close ( struct usb_hub *hub ) {
2809
2810 /* Clear hub driver private data */
2811 usb_hub_set_drvdata ( hub, NULL );
2812 }
2813
2814 /**
2815 * Enable port
2816 *
2817 * @v hub USB hub
2818 * @v port USB port
2819 * @ret rc Return status code
2820 */
2821 static int xhci_hub_enable ( struct usb_hub *hub, struct usb_port *port ) {
2822 struct xhci_device *xhci = usb_hub_get_drvdata ( hub );
2823 uint32_t portsc;
2824 unsigned int i;
2825
2826 /* Reset port if applicable */
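/* USB2 ports must be explicitly reset in order to become
 * enabled; USB3 ports are enabled automatically once link
 * training succeeds.
 */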
2827 if ( port->protocol < USB_PROTO_3_0 ) {
2828 portsc = readl ( xhci->op + XHCI_OP_PORTSC ( port->address ) );
2829 portsc &= XHCI_PORTSC_PRESERVE;
2830 portsc |= XHCI_PORTSC_PR;
2831 writel ( portsc, xhci->op + XHCI_OP_PORTSC ( port->address ) );
2832 }
2833
2834 /* Wait for port to become enabled */
2835 for ( i = 0 ; i < XHCI_PORT_RESET_MAX_WAIT_MS ; i++ ) {
2836
2837 /* Check port status */
2838 portsc = readl ( xhci->op + XHCI_OP_PORTSC ( port->address ) );
2839 if ( portsc & XHCI_PORTSC_PED )
2840 return 0;
2841
2842 /* Delay */
2843 mdelay ( 1 );
2844 }
2845
2846 DBGC ( xhci, "XHCI %p timed out waiting for port %d to enable\n",
2847 xhci, port->address );
2848 return -ETIMEDOUT;
2849 }
2850
2851 /**
2852 * Disable port
2853 *
2854 * @v hub USB hub
2855 * @v port USB port
2856 * @ret rc Return status code
2857 */
2858 static int xhci_hub_disable ( struct usb_hub *hub, struct usb_port *port ) {
2859 struct xhci_device *xhci = usb_hub_get_drvdata ( hub );
2860 uint32_t portsc;
2861
2862 /* Disable port */
2863 portsc = readl ( xhci->op + XHCI_OP_PORTSC ( port->address ) );
2864 portsc &= XHCI_PORTSC_PRESERVE;
2865 portsc |= XHCI_PORTSC_PED;
2866 writel ( portsc, xhci->op + XHCI_OP_PORTSC ( port->address ) );
2867
2868 return 0;
2869 }
2870
2871 /**
2872 * Update root hub port speed
2873 *
2874 * @v hub USB hub
2875 * @v port USB port
2876 * @ret rc Return status code
2877 */
2878 static int xhci_hub_speed ( struct usb_hub *hub, struct usb_port *port ) {
2879 struct xhci_device *xhci = usb_hub_get_drvdata ( hub );
2880 uint32_t portsc;
2881 unsigned int psiv;
2882 int ccs;
2883 int ped;
2884 int speed;
2885 int rc;
2886
2887 /* Read port status */
2888 portsc = readl ( xhci->op + XHCI_OP_PORTSC ( port->address ) );
2889 DBGC2 ( xhci, "XHCI %p port %d status is %08x\n",
2890 xhci, port->address, portsc );
2891
2892 /* Check whether or not port is connected */
2893 ccs = ( portsc & XHCI_PORTSC_CCS );
2894 if ( ! ccs ) {
2895 port->speed = USB_SPEED_NONE;
2896 return 0;
2897 }
2898
2899 /* For USB2 ports, the PSIV field is not valid until the port
2900 * completes reset and becomes enabled.
2901 */
2902 ped = ( portsc & XHCI_PORTSC_PED );
2903 if ( ( port->protocol < USB_PROTO_3_0 ) && ! ped ) {
2904 port->speed = USB_SPEED_FULL;
2905 return 0;
2906 }
2907
2908 /* Get port speed and map to generic USB speed */
2909 psiv = XHCI_PORTSC_PSIV ( portsc );
2910 speed = xhci_port_speed ( xhci, port->address, psiv );
2911 if ( speed < 0 ) {
2912 rc = speed;
2913 return rc;
2914 }
2915
2916 port->speed = speed;
2917 return 0;
2918 }
2919
2920 /******************************************************************************
2921 *
2922 * PCI interface
2923 *
2924 ******************************************************************************
2925 */
2926
2927 /** USB host controller operations */
2928 static struct usb_host_operations xhci_operations = {
2929 .endpoint = {
2930 .open = xhci_endpoint_open,
2931 .close = xhci_endpoint_close,
2932 .reset = xhci_endpoint_reset,
2933 .mtu = xhci_endpoint_mtu,
2934 .message = xhci_endpoint_message,
2935 .stream = xhci_endpoint_stream,
2936 },
2937 .device = {
2938 .open = xhci_device_open,
2939 .close = xhci_device_close,
2940 .address = xhci_device_address,
2941 },
2942 .bus = {
2943 .open = xhci_bus_open,
2944 .close = xhci_bus_close,
2945 .poll = xhci_bus_poll,
2946 },
2947 .hub = {
2948 .open = xhci_hub_open,
2949 .close = xhci_hub_close,
2950 .enable = xhci_hub_enable,
2951 .disable = xhci_hub_disable,
2952 .speed = xhci_hub_speed,
2953 },
2954 };
2955
2956 /**
2957 * Probe PCI device
2958 *
2959 * @v pci PCI device
2960 * @ret rc Return status code
2961 */
2962 static int xhci_probe ( struct pci_device *pci ) {
2963 struct xhci_device *xhci;
2964 struct usb_port *port;
2965 unsigned long bar_start;
2966 size_t bar_size;
2967 unsigned int i;
2968 int rc;
2969
2970 /* Allocate and initialise structure */
2971 xhci = zalloc ( sizeof ( *xhci ) );
2972 if ( ! xhci ) {
2973 rc = -ENOMEM;
2974 goto err_alloc;
2975 }
2976
2977 /* Fix up PCI device */
2978 adjust_pci_device ( pci );
2979
2980 /* Map registers */
2981 bar_start = pci_bar_start ( pci, XHCI_BAR );
2982 bar_size = pci_bar_size ( pci, XHCI_BAR );
2983 xhci->regs = ioremap ( bar_start, bar_size );
2984 if ( ! xhci->regs ) {
2985 rc = -ENODEV;
2986 goto err_ioremap;
2987 }
2988
2989 /* Initialise xHCI device */
2990 xhci_init ( xhci, xhci->regs );
2991
2992 /* Initialise USB legacy support and claim ownership */
2993 xhci_legacy_init ( xhci );
2994 if ( ( rc = xhci_legacy_claim ( xhci ) ) != 0 )
2995 goto err_legacy_claim;
2996
2997 /* Reset device */
2998 if ( ( rc = xhci_reset ( xhci ) ) != 0 )
2999 goto err_reset;
3000
3001 /* Allocate USB bus */
3002 xhci->bus = alloc_usb_bus ( &pci->dev, xhci->ports,
3003 &xhci_operations );
3004 if ( ! xhci->bus ) {
3005 rc = -ENOMEM;
3006 goto err_alloc_bus;
3007 }
3008 usb_bus_set_hostdata ( xhci->bus, xhci );
3009 usb_hub_set_drvdata ( xhci->bus->hub, xhci );
3010
3011 /* Set port protocols */
3012 for ( i = 1 ; i <= xhci->ports ; i++ ) {
3013 port = usb_port ( xhci->bus->hub, i );
3014 port->protocol = xhci_port_protocol ( xhci, i );
3015 }
3016
3017 /* Register USB bus */
3018 if ( ( rc = register_usb_bus ( xhci->bus ) ) != 0 )
3019 goto err_register;
3020
3021 pci_set_drvdata ( pci, xhci );
3022 return 0;
3023
3024 unregister_usb_bus ( xhci->bus );
3025 err_register:
3026 free_usb_bus ( xhci->bus );
3027 err_alloc_bus:
3028 xhci_reset ( xhci );
3029 err_reset:
3030 xhci_legacy_release ( xhci );
3031 err_legacy_claim:
3032 iounmap ( xhci->regs );
3033 err_ioremap:
3034 free ( xhci );
3035 err_alloc:
3036 return rc;
3037 }
3038
3039 /**
3040 * Remove PCI device
3041 *
3042 * @v pci PCI device
3043 */
3044 static void xhci_remove ( struct pci_device *pci ) {
3045 struct xhci_device *xhci = pci_get_drvdata ( pci );
3046 struct usb_bus *bus = xhci->bus;
3047
3048 unregister_usb_bus ( bus );
3049 free_usb_bus ( bus );
3050 xhci_reset ( xhci );
3051 xhci_legacy_release ( xhci );
3052 iounmap ( xhci->regs );
3053 free ( xhci );
3054 }
3055
3056 /** XHCI PCI device IDs */
3057 static struct pci_device_id xhci_ids[] = {
3058 PCI_ROM ( 0xffff, 0xffff, "xhci", "xHCI", 0 ),
3059 };
3060
3061 /** XHCI PCI driver */
3062 struct pci_driver xhci_driver __pci_driver = {
3063 .ids = xhci_ids,
3064 .id_count = ( sizeof ( xhci_ids ) / sizeof ( xhci_ids[0] ) ),
3065 .class = PCI_CLASS ( PCI_CLASS_SERIAL, PCI_CLASS_SERIAL_USB,
3066 PCI_CLASS_SERIAL_USB_XHCI ),
3067 .probe = xhci_probe,
3068 .remove = xhci_remove,
3069 };