[linda] Use standard readq() and writeq() implementations
[ipxe.git] / src / drivers / infiniband / linda.c
1 /*
2 * Copyright (C) 2008 Michael Brown <mbrown@fensystems.co.uk>.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License as
6 * published by the Free Software Foundation; either version 2 of the
7 * License, or any later version.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
17 * 02110-1301, USA.
18 *
19 * You can also choose to distribute this program under the terms of
20 * the Unmodified Binary Distribution Licence (as given in the file
21 * COPYING.UBDL), provided that you have satisfied its requirements.
22 */
23
24 FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
25
26 #include <stdint.h>
27 #include <stdlib.h>
28 #include <errno.h>
29 #include <unistd.h>
30 #include <assert.h>
31 #include <ipxe/io.h>
32 #include <ipxe/pci.h>
33 #include <ipxe/infiniband.h>
34 #include <ipxe/i2c.h>
35 #include <ipxe/bitbash.h>
36 #include <ipxe/malloc.h>
37 #include <ipxe/iobuf.h>
38 #include "linda.h"
39
40 /**
41 * @file
42 *
43 * QLogic Linda Infiniband HCA
44 *
45 */
46
47 /** A Linda send work queue */
48 struct linda_send_work_queue {
49 /** Send buffer usage */
50 uint8_t *send_buf;
51 /** Producer index */
52 unsigned int prod;
53 /** Consumer index */
54 unsigned int cons;
55 };
56
57 /** A Linda receive work queue */
58 struct linda_recv_work_queue {
59 /** Receive header ring */
60 void *header;
61 /** Receive header producer offset (written by hardware) */
62 struct QIB_7220_scalar header_prod;
63 /** Receive header consumer offset */
64 unsigned int header_cons;
65 /** Offset within register space of the eager array */
66 unsigned long eager_array;
67 /** Number of entries in eager array */
68 unsigned int eager_entries;
69 /** Eager array producer index */
70 unsigned int eager_prod;
71 /** Eager array consumer index */
72 unsigned int eager_cons;
73 };
74
75 /** A Linda HCA */
76 struct linda {
77 /** Registers */
78 void *regs;
79
80 /** In-use contexts */
81 uint8_t used_ctx[LINDA_NUM_CONTEXTS];
82 /** Send work queues */
83 struct linda_send_work_queue send_wq[LINDA_NUM_CONTEXTS];
84 /** Receive work queues */
85 struct linda_recv_work_queue recv_wq[LINDA_NUM_CONTEXTS];
86
87 /** Offset within register space of the first send buffer */
88 unsigned long send_buffer_base;
89 /** Send buffer availability (reported by hardware) */
90 struct QIB_7220_SendBufAvail *sendbufavail;
91 /** Send buffer availability (maintained by software) */
92 uint8_t send_buf[LINDA_MAX_SEND_BUFS];
93 /** Send buffer availability producer counter */
94 unsigned int send_buf_prod;
95 /** Send buffer availability consumer counter */
96 unsigned int send_buf_cons;
97 /** Number of reserved send buffers (across all QPs) */
98 unsigned int reserved_send_bufs;
99
100 /** I2C bit-bashing interface */
101 struct i2c_bit_basher i2c;
102 /** I2C serial EEPROM */
103 struct i2c_device eeprom;
104 };
105
106 /***************************************************************************
107 *
108 * Linda register access
109 *
110 ***************************************************************************
111 *
112 * This card requires atomic 64-bit accesses. Strange things happen
113 * if you try to use 32-bit accesses; sometimes they work, sometimes
114 * they don't, sometimes you get random data.
115 */
116
117 /**
118 * Read Linda qword register
119 *
120 * @v linda Linda device
121 * @v qword Register buffer to read into
122 * @v offset Register offset
123 */
124 static void linda_readq ( struct linda *linda, uint64_t *qword,
125 unsigned long offset ) {
126 *qword = readq ( linda->regs + offset );
127 }
128 #define linda_readq( _linda, _ptr, _offset ) \
129 linda_readq ( (_linda), (_ptr)->u.qwords, (_offset) )
130 #define linda_readq_array8b( _linda, _ptr, _offset, _idx ) \
131 linda_readq ( (_linda), (_ptr), ( (_offset) + ( (_idx) * 8 ) ) )
132 #define linda_readq_array64k( _linda, _ptr, _offset, _idx ) \
133 linda_readq ( (_linda), (_ptr), ( (_offset) + ( (_idx) * 65536 ) ) )
134
135 /**
136 * Write Linda qword register
137 *
138 * @v linda Linda device
139 * @v qword Register buffer to write
140 * @v offset Register offset
141 */
142 static void linda_writeq ( struct linda *linda, const uint64_t *qword,
143 unsigned long offset ) {
144 writeq ( *qword, ( linda->regs + offset ) );
145 }
146 #define linda_writeq( _linda, _ptr, _offset ) \
147 linda_writeq ( (_linda), (_ptr)->u.qwords, (_offset) )
148 #define linda_writeq_array8b( _linda, _ptr, _offset, _idx ) \
149 linda_writeq ( (_linda), (_ptr), ( (_offset) + ( (_idx) * 8 ) ) )
150 #define linda_writeq_array64k( _linda, _ptr, _offset, _idx ) \
151 linda_writeq ( (_linda), (_ptr), ( (_offset) + ( (_idx) * 65536 ) ) )
152
153 /**
154 * Write Linda dword register
155 *
156 * @v linda Linda device
157 * @v dword Value to write
158 * @v offset Register offset
159 */
160 static void linda_writel ( struct linda *linda, uint32_t dword,
161 unsigned long offset ) {
162 writel ( dword, ( linda->regs + offset ) );
163 }
164
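/* Editor's note: an illustrative sketch (not part of the original
 * driver) of how these accessors are used throughout this file.  A
 * register is read atomically as a single 64-bit quantity into its
 * pseudo-bit structure, and individual fields are then extracted with
 * BIT_GET(), as in linda_link_state_changed() below:
 *
 *	struct QIB_7220_IBCStatus ibcstatus;
 *	unsigned int link_state;
 *
 *	linda_readq ( linda, &ibcstatus, QIB_7220_IBCStatus_offset );
 *	link_state = BIT_GET ( &ibcstatus, LinkState );
 *
 * The linda_readq() macro passes the pseudo-bit structure's qword
 * array to the underlying function, so the hardware always sees one
 * atomic readq() rather than two 32-bit reads.
 */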
165 /***************************************************************************
166 *
167 * Link state management
168 *
169 ***************************************************************************
170 */
171
172 /**
173 * Textual representation of link state
174 *
175 * @v link_state Link state
176 * @ret link_text Link state text
177 */
178 static const char * linda_link_state_text ( unsigned int link_state ) {
179 switch ( link_state ) {
180 case LINDA_LINK_STATE_DOWN: return "DOWN";
181 case LINDA_LINK_STATE_INIT: return "INIT";
182 case LINDA_LINK_STATE_ARM: return "ARM";
183 case LINDA_LINK_STATE_ACTIVE: return "ACTIVE";
184 case LINDA_LINK_STATE_ACT_DEFER:return "ACT_DEFER";
185 default: return "UNKNOWN";
186 }
187 }
188
189 /**
190 * Handle link state change
191 *
192  * @v ibdev		Infiniband device
193 */
194 static void linda_link_state_changed ( struct ib_device *ibdev ) {
195 struct linda *linda = ib_get_drvdata ( ibdev );
196 struct QIB_7220_IBCStatus ibcstatus;
197 struct QIB_7220_EXTCtrl extctrl;
198 unsigned int link_state;
199 unsigned int link_width;
200 unsigned int link_speed;
201
202 /* Read link state */
203 linda_readq ( linda, &ibcstatus, QIB_7220_IBCStatus_offset );
204 link_state = BIT_GET ( &ibcstatus, LinkState );
205 link_width = BIT_GET ( &ibcstatus, LinkWidthActive );
206 link_speed = BIT_GET ( &ibcstatus, LinkSpeedActive );
207 DBGC ( linda, "Linda %p link state %s (%s %s)\n", linda,
208 linda_link_state_text ( link_state ),
209 ( link_speed ? "DDR" : "SDR" ), ( link_width ? "x4" : "x1" ) );
210
211 /* Set LEDs according to link state */
212 linda_readq ( linda, &extctrl, QIB_7220_EXTCtrl_offset );
213 BIT_SET ( &extctrl, LEDPriPortGreenOn,
214 ( ( link_state >= LINDA_LINK_STATE_INIT ) ? 1 : 0 ) );
215 BIT_SET ( &extctrl, LEDPriPortYellowOn,
216 ( ( link_state >= LINDA_LINK_STATE_ACTIVE ) ? 1 : 0 ) );
217 linda_writeq ( linda, &extctrl, QIB_7220_EXTCtrl_offset );
218
219 /* Notify Infiniband core of link state change */
220 ibdev->port_state = ( link_state + 1 );
221 ibdev->link_width_active =
222 ( link_width ? IB_LINK_WIDTH_4X : IB_LINK_WIDTH_1X );
223 ibdev->link_speed_active =
224 ( link_speed ? IB_LINK_SPEED_DDR : IB_LINK_SPEED_SDR );
225 ib_link_state_changed ( ibdev );
226 }
227
228 /**
229 * Wait for link state change to take effect
230 *
231 * @v linda Linda device
232 * @v new_link_state Expected link state
233 * @ret rc Return status code
234 */
235 static int linda_link_state_check ( struct linda *linda,
236 unsigned int new_link_state ) {
237 struct QIB_7220_IBCStatus ibcstatus;
238 unsigned int link_state;
239 unsigned int i;
240
241 for ( i = 0 ; i < LINDA_LINK_STATE_MAX_WAIT_US ; i++ ) {
242 linda_readq ( linda, &ibcstatus, QIB_7220_IBCStatus_offset );
243 link_state = BIT_GET ( &ibcstatus, LinkState );
244 if ( link_state == new_link_state )
245 return 0;
246 udelay ( 1 );
247 }
248
249 DBGC ( linda, "Linda %p timed out waiting for link state %s\n",
250 	       linda, linda_link_state_text ( new_link_state ) );
251 return -ETIMEDOUT;
252 }
253
254 /**
255 * Set port information
256 *
257 * @v ibdev Infiniband device
258 * @v mad Set port information MAD
259 */
260 static int linda_set_port_info ( struct ib_device *ibdev, union ib_mad *mad ) {
261 struct linda *linda = ib_get_drvdata ( ibdev );
262 struct ib_port_info *port_info = &mad->smp.smp_data.port_info;
263 struct QIB_7220_IBCCtrl ibcctrl;
264 unsigned int port_state;
265 unsigned int link_state;
266
267 /* Set new link state */
268 port_state = ( port_info->link_speed_supported__port_state & 0xf );
269 if ( port_state ) {
270 link_state = ( port_state - 1 );
271 DBGC ( linda, "Linda %p set link state to %s (%x)\n", linda,
272 linda_link_state_text ( link_state ), link_state );
273 linda_readq ( linda, &ibcctrl, QIB_7220_IBCCtrl_offset );
274 BIT_SET ( &ibcctrl, LinkCmd, link_state );
275 linda_writeq ( linda, &ibcctrl, QIB_7220_IBCCtrl_offset );
276
277 /* Wait for link state change to take effect. Ignore
278 * errors; the current link state will be returned via
279 * the GetResponse MAD.
280 */
281 linda_link_state_check ( linda, link_state );
282 }
283
284 /* Detect and report link state change */
285 linda_link_state_changed ( ibdev );
286
287 return 0;
288 }
289
290 /**
291 * Set partition key table
292 *
293 * @v ibdev Infiniband device
294 * @v mad Set partition key table MAD
295 */
296 static int linda_set_pkey_table ( struct ib_device *ibdev __unused,
297 union ib_mad *mad __unused ) {
298 /* Nothing to do */
299 return 0;
300 }
301
302 /***************************************************************************
303 *
304 * Context allocation
305 *
306 ***************************************************************************
307 */
308
309 /**
310 * Map context number to QPN
311 *
312 * @v ctx Context index
313 * @ret qpn Queue pair number
314 */
315 static int linda_ctx_to_qpn ( unsigned int ctx ) {
316 /* This mapping is fixed by hardware */
317 return ( ctx * 2 );
318 }
319
320 /**
321 * Map QPN to context number
322 *
323 * @v qpn Queue pair number
324 * @ret ctx Context index
325 */
326 static int linda_qpn_to_ctx ( unsigned int qpn ) {
327 /* This mapping is fixed by hardware */
328 return ( qpn / 2 );
329 }
330
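/* Editor's note: a worked example of the fixed context/QPN mapping
 * above.  Only even queue pair numbers are usable:
 *
 *	linda_ctx_to_qpn ( 0 ) == 0	linda_qpn_to_ctx ( 0 ) == 0
 *	linda_ctx_to_qpn ( 3 ) == 6	linda_qpn_to_ctx ( 6 ) == 3
 */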
331 /**
332 * Allocate a context
333 *
334 * @v linda Linda device
335 * @ret ctx Context index, or negative error
336 */
337 static int linda_alloc_ctx ( struct linda *linda ) {
338 unsigned int ctx;
339
340 for ( ctx = 0 ; ctx < LINDA_NUM_CONTEXTS ; ctx++ ) {
341
342 if ( ! linda->used_ctx[ctx] ) {
343 			linda->used_ctx[ctx] = 1;
344 DBGC2 ( linda, "Linda %p CTX %d allocated\n",
345 linda, ctx );
346 return ctx;
347 }
348 }
349
350 DBGC ( linda, "Linda %p out of available contexts\n", linda );
351 return -ENOENT;
352 }
353
354 /**
355 * Free a context
356 *
357 * @v linda Linda device
358 * @v ctx Context index
359 */
360 static void linda_free_ctx ( struct linda *linda, unsigned int ctx ) {
361
362 linda->used_ctx[ctx] = 0;
363 DBGC2 ( linda, "Linda %p CTX %d freed\n", linda, ctx );
364 }
365
366 /***************************************************************************
367 *
368 * Send datapath
369 *
370 ***************************************************************************
371 */
372
373 /** Send buffer toggle bit
374 *
375 * We encode send buffers as 7 bits of send buffer index plus a single
376 * bit which should match the "check" bit in the SendBufAvail array.
377 */
378 #define LINDA_SEND_BUF_TOGGLE 0x80
379
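/* Editor's note: an illustrative example of the encoding above.  Send
 * buffer index 5 with the check bit set is encoded as
 * ( 5 | LINDA_SEND_BUF_TOGGLE ) == 0x85.  linda_send_buf_in_use()
 * below recovers the index as ( 0x85 & ~LINDA_SEND_BUF_TOGGLE ) == 5
 * and compares the check bit against the low bit of the corresponding
 * InUseCheck field in the SendBufAvail array;
 * linda_alloc_send_buf() below flips this bit each time a buffer is
 * handed out again.
 */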
380 /**
381 * Allocate a send buffer
382 *
383 * @v linda Linda device
384 * @ret send_buf Send buffer
385 *
386 * You must guarantee that a send buffer is available. This is done
387 * by refusing to allocate more TX WQEs in total than the number of
388 * available send buffers.
389 */
390 static unsigned int linda_alloc_send_buf ( struct linda *linda ) {
391 unsigned int send_buf;
392
393 send_buf = linda->send_buf[linda->send_buf_cons];
394 send_buf ^= LINDA_SEND_BUF_TOGGLE;
395 linda->send_buf_cons = ( ( linda->send_buf_cons + 1 ) %
396 LINDA_MAX_SEND_BUFS );
397 return send_buf;
398 }
399
400 /**
401 * Free a send buffer
402 *
403 * @v linda Linda device
404 * @v send_buf Send buffer
405 */
406 static void linda_free_send_buf ( struct linda *linda,
407 unsigned int send_buf ) {
408 linda->send_buf[linda->send_buf_prod] = send_buf;
409 linda->send_buf_prod = ( ( linda->send_buf_prod + 1 ) %
410 LINDA_MAX_SEND_BUFS );
411 }
412
413 /**
414 * Check to see if send buffer is in use
415 *
416 * @v linda Linda device
417 * @v send_buf Send buffer
418 * @ret in_use Send buffer is in use
419 */
420 static int linda_send_buf_in_use ( struct linda *linda,
421 unsigned int send_buf ) {
422 unsigned int send_idx;
423 unsigned int send_check;
424 unsigned int inusecheck;
425 unsigned int inuse;
426 unsigned int check;
427
428 send_idx = ( send_buf & ~LINDA_SEND_BUF_TOGGLE );
429 send_check = ( !! ( send_buf & LINDA_SEND_BUF_TOGGLE ) );
430 inusecheck = BIT_GET ( linda->sendbufavail, InUseCheck[send_idx] );
431 inuse = ( !! ( inusecheck & 0x02 ) );
432 check = ( !! ( inusecheck & 0x01 ) );
433 return ( inuse || ( check != send_check ) );
434 }
435
436 /**
437 * Calculate starting offset for send buffer
438 *
439 * @v linda Linda device
440 * @v send_buf Send buffer
441 * @ret offset Starting offset
442 */
443 static unsigned long linda_send_buffer_offset ( struct linda *linda,
444 unsigned int send_buf ) {
445 return ( linda->send_buffer_base +
446 ( ( send_buf & ~LINDA_SEND_BUF_TOGGLE ) *
447 LINDA_SEND_BUF_SIZE ) );
448 }
449
450 /**
451 * Create send work queue
452 *
453 * @v linda Linda device
454 * @v qp Queue pair
455 */
456 static int linda_create_send_wq ( struct linda *linda,
457 struct ib_queue_pair *qp ) {
458 struct ib_work_queue *wq = &qp->send;
459 struct linda_send_work_queue *linda_wq = ib_wq_get_drvdata ( wq );
460 int rc;
461
462 /* Reserve send buffers */
463 if ( ( linda->reserved_send_bufs + qp->send.num_wqes ) >
464 LINDA_MAX_SEND_BUFS ) {
465 DBGC ( linda, "Linda %p out of send buffers (have %d, used "
466 "%d, need %d)\n", linda, LINDA_MAX_SEND_BUFS,
467 linda->reserved_send_bufs, qp->send.num_wqes );
468 rc = -ENOBUFS;
469 goto err_reserve_bufs;
470 }
471 linda->reserved_send_bufs += qp->send.num_wqes;
472
473 /* Reset work queue */
474 linda_wq->prod = 0;
475 linda_wq->cons = 0;
476
477 	/* Allocate space for send buffer usage list */
478 linda_wq->send_buf = zalloc ( qp->send.num_wqes *
479 sizeof ( linda_wq->send_buf[0] ) );
480 if ( ! linda_wq->send_buf ) {
481 rc = -ENOBUFS;
482 goto err_alloc_send_buf;
483 }
484
485 return 0;
486
487 free ( linda_wq->send_buf );
488 err_alloc_send_buf:
489 linda->reserved_send_bufs -= qp->send.num_wqes;
490 err_reserve_bufs:
491 return rc;
492 }
493
494 /**
495 * Destroy send work queue
496 *
497 * @v linda Linda device
498 * @v qp Queue pair
499 */
500 static void linda_destroy_send_wq ( struct linda *linda,
501 struct ib_queue_pair *qp ) {
502 struct ib_work_queue *wq = &qp->send;
503 struct linda_send_work_queue *linda_wq = ib_wq_get_drvdata ( wq );
504
505 free ( linda_wq->send_buf );
506 linda->reserved_send_bufs -= qp->send.num_wqes;
507 }
508
509 /**
510 * Initialise send datapath
511 *
512 * @v linda Linda device
513 * @ret rc Return status code
514 */
515 static int linda_init_send ( struct linda *linda ) {
516 struct QIB_7220_SendBufBase sendbufbase;
517 struct QIB_7220_SendBufAvailAddr sendbufavailaddr;
518 struct QIB_7220_SendCtrl sendctrl;
519 unsigned int i;
520 int rc;
521
522 /* Retrieve SendBufBase */
523 linda_readq ( linda, &sendbufbase, QIB_7220_SendBufBase_offset );
524 linda->send_buffer_base = BIT_GET ( &sendbufbase,
525 BaseAddr_SmallPIO );
526 DBGC ( linda, "Linda %p send buffers at %lx\n",
527 linda, linda->send_buffer_base );
528
529 /* Initialise the send_buf[] array */
530 for ( i = 0 ; i < LINDA_MAX_SEND_BUFS ; i++ )
531 linda->send_buf[i] = i;
532
533 /* Allocate space for the SendBufAvail array */
534 linda->sendbufavail = malloc_dma ( sizeof ( *linda->sendbufavail ),
535 LINDA_SENDBUFAVAIL_ALIGN );
536 if ( ! linda->sendbufavail ) {
537 rc = -ENOMEM;
538 goto err_alloc_sendbufavail;
539 }
540 	memset ( linda->sendbufavail, 0, sizeof ( *linda->sendbufavail ) );
541
542 /* Program SendBufAvailAddr into the hardware */
543 memset ( &sendbufavailaddr, 0, sizeof ( sendbufavailaddr ) );
544 BIT_FILL_1 ( &sendbufavailaddr, SendBufAvailAddr,
545 ( virt_to_bus ( linda->sendbufavail ) >> 6 ) );
546 linda_writeq ( linda, &sendbufavailaddr,
547 QIB_7220_SendBufAvailAddr_offset );
548
549 /* Enable sending and DMA of SendBufAvail */
550 memset ( &sendctrl, 0, sizeof ( sendctrl ) );
551 BIT_FILL_2 ( &sendctrl,
552 SendBufAvailUpd, 1,
553 SPioEnable, 1 );
554 linda_writeq ( linda, &sendctrl, QIB_7220_SendCtrl_offset );
555
556 return 0;
557
558 free_dma ( linda->sendbufavail, sizeof ( *linda->sendbufavail ) );
559 err_alloc_sendbufavail:
560 return rc;
561 }
562
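/* Editor's note: an illustrative detail of the SendBufAvail setup
 * above (hypothetical address).  Only bits 6 and upwards of the bus
 * address are programmed into SendBufAvailAddr, so the DMA'd array
 * must be 64-byte aligned, which is presumably why malloc_dma() is
 * passed LINDA_SENDBUFAVAIL_ALIGN.  For example, for a bus address of
 * 0x12345e40:
 *
 *	SendBufAvailAddr = ( 0x12345e40 >> 6 ) == 0x48d179
 */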
563 /**
564 * Shut down send datapath
565 *
566 * @v linda Linda device
567 */
568 static void linda_fini_send ( struct linda *linda ) {
569 struct QIB_7220_SendCtrl sendctrl;
570
571 /* Disable sending and DMA of SendBufAvail */
572 memset ( &sendctrl, 0, sizeof ( sendctrl ) );
573 linda_writeq ( linda, &sendctrl, QIB_7220_SendCtrl_offset );
574 mb();
575
576 /* Ensure hardware has seen this disable */
577 linda_readq ( linda, &sendctrl, QIB_7220_SendCtrl_offset );
578
579 free_dma ( linda->sendbufavail, sizeof ( *linda->sendbufavail ) );
580 }
581
582 /***************************************************************************
583 *
584 * Receive datapath
585 *
586 ***************************************************************************
587 */
588
589 /**
590 * Create receive work queue
591 *
592 * @v linda Linda device
593 * @v qp Queue pair
594 * @ret rc Return status code
595 */
596 static int linda_create_recv_wq ( struct linda *linda,
597 struct ib_queue_pair *qp ) {
598 struct ib_work_queue *wq = &qp->recv;
599 struct linda_recv_work_queue *linda_wq = ib_wq_get_drvdata ( wq );
600 struct QIB_7220_RcvHdrAddr0 rcvhdraddr;
601 struct QIB_7220_RcvHdrTailAddr0 rcvhdrtailaddr;
602 struct QIB_7220_RcvHdrHead0 rcvhdrhead;
603 struct QIB_7220_scalar rcvegrindexhead;
604 struct QIB_7220_RcvCtrl rcvctrl;
605 unsigned int ctx = linda_qpn_to_ctx ( qp->qpn );
606 int rc;
607
608 /* Reset context information */
609 memset ( &linda_wq->header_prod, 0,
610 sizeof ( linda_wq->header_prod ) );
611 linda_wq->header_cons = 0;
612 linda_wq->eager_prod = 0;
613 linda_wq->eager_cons = 0;
614
615 /* Allocate receive header buffer */
616 linda_wq->header = malloc_dma ( LINDA_RECV_HEADERS_SIZE,
617 LINDA_RECV_HEADERS_ALIGN );
618 if ( ! linda_wq->header ) {
619 rc = -ENOMEM;
620 goto err_alloc_header;
621 }
622
623 /* Enable context in hardware */
624 memset ( &rcvhdraddr, 0, sizeof ( rcvhdraddr ) );
625 BIT_FILL_1 ( &rcvhdraddr, RcvHdrAddr0,
626 ( virt_to_bus ( linda_wq->header ) >> 2 ) );
627 linda_writeq_array8b ( linda, &rcvhdraddr,
628 QIB_7220_RcvHdrAddr0_offset, ctx );
629 memset ( &rcvhdrtailaddr, 0, sizeof ( rcvhdrtailaddr ) );
630 BIT_FILL_1 ( &rcvhdrtailaddr, RcvHdrTailAddr0,
631 ( virt_to_bus ( &linda_wq->header_prod ) >> 2 ) );
632 linda_writeq_array8b ( linda, &rcvhdrtailaddr,
633 QIB_7220_RcvHdrTailAddr0_offset, ctx );
634 memset ( &rcvhdrhead, 0, sizeof ( rcvhdrhead ) );
635 BIT_FILL_1 ( &rcvhdrhead, counter, 1 );
636 linda_writeq_array64k ( linda, &rcvhdrhead,
637 QIB_7220_RcvHdrHead0_offset, ctx );
638 memset ( &rcvegrindexhead, 0, sizeof ( rcvegrindexhead ) );
639 BIT_FILL_1 ( &rcvegrindexhead, Value, 1 );
640 linda_writeq_array64k ( linda, &rcvegrindexhead,
641 QIB_7220_RcvEgrIndexHead0_offset, ctx );
642 linda_readq ( linda, &rcvctrl, QIB_7220_RcvCtrl_offset );
643 BIT_SET ( &rcvctrl, PortEnable[ctx], 1 );
644 BIT_SET ( &rcvctrl, IntrAvail[ctx], 1 );
645 linda_writeq ( linda, &rcvctrl, QIB_7220_RcvCtrl_offset );
646
647 DBGC ( linda, "Linda %p QPN %ld CTX %d hdrs [%lx,%lx) prod %lx\n",
648 linda, qp->qpn, ctx, virt_to_bus ( linda_wq->header ),
649 ( virt_to_bus ( linda_wq->header ) + LINDA_RECV_HEADERS_SIZE ),
650 virt_to_bus ( &linda_wq->header_prod ) );
651 return 0;
652
653 free_dma ( linda_wq->header, LINDA_RECV_HEADERS_SIZE );
654 err_alloc_header:
655 return rc;
656 }
657
658 /**
659 * Destroy receive work queue
660 *
661 * @v linda Linda device
662 * @v qp Queue pair
663 */
664 static void linda_destroy_recv_wq ( struct linda *linda,
665 struct ib_queue_pair *qp ) {
666 struct ib_work_queue *wq = &qp->recv;
667 struct linda_recv_work_queue *linda_wq = ib_wq_get_drvdata ( wq );
668 struct QIB_7220_RcvCtrl rcvctrl;
669 unsigned int ctx = linda_qpn_to_ctx ( qp->qpn );
670
671 /* Disable context in hardware */
672 linda_readq ( linda, &rcvctrl, QIB_7220_RcvCtrl_offset );
673 BIT_SET ( &rcvctrl, PortEnable[ctx], 0 );
674 BIT_SET ( &rcvctrl, IntrAvail[ctx], 0 );
675 linda_writeq ( linda, &rcvctrl, QIB_7220_RcvCtrl_offset );
676
677 /* Make sure the hardware has seen that the context is disabled */
678 linda_readq ( linda, &rcvctrl, QIB_7220_RcvCtrl_offset );
679 mb();
680
681 /* Free headers ring */
682 free_dma ( linda_wq->header, LINDA_RECV_HEADERS_SIZE );
683
684 /* Free context */
685 linda_free_ctx ( linda, ctx );
686 }
687
688 /**
689 * Initialise receive datapath
690 *
691 * @v linda Linda device
692 * @ret rc Return status code
693 */
694 static int linda_init_recv ( struct linda *linda ) {
695 struct QIB_7220_RcvCtrl rcvctrl;
696 struct QIB_7220_scalar rcvegrbase;
697 struct QIB_7220_scalar rcvhdrentsize;
698 struct QIB_7220_scalar rcvhdrcnt;
699 struct QIB_7220_RcvBTHQP rcvbthqp;
700 unsigned int portcfg;
701 unsigned long egrbase;
702 unsigned int eager_array_size_0;
703 unsigned int eager_array_size_other;
704 unsigned int ctx;
705
706 /* Select configuration based on number of contexts */
707 switch ( LINDA_NUM_CONTEXTS ) {
708 case 5:
709 portcfg = LINDA_PORTCFG_5CTX;
710 eager_array_size_0 = LINDA_EAGER_ARRAY_SIZE_5CTX_0;
711 eager_array_size_other = LINDA_EAGER_ARRAY_SIZE_5CTX_OTHER;
712 break;
713 case 9:
714 portcfg = LINDA_PORTCFG_9CTX;
715 eager_array_size_0 = LINDA_EAGER_ARRAY_SIZE_9CTX_0;
716 eager_array_size_other = LINDA_EAGER_ARRAY_SIZE_9CTX_OTHER;
717 break;
718 case 17:
719 portcfg = LINDA_PORTCFG_17CTX;
720 eager_array_size_0 = LINDA_EAGER_ARRAY_SIZE_17CTX_0;
721 eager_array_size_other = LINDA_EAGER_ARRAY_SIZE_17CTX_OTHER;
722 break;
723 default:
724 linker_assert ( 0, invalid_LINDA_NUM_CONTEXTS );
725 return -EINVAL;
726 }
727
728 /* Configure number of contexts */
729 memset ( &rcvctrl, 0, sizeof ( rcvctrl ) );
730 BIT_FILL_3 ( &rcvctrl,
731 TailUpd, 1,
732 PortCfg, portcfg,
733 RcvQPMapEnable, 1 );
734 linda_writeq ( linda, &rcvctrl, QIB_7220_RcvCtrl_offset );
735
736 /* Configure receive header buffer sizes */
737 memset ( &rcvhdrcnt, 0, sizeof ( rcvhdrcnt ) );
738 BIT_FILL_1 ( &rcvhdrcnt, Value, LINDA_RECV_HEADER_COUNT );
739 linda_writeq ( linda, &rcvhdrcnt, QIB_7220_RcvHdrCnt_offset );
740 memset ( &rcvhdrentsize, 0, sizeof ( rcvhdrentsize ) );
741 BIT_FILL_1 ( &rcvhdrentsize, Value, ( LINDA_RECV_HEADER_SIZE >> 2 ) );
742 linda_writeq ( linda, &rcvhdrentsize, QIB_7220_RcvHdrEntSize_offset );
743
744 /* Calculate eager array start addresses for each context */
745 linda_readq ( linda, &rcvegrbase, QIB_7220_RcvEgrBase_offset );
746 egrbase = BIT_GET ( &rcvegrbase, Value );
747 linda->recv_wq[0].eager_array = egrbase;
748 linda->recv_wq[0].eager_entries = eager_array_size_0;
749 egrbase += ( eager_array_size_0 * sizeof ( struct QIB_7220_RcvEgr ) );
750 for ( ctx = 1 ; ctx < LINDA_NUM_CONTEXTS ; ctx++ ) {
751 linda->recv_wq[ctx].eager_array = egrbase;
752 linda->recv_wq[ctx].eager_entries = eager_array_size_other;
753 egrbase += ( eager_array_size_other *
754 sizeof ( struct QIB_7220_RcvEgr ) );
755 }
756 for ( ctx = 0 ; ctx < LINDA_NUM_CONTEXTS ; ctx++ ) {
757 DBGC ( linda, "Linda %p CTX %d eager array at %lx (%d "
758 "entries)\n", linda, ctx,
759 linda->recv_wq[ctx].eager_array,
760 linda->recv_wq[ctx].eager_entries );
761 }
762
763 /* Set the BTH QP for Infinipath packets to an unused value */
764 memset ( &rcvbthqp, 0, sizeof ( rcvbthqp ) );
765 BIT_FILL_1 ( &rcvbthqp, RcvBTHQP, LINDA_QP_IDETH );
766 linda_writeq ( linda, &rcvbthqp, QIB_7220_RcvBTHQP_offset );
767
768 return 0;
769 }
770
771 /**
772 * Shut down receive datapath
773 *
774 * @v linda Linda device
775 */
776 static void linda_fini_recv ( struct linda *linda __unused ) {
777 /* Nothing to do; all contexts were already disabled when the
778 * queue pairs were destroyed
779 */
780 }
781
782 /***************************************************************************
783 *
784 * Completion queue operations
785 *
786 ***************************************************************************
787 */
788
789 /**
790 * Create completion queue
791 *
792 * @v ibdev Infiniband device
793 * @v cq Completion queue
794 * @ret rc Return status code
795 */
796 static int linda_create_cq ( struct ib_device *ibdev,
797 struct ib_completion_queue *cq ) {
798 struct linda *linda = ib_get_drvdata ( ibdev );
799 static int cqn;
800
801 /* The hardware has no concept of completion queues. We
802 * simply use the association between CQs and WQs (already
803 * handled by the IB core) to decide which WQs to poll.
804 *
805 * We do set a CQN, just to avoid confusing debug messages
806 * from the IB core.
807 */
808 cq->cqn = ++cqn;
809 DBGC ( linda, "Linda %p CQN %ld created\n", linda, cq->cqn );
810
811 return 0;
812 }
813
814 /**
815 * Destroy completion queue
816 *
817 * @v ibdev Infiniband device
818 * @v cq Completion queue
819 */
820 static void linda_destroy_cq ( struct ib_device *ibdev,
821 struct ib_completion_queue *cq ) {
822 struct linda *linda = ib_get_drvdata ( ibdev );
823
824 /* Nothing to do */
825 DBGC ( linda, "Linda %p CQN %ld destroyed\n", linda, cq->cqn );
826 }
827
828 /***************************************************************************
829 *
830 * Queue pair operations
831 *
832 ***************************************************************************
833 */
834
835 /**
836 * Create queue pair
837 *
838 * @v ibdev Infiniband device
839 * @v qp Queue pair
840 * @ret rc Return status code
841 */
842 static int linda_create_qp ( struct ib_device *ibdev,
843 struct ib_queue_pair *qp ) {
844 struct linda *linda = ib_get_drvdata ( ibdev );
845 int ctx;
846 int rc;
847
848 /* Locate an available context */
849 ctx = linda_alloc_ctx ( linda );
850 if ( ctx < 0 ) {
851 rc = ctx;
852 goto err_alloc_ctx;
853 }
854
855 /* Set queue pair number based on context index */
856 qp->qpn = linda_ctx_to_qpn ( ctx );
857
858 /* Set work-queue private data pointers */
859 ib_wq_set_drvdata ( &qp->send, &linda->send_wq[ctx] );
860 ib_wq_set_drvdata ( &qp->recv, &linda->recv_wq[ctx] );
861
862 /* Create receive work queue */
863 if ( ( rc = linda_create_recv_wq ( linda, qp ) ) != 0 )
864 goto err_create_recv_wq;
865
866 /* Create send work queue */
867 if ( ( rc = linda_create_send_wq ( linda, qp ) ) != 0 )
868 goto err_create_send_wq;
869
870 return 0;
871
872 linda_destroy_send_wq ( linda, qp );
873 err_create_send_wq:
874 linda_destroy_recv_wq ( linda, qp );
875 err_create_recv_wq:
876 linda_free_ctx ( linda, ctx );
877 err_alloc_ctx:
878 return rc;
879 }
880
881 /**
882 * Modify queue pair
883 *
884 * @v ibdev Infiniband device
885 * @v qp Queue pair
886 * @ret rc Return status code
887 */
888 static int linda_modify_qp ( struct ib_device *ibdev,
889 struct ib_queue_pair *qp ) {
890 struct linda *linda = ib_get_drvdata ( ibdev );
891
892 /* Nothing to do; the hardware doesn't have a notion of queue
893 * keys
894 */
895 DBGC ( linda, "Linda %p QPN %ld modified\n", linda, qp->qpn );
896 return 0;
897 }
898
899 /**
900 * Destroy queue pair
901 *
902 * @v ibdev Infiniband device
903 * @v qp Queue pair
904 */
905 static void linda_destroy_qp ( struct ib_device *ibdev,
906 struct ib_queue_pair *qp ) {
907 struct linda *linda = ib_get_drvdata ( ibdev );
908
909 linda_destroy_send_wq ( linda, qp );
910 linda_destroy_recv_wq ( linda, qp );
911 }
912
913 /***************************************************************************
914 *
915 * Work request operations
916 *
917 ***************************************************************************
918 */
919
920 /**
921 * Post send work queue entry
922 *
923 * @v ibdev Infiniband device
924 * @v qp Queue pair
925 * @v dest Destination address vector
926 * @v iobuf I/O buffer
927 * @ret rc Return status code
928 */
929 static int linda_post_send ( struct ib_device *ibdev,
930 struct ib_queue_pair *qp,
931 struct ib_address_vector *dest,
932 struct io_buffer *iobuf ) {
933 struct linda *linda = ib_get_drvdata ( ibdev );
934 struct ib_work_queue *wq = &qp->send;
935 struct linda_send_work_queue *linda_wq = ib_wq_get_drvdata ( wq );
936 struct QIB_7220_SendPbc sendpbc;
937 uint8_t header_buf[IB_MAX_HEADER_SIZE];
938 struct io_buffer headers;
939 unsigned int send_buf;
940 unsigned long start_offset;
941 unsigned long offset;
942 size_t len;
943 ssize_t frag_len;
944 uint32_t *data;
945
946 /* Allocate send buffer and calculate offset */
947 send_buf = linda_alloc_send_buf ( linda );
948 start_offset = offset = linda_send_buffer_offset ( linda, send_buf );
949
950 /* Store I/O buffer and send buffer index */
951 assert ( wq->iobufs[linda_wq->prod] == NULL );
952 wq->iobufs[linda_wq->prod] = iobuf;
953 linda_wq->send_buf[linda_wq->prod] = send_buf;
954
955 /* Construct headers */
956 iob_populate ( &headers, header_buf, 0, sizeof ( header_buf ) );
957 iob_reserve ( &headers, sizeof ( header_buf ) );
958 ib_push ( ibdev, &headers, qp, iob_len ( iobuf ), dest );
959
960 /* Calculate packet length */
961 len = ( ( sizeof ( sendpbc ) + iob_len ( &headers ) +
962 iob_len ( iobuf ) + 3 ) & ~3 );
963
964 /* Construct send per-buffer control word */
965 memset ( &sendpbc, 0, sizeof ( sendpbc ) );
966 BIT_FILL_2 ( &sendpbc,
967 LengthP1_toibc, ( ( len >> 2 ) - 1 ),
968 VL15, 1 );
969
970 /* Write SendPbc */
971 DBG_DISABLE ( DBGLVL_IO );
972 linda_writeq ( linda, &sendpbc, offset );
973 offset += sizeof ( sendpbc );
974
975 /* Write headers */
976 for ( data = headers.data, frag_len = iob_len ( &headers ) ;
977 frag_len > 0 ; data++, offset += 4, frag_len -= 4 ) {
978 linda_writel ( linda, *data, offset );
979 }
980
981 /* Write data */
982 for ( data = iobuf->data, frag_len = iob_len ( iobuf ) ;
983 frag_len > 0 ; data++, offset += 4, frag_len -= 4 ) {
984 linda_writel ( linda, *data, offset );
985 }
986 DBG_ENABLE ( DBGLVL_IO );
987
988 assert ( ( start_offset + len ) == offset );
989 DBGC2 ( linda, "Linda %p QPN %ld TX %d(%d) posted [%lx,%lx)\n",
990 linda, qp->qpn, send_buf, linda_wq->prod,
991 start_offset, offset );
992
993 /* Increment producer counter */
994 linda_wq->prod = ( ( linda_wq->prod + 1 ) & ( wq->num_wqes - 1 ) );
995
996 return 0;
997 }
998
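/* Editor's note: a worked example (hypothetical sizes) of the packet
 * length calculation in linda_post_send() above.  The PBC word, IB
 * headers and payload are all written to the send buffer as dwords,
 * so the total is rounded up to a multiple of four bytes.  Assuming
 * an 8-byte SendPbc, a 28-byte header and a 100-byte payload:
 *
 *	len = ( ( 8 + 28 + 100 + 3 ) & ~3 ) == 136
 *	LengthP1_toibc = ( ( 136 >> 2 ) - 1 ) == 33
 *
 * i.e. the hardware is given the total length in dwords, minus one.
 */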
999 /**
1000 * Complete send work queue entry
1001 *
1002 * @v ibdev Infiniband device
1003 * @v qp Queue pair
1004 * @v wqe_idx Work queue entry index
1005 */
1006 static void linda_complete_send ( struct ib_device *ibdev,
1007 struct ib_queue_pair *qp,
1008 unsigned int wqe_idx ) {
1009 struct linda *linda = ib_get_drvdata ( ibdev );
1010 struct ib_work_queue *wq = &qp->send;
1011 struct linda_send_work_queue *linda_wq = ib_wq_get_drvdata ( wq );
1012 struct io_buffer *iobuf;
1013 unsigned int send_buf;
1014
1015 /* Parse completion */
1016 send_buf = linda_wq->send_buf[wqe_idx];
1017 DBGC2 ( linda, "Linda %p QPN %ld TX %d(%d) complete\n",
1018 linda, qp->qpn, send_buf, wqe_idx );
1019
1020 /* Complete work queue entry */
1021 iobuf = wq->iobufs[wqe_idx];
1022 assert ( iobuf != NULL );
1023 ib_complete_send ( ibdev, qp, iobuf, 0 );
1024 wq->iobufs[wqe_idx] = NULL;
1025
1026 /* Free send buffer */
1027 linda_free_send_buf ( linda, send_buf );
1028 }
1029
1030 /**
1031 * Poll send work queue
1032 *
1033 * @v ibdev Infiniband device
1034 * @v qp Queue pair
1035 */
1036 static void linda_poll_send_wq ( struct ib_device *ibdev,
1037 struct ib_queue_pair *qp ) {
1038 struct linda *linda = ib_get_drvdata ( ibdev );
1039 struct ib_work_queue *wq = &qp->send;
1040 struct linda_send_work_queue *linda_wq = ib_wq_get_drvdata ( wq );
1041 unsigned int send_buf;
1042
1043 /* Look for completions */
1044 while ( wq->fill ) {
1045
1046 /* Check to see if send buffer has completed */
1047 send_buf = linda_wq->send_buf[linda_wq->cons];
1048 if ( linda_send_buf_in_use ( linda, send_buf ) )
1049 break;
1050
1051 /* Complete this buffer */
1052 linda_complete_send ( ibdev, qp, linda_wq->cons );
1053
1054 /* Increment consumer counter */
1055 linda_wq->cons = ( ( linda_wq->cons + 1 ) &
1056 ( wq->num_wqes - 1 ) );
1057 }
1058 }
1059
1060 /**
1061 * Post receive work queue entry
1062 *
1063 * @v ibdev Infiniband device
1064 * @v qp Queue pair
1065 * @v iobuf I/O buffer
1066 * @ret rc Return status code
1067 */
1068 static int linda_post_recv ( struct ib_device *ibdev,
1069 struct ib_queue_pair *qp,
1070 struct io_buffer *iobuf ) {
1071 struct linda *linda = ib_get_drvdata ( ibdev );
1072 struct ib_work_queue *wq = &qp->recv;
1073 struct linda_recv_work_queue *linda_wq = ib_wq_get_drvdata ( wq );
1074 struct QIB_7220_RcvEgr rcvegr;
1075 struct QIB_7220_scalar rcvegrindexhead;
1076 unsigned int ctx = linda_qpn_to_ctx ( qp->qpn );
1077 physaddr_t addr;
1078 size_t len;
1079 unsigned int wqe_idx;
1080 unsigned int bufsize;
1081
1082 /* Sanity checks */
1083 addr = virt_to_bus ( iobuf->data );
1084 len = iob_tailroom ( iobuf );
1085 if ( addr & ( LINDA_EAGER_BUFFER_ALIGN - 1 ) ) {
1086 DBGC ( linda, "Linda %p QPN %ld misaligned RX buffer "
1087 "(%08lx)\n", linda, qp->qpn, addr );
1088 return -EINVAL;
1089 }
1090 if ( len != LINDA_RECV_PAYLOAD_SIZE ) {
1091 DBGC ( linda, "Linda %p QPN %ld wrong RX buffer size (%zd)\n",
1092 linda, qp->qpn, len );
1093 return -EINVAL;
1094 }
1095
1096 /* Calculate eager producer index and WQE index */
1097 wqe_idx = ( linda_wq->eager_prod & ( wq->num_wqes - 1 ) );
1098 assert ( wq->iobufs[wqe_idx] == NULL );
1099
1100 /* Store I/O buffer */
1101 wq->iobufs[wqe_idx] = iobuf;
1102
1103 /* Calculate buffer size */
1104 switch ( LINDA_RECV_PAYLOAD_SIZE ) {
1105 case 2048: bufsize = LINDA_EAGER_BUFFER_2K; break;
1106 case 4096: bufsize = LINDA_EAGER_BUFFER_4K; break;
1107 case 8192: bufsize = LINDA_EAGER_BUFFER_8K; break;
1108 case 16384: bufsize = LINDA_EAGER_BUFFER_16K; break;
1109 case 32768: bufsize = LINDA_EAGER_BUFFER_32K; break;
1110 case 65536: bufsize = LINDA_EAGER_BUFFER_64K; break;
1111 default: linker_assert ( 0, invalid_rx_payload_size );
1112 bufsize = LINDA_EAGER_BUFFER_NONE;
1113 }
1114
1115 /* Post eager buffer */
1116 memset ( &rcvegr, 0, sizeof ( rcvegr ) );
1117 BIT_FILL_2 ( &rcvegr,
1118 Addr, ( addr >> 11 ),
1119 BufSize, bufsize );
1120 linda_writeq_array8b ( linda, &rcvegr,
1121 linda_wq->eager_array, linda_wq->eager_prod );
1122 DBGC2 ( linda, "Linda %p QPN %ld RX egr %d(%d) posted [%lx,%lx)\n",
1123 linda, qp->qpn, linda_wq->eager_prod, wqe_idx,
1124 addr, ( addr + len ) );
1125
1126 /* Increment producer index */
1127 linda_wq->eager_prod = ( ( linda_wq->eager_prod + 1 ) &
1128 ( linda_wq->eager_entries - 1 ) );
1129
1130 /* Update head index */
1131 memset ( &rcvegrindexhead, 0, sizeof ( rcvegrindexhead ) );
1132 BIT_FILL_1 ( &rcvegrindexhead,
1133 Value, ( ( linda_wq->eager_prod + 1 ) &
1134 ( linda_wq->eager_entries - 1 ) ) );
1135 linda_writeq_array64k ( linda, &rcvegrindexhead,
1136 QIB_7220_RcvEgrIndexHead0_offset, ctx );
1137
1138 return 0;
1139 }
1140
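/* Editor's note: an illustrative sketch of the eager buffer encoding
 * in linda_post_recv() above (hypothetical address).  The RcvEgr
 * entry holds the bus address right-shifted by 11 bits, so receive
 * buffers must be 2kB-aligned (the LINDA_EAGER_BUFFER_ALIGN sanity
 * check), and the buffer length is encoded as one of the discrete
 * LINDA_EAGER_BUFFER_* sizes rather than as a byte count.  For a 2kB
 * buffer at bus address 0x20000800:
 *
 *	Addr    = ( 0x20000800 >> 11 ) == 0x40001
 *	BufSize = LINDA_EAGER_BUFFER_2K
 */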
1141 /**
1142 * Complete receive work queue entry
1143 *
1144 * @v ibdev Infiniband device
1145 * @v qp Queue pair
1146 * @v header_offs Header offset
1147 */
1148 static void linda_complete_recv ( struct ib_device *ibdev,
1149 struct ib_queue_pair *qp,
1150 unsigned int header_offs ) {
1151 struct linda *linda = ib_get_drvdata ( ibdev );
1152 struct ib_work_queue *wq = &qp->recv;
1153 struct linda_recv_work_queue *linda_wq = ib_wq_get_drvdata ( wq );
1154 struct QIB_7220_RcvHdrFlags *rcvhdrflags;
1155 struct QIB_7220_RcvEgr rcvegr;
1156 struct io_buffer headers;
1157 struct io_buffer *iobuf;
1158 struct ib_queue_pair *intended_qp;
1159 struct ib_address_vector dest;
1160 struct ib_address_vector source;
1161 unsigned int rcvtype;
1162 unsigned int pktlen;
1163 unsigned int egrindex;
1164 unsigned int useegrbfr;
1165 unsigned int iberr, mkerr, tiderr, khdrerr, mtuerr;
1166 unsigned int lenerr, parityerr, vcrcerr, icrcerr;
1167 unsigned int err;
1168 unsigned int hdrqoffset;
1169 unsigned int header_len;
1170 unsigned int padded_payload_len;
1171 unsigned int wqe_idx;
1172 size_t payload_len;
1173 int qp0;
1174 int rc;
1175
1176 /* RcvHdrFlags are at the end of the header entry */
1177 rcvhdrflags = ( linda_wq->header + header_offs +
1178 LINDA_RECV_HEADER_SIZE - sizeof ( *rcvhdrflags ) );
1179 rcvtype = BIT_GET ( rcvhdrflags, RcvType );
1180 pktlen = ( BIT_GET ( rcvhdrflags, PktLen ) << 2 );
1181 egrindex = BIT_GET ( rcvhdrflags, EgrIndex );
1182 useegrbfr = BIT_GET ( rcvhdrflags, UseEgrBfr );
1183 hdrqoffset = ( BIT_GET ( rcvhdrflags, HdrqOffset ) << 2 );
1184 iberr = BIT_GET ( rcvhdrflags, IBErr );
1185 mkerr = BIT_GET ( rcvhdrflags, MKErr );
1186 tiderr = BIT_GET ( rcvhdrflags, TIDErr );
1187 khdrerr = BIT_GET ( rcvhdrflags, KHdrErr );
1188 mtuerr = BIT_GET ( rcvhdrflags, MTUErr );
1189 lenerr = BIT_GET ( rcvhdrflags, LenErr );
1190 parityerr = BIT_GET ( rcvhdrflags, ParityErr );
1191 vcrcerr = BIT_GET ( rcvhdrflags, VCRCErr );
1192 icrcerr = BIT_GET ( rcvhdrflags, ICRCErr );
1193 header_len = ( LINDA_RECV_HEADER_SIZE - hdrqoffset -
1194 sizeof ( *rcvhdrflags ) );
1195 padded_payload_len = ( pktlen - header_len - 4 /* ICRC */ );
1196 err = ( iberr | mkerr | tiderr | khdrerr | mtuerr |
1197 lenerr | parityerr | vcrcerr | icrcerr );
1198 /* IB header is placed immediately before RcvHdrFlags */
1199 iob_populate ( &headers, ( ( ( void * ) rcvhdrflags ) - header_len ),
1200 header_len, header_len );
1201
1202 /* Dump diagnostic information */
1203 if ( err || ( ! useegrbfr ) ) {
1204 DBGC ( linda, "Linda %p QPN %ld RX egr %d%s hdr %d type %d "
1205 "len %d(%d+%d+4)%s%s%s%s%s%s%s%s%s%s%s\n", linda,
1206 qp->qpn, egrindex, ( useegrbfr ? "" : "(unused)" ),
1207 ( header_offs / LINDA_RECV_HEADER_SIZE ), rcvtype,
1208 pktlen, header_len, padded_payload_len,
1209 ( err ? " [Err" : "" ), ( iberr ? " IB" : "" ),
1210 ( mkerr ? " MK" : "" ), ( tiderr ? " TID" : "" ),
1211 ( khdrerr ? " KHdr" : "" ), ( mtuerr ? " MTU" : "" ),
1212 ( lenerr ? " Len" : "" ), ( parityerr ? " Parity" : ""),
1213 ( vcrcerr ? " VCRC" : "" ), ( icrcerr ? " ICRC" : "" ),
1214 ( err ? "]" : "" ) );
1215 } else {
1216 DBGC2 ( linda, "Linda %p QPN %ld RX egr %d hdr %d type %d "
1217 "len %d(%d+%d+4)\n", linda, qp->qpn, egrindex,
1218 ( header_offs / LINDA_RECV_HEADER_SIZE ), rcvtype,
1219 pktlen, header_len, padded_payload_len );
1220 }
1221 DBGCP_HDA ( linda, hdrqoffset, headers.data,
1222 ( header_len + sizeof ( *rcvhdrflags ) ) );
1223
1224 /* Parse header to generate address vector */
1225 qp0 = ( qp->qpn == 0 );
1226 intended_qp = NULL;
1227 if ( ( rc = ib_pull ( ibdev, &headers, ( qp0 ? &intended_qp : NULL ),
1228 &payload_len, &dest, &source ) ) != 0 ) {
1229 DBGC ( linda, "Linda %p could not parse headers: %s\n",
1230 linda, strerror ( rc ) );
1231 err = 1;
1232 }
1233 if ( ! intended_qp )
1234 intended_qp = qp;
1235
1236 /* Complete this buffer and any skipped buffers. Note that
1237 * when the hardware runs out of buffers, it will repeatedly
1238 * report the same buffer (the tail) as a TID error, and that
1239 * it also has a habit of sometimes skipping over several
1240 * buffers at once.
1241 */
1242 while ( 1 ) {
1243
1244 /* If we have caught up to the producer counter, stop.
1245 * This will happen when the hardware first runs out
1246 * of buffers and starts reporting TID errors against
1247 * the eager buffer it wants to use next.
1248 */
1249 if ( linda_wq->eager_cons == linda_wq->eager_prod )
1250 break;
1251
1252 /* If we have caught up to where we should be after
1253 * completing this egrindex, stop. We phrase the test
1254 * this way to avoid completing the entire ring when
1255 * we receive the same egrindex twice in a row.
1256 */
1257 if ( ( linda_wq->eager_cons ==
1258 ( ( egrindex + 1 ) & ( linda_wq->eager_entries - 1 ) )))
1259 break;
1260
1261 /* Identify work queue entry and corresponding I/O
1262 * buffer.
1263 */
1264 wqe_idx = ( linda_wq->eager_cons & ( wq->num_wqes - 1 ) );
1265 iobuf = wq->iobufs[wqe_idx];
1266 assert ( iobuf != NULL );
1267 wq->iobufs[wqe_idx] = NULL;
1268
1269 /* Complete the eager buffer */
1270 if ( linda_wq->eager_cons == egrindex ) {
1271 /* Completing the eager buffer described in
1272 * this header entry.
1273 */
1274 iob_put ( iobuf, payload_len );
1275 rc = ( err ? -EIO : ( useegrbfr ? 0 : -ECANCELED ) );
1276 /* Redirect to target QP if necessary */
1277 if ( qp != intended_qp ) {
1278 DBGC ( linda, "Linda %p redirecting QPN %ld "
1279 "=> %ld\n",
1280 linda, qp->qpn, intended_qp->qpn );
1281 /* Compensate for incorrect fill levels */
1282 qp->recv.fill--;
1283 intended_qp->recv.fill++;
1284 }
1285 ib_complete_recv ( ibdev, intended_qp, &dest, &source,
1286 					   iobuf, rc );
1287 } else {
1288 /* Completing on a skipped-over eager buffer */
1289 ib_complete_recv ( ibdev, qp, &dest, &source, iobuf,
1290 -ECANCELED );
1291 }
1292
1293 /* Clear eager buffer */
1294 memset ( &rcvegr, 0, sizeof ( rcvegr ) );
1295 linda_writeq_array8b ( linda, &rcvegr, linda_wq->eager_array,
1296 linda_wq->eager_cons );
1297
1298 /* Increment consumer index */
1299 linda_wq->eager_cons = ( ( linda_wq->eager_cons + 1 ) &
1300 ( linda_wq->eager_entries - 1 ) );
1301 }
1302 }
1303
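/* Editor's note: a sketch (hypothetical sizes) of the receive header
 * entry layout decoded at the top of linda_complete_recv() above.
 * Each entry is LINDA_RECV_HEADER_SIZE bytes; the hardware places the
 * IB headers at HdrqOffset (reported in dwords) and the RcvHdrFlags
 * qword occupies the end of the entry, so:
 *
 *	header_len         = entry size - hdrqoffset - sizeof ( flags )
 *	padded_payload_len = pktlen - header_len - 4	(4-byte ICRC)
 *
 * e.g. a 96-byte entry with a 48-byte HdrqOffset gives a 40-byte IB
 * header, and a reported PktLen of 144 then corresponds to a 100-byte
 * padded payload.
 */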
1304 /**
1305 * Poll receive work queue
1306 *
1307 * @v ibdev Infiniband device
1308 * @v qp Queue pair
1309 */
1310 static void linda_poll_recv_wq ( struct ib_device *ibdev,
1311 struct ib_queue_pair *qp ) {
1312 struct linda *linda = ib_get_drvdata ( ibdev );
1313 struct ib_work_queue *wq = &qp->recv;
1314 struct linda_recv_work_queue *linda_wq = ib_wq_get_drvdata ( wq );
1315 struct QIB_7220_RcvHdrHead0 rcvhdrhead;
1316 unsigned int ctx = linda_qpn_to_ctx ( qp->qpn );
1317 unsigned int header_prod;
1318
1319 /* Check for received packets */
1320 header_prod = ( BIT_GET ( &linda_wq->header_prod, Value ) << 2 );
1321 if ( header_prod == linda_wq->header_cons )
1322 return;
1323
1324 /* Process all received packets */
1325 while ( linda_wq->header_cons != header_prod ) {
1326
1327 /* Complete the receive */
1328 linda_complete_recv ( ibdev, qp, linda_wq->header_cons );
1329
1330 /* Increment the consumer offset */
1331 linda_wq->header_cons += LINDA_RECV_HEADER_SIZE;
1332 linda_wq->header_cons %= LINDA_RECV_HEADERS_SIZE;
1333 }
1334
1335 /* Update consumer offset */
1336 memset ( &rcvhdrhead, 0, sizeof ( rcvhdrhead ) );
1337 BIT_FILL_2 ( &rcvhdrhead,
1338 RcvHeadPointer, ( linda_wq->header_cons >> 2 ),
1339 counter, 1 );
1340 linda_writeq_array64k ( linda, &rcvhdrhead,
1341 QIB_7220_RcvHdrHead0_offset, ctx );
1342 }
1343
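/* Editor's note: a summary of the unit conversions in
 * linda_poll_recv_wq() above.  The hardware reports the header tail
 * pointer in dwords and software tracks the consumer offset in bytes:
 *
 *	header_prod (bytes) == BIT_GET ( &header_prod, Value ) << 2
 *	RcvHeadPointer      == header_cons >> 2
 *
 * The consumer offset advances in whole LINDA_RECV_HEADER_SIZE steps
 * and wraps at LINDA_RECV_HEADERS_SIZE, matching the header ring
 * allocated in linda_create_recv_wq().
 */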
1344 /**
1345 * Poll completion queue
1346 *
1347 * @v ibdev Infiniband device
1348 * @v cq Completion queue
1349 */
1350 static void linda_poll_cq ( struct ib_device *ibdev,
1351 struct ib_completion_queue *cq ) {
1352 struct ib_work_queue *wq;
1353
1354 /* Poll associated send and receive queues */
1355 list_for_each_entry ( wq, &cq->work_queues, list ) {
1356 if ( wq->is_send ) {
1357 linda_poll_send_wq ( ibdev, wq->qp );
1358 } else {
1359 linda_poll_recv_wq ( ibdev, wq->qp );
1360 }
1361 }
1362 }
1363
1364 /***************************************************************************
1365 *
1366 * Event queues
1367 *
1368 ***************************************************************************
1369 */
1370
1371 /**
1372 * Poll event queue
1373 *
1374 * @v ibdev Infiniband device
1375 */
1376 static void linda_poll_eq ( struct ib_device *ibdev ) {
1377 struct linda *linda = ib_get_drvdata ( ibdev );
1378 struct QIB_7220_ErrStatus errstatus;
1379 struct QIB_7220_ErrClear errclear;
1380
1381 /* Check for link status changes */
1382 DBG_DISABLE ( DBGLVL_IO );
1383 linda_readq ( linda, &errstatus, QIB_7220_ErrStatus_offset );
1384 DBG_ENABLE ( DBGLVL_IO );
1385 if ( BIT_GET ( &errstatus, IBStatusChanged ) ) {
1386 linda_link_state_changed ( ibdev );
1387 memset ( &errclear, 0, sizeof ( errclear ) );
1388 BIT_FILL_1 ( &errclear, IBStatusChangedClear, 1 );
1389 linda_writeq ( linda, &errclear, QIB_7220_ErrClear_offset );
1390 }
1391 }
1392
1393 /***************************************************************************
1394 *
1395 * Infiniband link-layer operations
1396 *
1397 ***************************************************************************
1398 */
1399
1400 /**
1401 * Initialise Infiniband link
1402 *
1403 * @v ibdev Infiniband device
1404 * @ret rc Return status code
1405 */
1406 static int linda_open ( struct ib_device *ibdev ) {
1407 struct linda *linda = ib_get_drvdata ( ibdev );
1408 struct QIB_7220_Control control;
1409
1410 	/* Enable link */
1411 linda_readq ( linda, &control, QIB_7220_Control_offset );
1412 BIT_SET ( &control, LinkEn, 1 );
1413 linda_writeq ( linda, &control, QIB_7220_Control_offset );
1414 return 0;
1415 }
1416
1417 /**
1418 * Close Infiniband link
1419 *
1420 * @v ibdev Infiniband device
1421 */
1422 static void linda_close ( struct ib_device *ibdev ) {
1423 struct linda *linda = ib_get_drvdata ( ibdev );
1424 struct QIB_7220_Control control;
1425
1426 /* Disable link */
1427 linda_readq ( linda, &control, QIB_7220_Control_offset );
1428 BIT_SET ( &control, LinkEn, 0 );
1429 linda_writeq ( linda, &control, QIB_7220_Control_offset );
1430 }
1431
1432 /***************************************************************************
1433 *
1434 * Multicast group operations
1435 *
1436 ***************************************************************************
1437 */
1438
1439 /**
1440 * Attach to multicast group
1441 *
1442 * @v ibdev Infiniband device
1443 * @v qp Queue pair
1444 * @v gid Multicast GID
1445 * @ret rc Return status code
1446 */
1447 static int linda_mcast_attach ( struct ib_device *ibdev,
1448 struct ib_queue_pair *qp,
1449 union ib_gid *gid ) {
1450 struct linda *linda = ib_get_drvdata ( ibdev );
1451
1452 ( void ) linda;
1453 ( void ) qp;
1454 ( void ) gid;
1455 return 0;
1456 }
1457
1458 /**
1459 * Detach from multicast group
1460 *
1461 * @v ibdev Infiniband device
1462 * @v qp Queue pair
1463 * @v gid Multicast GID
1464 */
1465 static void linda_mcast_detach ( struct ib_device *ibdev,
1466 struct ib_queue_pair *qp,
1467 union ib_gid *gid ) {
1468 struct linda *linda = ib_get_drvdata ( ibdev );
1469
1470 ( void ) linda;
1471 ( void ) qp;
1472 ( void ) gid;
1473 }
1474
1475 /** Linda Infiniband operations */
1476 static struct ib_device_operations linda_ib_operations = {
1477 .create_cq = linda_create_cq,
1478 .destroy_cq = linda_destroy_cq,
1479 .create_qp = linda_create_qp,
1480 .modify_qp = linda_modify_qp,
1481 .destroy_qp = linda_destroy_qp,
1482 .post_send = linda_post_send,
1483 .post_recv = linda_post_recv,
1484 .poll_cq = linda_poll_cq,
1485 .poll_eq = linda_poll_eq,
1486 .open = linda_open,
1487 .close = linda_close,
1488 .mcast_attach = linda_mcast_attach,
1489 .mcast_detach = linda_mcast_detach,
1490 .set_port_info = linda_set_port_info,
1491 .set_pkey_table = linda_set_pkey_table,
1492 };
1493
1494 /***************************************************************************
1495 *
1496 * I2C bus operations
1497 *
1498 ***************************************************************************
1499 */
1500
1501 /** Linda I2C bit to GPIO mappings */
1502 static unsigned int linda_i2c_bits[] = {
1503 [I2C_BIT_SCL] = ( 1 << LINDA_GPIO_SCL ),
1504 [I2C_BIT_SDA] = ( 1 << LINDA_GPIO_SDA ),
1505 };
1506
1507 /**
1508 * Read Linda I2C line status
1509 *
1510 * @v basher Bit-bashing interface
1511 * @v bit_id Bit number
1512 * @ret zero Input is a logic 0
1513 * @ret non-zero Input is a logic 1
1514 */
1515 static int linda_i2c_read_bit ( struct bit_basher *basher,
1516 unsigned int bit_id ) {
1517 struct linda *linda =
1518 container_of ( basher, struct linda, i2c.basher );
1519 struct QIB_7220_EXTStatus extstatus;
1520 unsigned int status;
1521
1522 DBG_DISABLE ( DBGLVL_IO );
1523
1524 linda_readq ( linda, &extstatus, QIB_7220_EXTStatus_offset );
1525 status = ( BIT_GET ( &extstatus, GPIOIn ) & linda_i2c_bits[bit_id] );
1526
1527 DBG_ENABLE ( DBGLVL_IO );
1528
1529 return status;
1530 }
1531
1532 /**
1533 * Write Linda I2C line status
1534 *
1535 * @v basher Bit-bashing interface
1536 * @v bit_id Bit number
1537 * @v data Value to write
1538 */
1539 static void linda_i2c_write_bit ( struct bit_basher *basher,
1540 unsigned int bit_id, unsigned long data ) {
1541 struct linda *linda =
1542 container_of ( basher, struct linda, i2c.basher );
1543 struct QIB_7220_EXTCtrl extctrl;
1544 struct QIB_7220_GPIO gpioout;
1545 unsigned int bit = linda_i2c_bits[bit_id];
1546 unsigned int outputs = 0;
1547 unsigned int output_enables = 0;
1548
1549 DBG_DISABLE ( DBGLVL_IO );
1550
1551 /* Read current GPIO mask and outputs */
1552 linda_readq ( linda, &extctrl, QIB_7220_EXTCtrl_offset );
1553 linda_readq ( linda, &gpioout, QIB_7220_GPIOOut_offset );
1554
1555 /* Update outputs and output enables. I2C lines are tied
1556 * high, so we always set the output to 0 and use the output
1557 * enable to control the line.
1558 */
1559 output_enables = BIT_GET ( &extctrl, GPIOOe );
1560 output_enables = ( ( output_enables & ~bit ) | ( ~data & bit ) );
1561 outputs = BIT_GET ( &gpioout, GPIO );
1562 outputs = ( outputs & ~bit );
1563 BIT_SET ( &extctrl, GPIOOe, output_enables );
1564 BIT_SET ( &gpioout, GPIO, outputs );
1565
1566 /* Write the output enable first; that way we avoid logic
1567 * hazards.
1568 */
1569 linda_writeq ( linda, &extctrl, QIB_7220_EXTCtrl_offset );
1570 linda_writeq ( linda, &gpioout, QIB_7220_GPIOOut_offset );
1571 mb();
1572
1573 DBG_ENABLE ( DBGLVL_IO );
1574 }
1575
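/* Editor's note: an illustrative truth table for the open-drain I2C
 * emulation in linda_i2c_write_bit() above.  The GPIO output value
 * for the SCL/SDA lines is always 0; only the output enable changes:
 *
 *	write 1  =>  output enable 0  =>  line floats high (pull-up)
 *	write 0  =>  output enable 1  =>  line driven low
 */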
1576 /** Linda I2C bit-bashing interface operations */
1577 static struct bit_basher_operations linda_i2c_basher_ops = {
1578 .read = linda_i2c_read_bit,
1579 .write = linda_i2c_write_bit,
1580 };
1581
1582 /**
1583 * Initialise Linda I2C subsystem
1584 *
1585 * @v linda Linda device
1586 * @ret rc Return status code
1587 */
1588 static int linda_init_i2c ( struct linda *linda ) {
1589 static int try_eeprom_address[] = { 0x51, 0x50 };
1590 unsigned int i;
1591 int rc;
1592
1593 /* Initialise bus */
1594 if ( ( rc = init_i2c_bit_basher ( &linda->i2c,
1595 &linda_i2c_basher_ops ) ) != 0 ) {
1596 DBGC ( linda, "Linda %p could not initialise I2C bus: %s\n",
1597 linda, strerror ( rc ) );
1598 return rc;
1599 }
1600
1601 /* Probe for devices */
1602 for ( i = 0 ; i < ( sizeof ( try_eeprom_address ) /
1603 sizeof ( try_eeprom_address[0] ) ) ; i++ ) {
1604 init_i2c_eeprom ( &linda->eeprom, try_eeprom_address[i] );
1605 if ( ( rc = i2c_check_presence ( &linda->i2c.i2c,
1606 &linda->eeprom ) ) == 0 ) {
1607 DBGC2 ( linda, "Linda %p found EEPROM at %02x\n",
1608 linda, try_eeprom_address[i] );
1609 return 0;
1610 }
1611 }
1612
1613 DBGC ( linda, "Linda %p could not find EEPROM\n", linda );
1614 return -ENODEV;
1615 }
1616
1617 /**
1618 * Read EEPROM parameters
1619 *
1620 * @v linda Linda device
1621 * @v guid GUID to fill in
1622 * @ret rc Return status code
1623 */
1624 static int linda_read_eeprom ( struct linda *linda, union ib_guid *guid ) {
1625 struct i2c_interface *i2c = &linda->i2c.i2c;
1626 int rc;
1627
1628 /* Read GUID */
1629 if ( ( rc = i2c->read ( i2c, &linda->eeprom, LINDA_EEPROM_GUID_OFFSET,
1630 guid->bytes, sizeof ( *guid ) ) ) != 0 ) {
1631 DBGC ( linda, "Linda %p could not read GUID: %s\n",
1632 linda, strerror ( rc ) );
1633 return rc;
1634 }
1635 DBGC2 ( linda, "Linda %p has GUID " IB_GUID_FMT "\n",
1636 linda, IB_GUID_ARGS ( guid ) );
1637
1638 /* Read serial number (debug only) */
1639 if ( DBG_LOG ) {
1640 uint8_t serial[LINDA_EEPROM_SERIAL_SIZE + 1];
1641
1642 serial[ sizeof ( serial ) - 1 ] = '\0';
1643 if ( ( rc = i2c->read ( i2c, &linda->eeprom,
1644 LINDA_EEPROM_SERIAL_OFFSET, serial,
1645 ( sizeof ( serial ) - 1 ) ) ) != 0 ) {
1646 DBGC ( linda, "Linda %p could not read serial: %s\n",
1647 linda, strerror ( rc ) );
1648 return rc;
1649 }
1650 DBGC2 ( linda, "Linda %p has serial number \"%s\"\n",
1651 linda, serial );
1652 }
1653
1654 return 0;
1655 }
1656
1657 /***************************************************************************
1658 *
1659 * External parallel bus access
1660 *
1661 ***************************************************************************
1662 */
1663
1664 /**
1665 * Request ownership of the IB external parallel bus
1666 *
1667 * @v linda Linda device
1668 * @ret rc Return status code
1669 */
1670 static int linda_ib_epb_request ( struct linda *linda ) {
1671 struct QIB_7220_ibsd_epb_access_ctrl access;
1672 unsigned int i;
1673
1674 /* Request ownership */
1675 memset ( &access, 0, sizeof ( access ) );
1676 BIT_FILL_1 ( &access, sw_ib_epb_req, 1 );
1677 linda_writeq ( linda, &access, QIB_7220_ibsd_epb_access_ctrl_offset );
1678
1679 /* Wait for ownership to be granted */
1680 for ( i = 0 ; i < LINDA_EPB_REQUEST_MAX_WAIT_US ; i++ ) {
1681 linda_readq ( linda, &access,
1682 QIB_7220_ibsd_epb_access_ctrl_offset );
1683 if ( BIT_GET ( &access, sw_ib_epb_req_granted ) )
1684 return 0;
1685 udelay ( 1 );
1686 }
1687
1688 DBGC ( linda, "Linda %p timed out waiting for IB EPB request\n",
1689 linda );
1690 return -ETIMEDOUT;
1691 }
1692
1693 /**
1694 * Wait for IB external parallel bus transaction to complete
1695 *
1696 * @v linda Linda device
1697 * @v xact Buffer to hold transaction result
1698 * @ret rc Return status code
1699 */
1700 static int linda_ib_epb_wait ( struct linda *linda,
1701 struct QIB_7220_ibsd_epb_transaction_reg *xact ) {
1702 unsigned int i;
1703
1704 /* Discard first read to allow for signals crossing clock domains */
1705 linda_readq ( linda, xact, QIB_7220_ibsd_epb_transaction_reg_offset );
1706
1707 for ( i = 0 ; i < LINDA_EPB_XACT_MAX_WAIT_US ; i++ ) {
1708 linda_readq ( linda, xact,
1709 QIB_7220_ibsd_epb_transaction_reg_offset );
1710 if ( BIT_GET ( xact, ib_epb_rdy ) ) {
1711 if ( BIT_GET ( xact, ib_epb_req_error ) ) {
1712 DBGC ( linda, "Linda %p EPB transaction "
1713 "failed\n", linda );
1714 return -EIO;
1715 } else {
1716 return 0;
1717 }
1718 }
1719 udelay ( 1 );
1720 }
1721
1722 DBGC ( linda, "Linda %p timed out waiting for IB EPB transaction\n",
1723 linda );
1724 return -ETIMEDOUT;
1725 }
1726
1727 /**
1728 * Release ownership of the IB external parallel bus
1729 *
1730 * @v linda Linda device
1731 */
1732 static void linda_ib_epb_release ( struct linda *linda ) {
1733 struct QIB_7220_ibsd_epb_access_ctrl access;
1734
1735 memset ( &access, 0, sizeof ( access ) );
1736 BIT_FILL_1 ( &access, sw_ib_epb_req, 0 );
1737 linda_writeq ( linda, &access, QIB_7220_ibsd_epb_access_ctrl_offset );
1738 }
1739
1740 /**
1741 * Read data via IB external parallel bus
1742 *
1743 * @v linda Linda device
1744 * @v location EPB location
1745 * @ret data Data read, or negative error
1746 *
1747 * You must have already acquired ownership of the IB external
1748 * parallel bus.
1749 */
1750 static int linda_ib_epb_read ( struct linda *linda, unsigned int location ) {
1751 struct QIB_7220_ibsd_epb_transaction_reg xact;
1752 unsigned int data;
1753 int rc;
1754
1755 /* Ensure no transaction is currently in progress */
1756 if ( ( rc = linda_ib_epb_wait ( linda, &xact ) ) != 0 )
1757 return rc;
1758
1759 /* Process data */
1760 memset ( &xact, 0, sizeof ( xact ) );
1761 BIT_FILL_3 ( &xact,
1762 ib_epb_address, LINDA_EPB_LOC_ADDRESS ( location ),
1763 ib_epb_read_write, LINDA_EPB_READ,
1764 ib_epb_cs, LINDA_EPB_LOC_CS ( location ) );
1765 linda_writeq ( linda, &xact,
1766 QIB_7220_ibsd_epb_transaction_reg_offset );
1767
1768 /* Wait for transaction to complete */
1769 if ( ( rc = linda_ib_epb_wait ( linda, &xact ) ) != 0 )
1770 return rc;
1771
1772 data = BIT_GET ( &xact, ib_epb_data );
1773 return data;
1774 }
1775
1776 /**
1777 * Write data via IB external parallel bus
1778 *
1779 * @v linda Linda device
1780 * @v location EPB location
1781 * @v data Data to write
1782 * @ret rc Return status code
1783 *
1784 * You must have already acquired ownership of the IB external
1785 * parallel bus.
1786 */
1787 static int linda_ib_epb_write ( struct linda *linda, unsigned int location,
1788 unsigned int data ) {
1789 struct QIB_7220_ibsd_epb_transaction_reg xact;
1790 int rc;
1791
1792 /* Ensure no transaction is currently in progress */
1793 if ( ( rc = linda_ib_epb_wait ( linda, &xact ) ) != 0 )
1794 return rc;
1795
1796 /* Process data */
1797 memset ( &xact, 0, sizeof ( xact ) );
1798 BIT_FILL_4 ( &xact,
1799 ib_epb_data, data,
1800 ib_epb_address, LINDA_EPB_LOC_ADDRESS ( location ),
1801 ib_epb_read_write, LINDA_EPB_WRITE,
1802 ib_epb_cs, LINDA_EPB_LOC_CS ( location ) );
1803 linda_writeq ( linda, &xact,
1804 QIB_7220_ibsd_epb_transaction_reg_offset );
1805
1806 /* Wait for transaction to complete */
1807 if ( ( rc = linda_ib_epb_wait ( linda, &xact ) ) != 0 )
1808 return rc;
1809
1810 return 0;
1811 }
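
/*
 * Illustrative sketch only (not referenced by the driver, and the
 * function name is invented for this example): the expected usage
 * pattern for the raw EPB accessors is acquire, transfer, release.
 * The location used here (SerDes chip select, channel 0, element 6,
 * register 0x00) is arbitrary.  linda_ib_epb_mod_reg() below is the
 * real-world version of this pattern.
 */
static int __unused linda_ib_epb_example ( struct linda *linda ) {
	unsigned int location;
	int data;
	int rc;

	/* Acquire bus ownership before any EPB transaction */
	if ( ( rc = linda_ib_epb_request ( linda ) ) != 0 )
		return rc;

	/* Read a single register */
	location = LINDA_EPB_LOC ( LINDA_EPB_CS_SERDES, 0, 6, 0x00 );
	data = linda_ib_epb_read ( linda, location );
	if ( data < 0 ) {
		rc = data;
		goto out_release;
	}

	/* Write the (unmodified) value back */
	rc = linda_ib_epb_write ( linda, location, data );

 out_release:
	/* Always release the bus, even on error */
	linda_ib_epb_release ( linda );
	return rc;
}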
1812
1813 /**
1814 * Read/modify/write EPB register
1815 *
1816 * @v linda Linda device
1817 * @v cs Chip select
1818 * @v channel Channel
1819 * @v element Element
1820 * @v reg Register
1821 * @v value Value to set
1822 * @v mask Mask to apply to old value
1823 * @ret rc Return status code
1824 */
1825 static int linda_ib_epb_mod_reg ( struct linda *linda, unsigned int cs,
1826 unsigned int channel, unsigned int element,
1827 unsigned int reg, unsigned int value,
1828 unsigned int mask ) {
1829 unsigned int location;
1830 int old_value;
1831 int rc;
1832
1833 DBG_DISABLE ( DBGLVL_IO );
1834
1835 /* Sanity check */
1836 assert ( ( value & mask ) == value );
1837
1838 /* Acquire bus ownership */
1839 if ( ( rc = linda_ib_epb_request ( linda ) ) != 0 )
1840 goto out;
1841
1842 /* Read existing value, if necessary */
1843 location = LINDA_EPB_LOC ( cs, channel, element, reg );
1844 if ( (~mask) & 0xff ) {
1845 old_value = linda_ib_epb_read ( linda, location );
1846 if ( old_value < 0 ) {
1847 rc = old_value;
1848 goto out_release;
1849 }
1850 } else {
1851 old_value = 0;
1852 }
1853
1854 /* Update value */
1855 value = ( ( old_value & ~mask ) | value );
1856 DBGCP ( linda, "Linda %p CS %d EPB(%d,%d,%#02x) %#02x => %#02x\n",
1857 linda, cs, channel, element, reg, old_value, value );
1858 if ( ( rc = linda_ib_epb_write ( linda, location, value ) ) != 0 )
1859 goto out_release;
1860
1861 out_release:
1862 /* Release bus */
1863 linda_ib_epb_release ( linda );
1864 out:
1865 DBG_ENABLE ( DBGLVL_IO );
1866 return rc;
1867 }
1868
1869 /**
1870 * Transfer data to/from microcontroller RAM
1871 *
1872 * @v linda Linda device
1873 * @v address Starting address
1874 * @v write Data to write, or NULL
1875 * @v read Data to read, or NULL
1876 * @v len Length of data
1877 * @ret rc Return status code
1878 */
1879 static int linda_ib_epb_ram_xfer ( struct linda *linda, unsigned int address,
1880 const void *write, void *read,
1881 size_t len ) {
1882 unsigned int control;
1883 unsigned int address_hi;
1884 unsigned int address_lo;
1885 int data;
1886 int rc;
1887
1888 DBG_DISABLE ( DBGLVL_IO );
1889
1890 assert ( ! ( write && read ) );
1891 assert ( ( address % LINDA_EPB_UC_CHUNK_SIZE ) == 0 );
1892 assert ( ( len % LINDA_EPB_UC_CHUNK_SIZE ) == 0 );
1893
1894 /* Acquire bus ownership */
1895 if ( ( rc = linda_ib_epb_request ( linda ) ) != 0 )
1896 goto out;
1897
1898 /* Process data */
1899 while ( len ) {
1900
1901 /* Reset the address for each new chunk */
1902 if ( ( address % LINDA_EPB_UC_CHUNK_SIZE ) == 0 ) {
1903
1904 /* Write the control register */
1905 control = ( read ? LINDA_EPB_UC_CTL_READ :
1906 LINDA_EPB_UC_CTL_WRITE );
1907 if ( ( rc = linda_ib_epb_write ( linda,
1908 LINDA_EPB_UC_CTL,
1909 control ) ) != 0 )
1910 break;
1911
1912 /* Write the address registers */
1913 address_hi = ( address >> 8 );
1914 if ( ( rc = linda_ib_epb_write ( linda,
1915 LINDA_EPB_UC_ADDR_HI,
1916 address_hi ) ) != 0 )
1917 break;
1918 address_lo = ( address & 0xff );
1919 if ( ( rc = linda_ib_epb_write ( linda,
1920 LINDA_EPB_UC_ADDR_LO,
1921 address_lo ) ) != 0 )
1922 break;
1923 }
1924
1925 /* Read or write the data */
1926 if ( read ) {
1927 data = linda_ib_epb_read ( linda, LINDA_EPB_UC_DATA );
1928 if ( data < 0 ) {
1929 rc = data;
1930 break;
1931 }
1932 *( ( uint8_t * ) read++ ) = data;
1933 } else {
1934 data = *( ( uint8_t * ) write++ );
1935 if ( ( rc = linda_ib_epb_write ( linda,
1936 LINDA_EPB_UC_DATA,
1937 data ) ) != 0 )
1938 break;
1939 }
1940 address++;
1941 len--;
1942
1943 /* Reset the control byte after each chunk */
1944 if ( ( address % LINDA_EPB_UC_CHUNK_SIZE ) == 0 ) {
1945 if ( ( rc = linda_ib_epb_write ( linda,
1946 LINDA_EPB_UC_CTL,
1947 0 ) ) != 0 )
1948 break;
1949 }
1950 }
1951
1952 /* Release bus */
1953 linda_ib_epb_release ( linda );
1954
1955 out:
1956 DBG_ENABLE ( DBGLVL_IO );
1957 return rc;
1958 }
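
/*
 * Illustrative sketch only (not referenced by the driver, and the
 * function name is invented for this example): dumping the first chunk
 * of microcontroller RAM.  Addresses and lengths passed to
 * linda_ib_epb_ram_xfer() must be multiples of LINDA_EPB_UC_CHUNK_SIZE,
 * and at most one of the write and read pointers may be non-NULL.
 */
static int __unused linda_ib_epb_ram_peek ( struct linda *linda ) {
	uint8_t chunk[LINDA_EPB_UC_CHUNK_SIZE];
	int rc;

	/* Read a single chunk starting at address zero */
	if ( ( rc = linda_ib_epb_ram_xfer ( linda, 0, NULL, chunk,
					    sizeof ( chunk ) ) ) != 0 )
		return rc;

	/* Dump the chunk for inspection */
	DBGC_HDA ( linda, 0, chunk, sizeof ( chunk ) );
	return 0;
}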
1959
1960 /***************************************************************************
1961 *
1962 * Infiniband SerDes initialisation
1963 *
1964 ***************************************************************************
1965 */
1966
1967 /** A Linda SerDes parameter */
1968 struct linda_serdes_param {
1969 /** EPB address as constructed by LINDA_EPB_ADDRESS() */
1970 uint16_t address;
1971 /** Value to set */
1972 uint8_t value;
1973 /** Mask to apply to old value */
1974 uint8_t mask;
1975 } __packed;
1976
1977 /** Magic "all channels" channel number */
1978 #define LINDA_EPB_ALL_CHANNELS 31
1979
1980 /** End of SerDes parameter list marker */
1981 #define LINDA_SERDES_PARAM_END { 0, 0, 0 }
1982
1983 /**
1984 * Program IB SerDes register(s)
1985 *
1986 * @v linda Linda device
1987 * @v param SerDes parameter
1988 * @ret rc Return status code
1989 */
1990 static int linda_set_serdes_param ( struct linda *linda,
1991 struct linda_serdes_param *param ) {
1992 unsigned int channel;
1993 unsigned int channel_start;
1994 unsigned int channel_end;
1995 unsigned int element;
1996 unsigned int reg;
1997 int rc;
1998
1999 /* Break down the EPB address and determine channels */
2000 channel = LINDA_EPB_ADDRESS_CHANNEL ( param->address );
2001 element = LINDA_EPB_ADDRESS_ELEMENT ( param->address );
2002 reg = LINDA_EPB_ADDRESS_REG ( param->address );
2003 if ( channel == LINDA_EPB_ALL_CHANNELS ) {
2004 channel_start = 0;
2005 channel_end = 3;
2006 } else {
2007 channel_start = channel_end = channel;
2008 }
2009
2010 /* Modify register for each specified channel */
2011 for ( channel = channel_start ; channel <= channel_end ; channel++ ) {
2012 if ( ( rc = linda_ib_epb_mod_reg ( linda, LINDA_EPB_CS_SERDES,
2013 channel, element, reg,
2014 param->value,
2015 param->mask ) ) != 0 )
2016 return rc;
2017 }
2018
2019 return 0;
2020 }
2021
2022 /**
2023 * Program IB SerDes registers
2024 *
2025 * @v linda Linda device
2026  * @v params		SerDes parameter list
2027  *			(terminated by LINDA_SERDES_PARAM_END)
2028 * @ret rc Return status code
2029 */
2030 static int linda_set_serdes_params ( struct linda *linda,
2031 struct linda_serdes_param *params ) {
2032 int rc;
2033
2034 	for ( ; params->mask != 0 ; params++ ) {
2035 if ( ( rc = linda_set_serdes_param ( linda,
2036 params ) ) != 0 )
2037 return rc;
2038 }
2039
2040 return 0;
2041 }
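
/*
 * Illustrative sketch only (not referenced by the driver, and the
 * table name is invented for this example): a SerDes parameter table
 * is a list of { address, value, mask } entries, applied in order by
 * linda_set_serdes_params(), with a zero mask marking the end of the
 * list.  Using LINDA_EPB_ALL_CHANNELS in the address broadcasts the
 * write to channels 0-3.
 */
static struct linda_serdes_param linda_serdes_example[] __unused = {
	/* START_EQ1: write 0x10 to register 0x27 of element 7, all channels */
	{ LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 7, 0x27 ), 0x10, 0xff },
	/* End of list (zero mask) */
	LINDA_SERDES_PARAM_END
};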
2042
2043 #define LINDA_DDS_VAL( amp_d, main_d, ipst_d, ipre_d, \
2044 amp_s, main_s, ipst_s, ipre_s ) \
2045 { LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 9, 0x00 ), \
2046 ( ( ( amp_d & 0x1f ) << 1 ) | 1 ), 0xff }, \
2047 { LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 9, 0x01 ), \
2048 ( ( ( amp_s & 0x1f ) << 1 ) | 1 ), 0xff }, \
2049 { LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 9, 0x09 ), \
2050 ( ( main_d << 3 ) | 4 | ( ipre_d >> 2 ) ), 0xff }, \
2051 { LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 9, 0x0a ), \
2052 ( ( main_s << 3 ) | 4 | ( ipre_s >> 2 ) ), 0xff }, \
2053 { LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 9, 0x06 ), \
2054 ( ( ( ipst_d & 0xf ) << 1 ) | \
2055 ( ( ipre_d & 3 ) << 6 ) | 0x21 ), 0xff }, \
2056 { LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 9, 0x07 ), \
2057 ( ( ( ipst_s & 0xf ) << 1 ) | \
2058 	  ( ( ipre_s & 3 ) << 6 ) | 0x21 ), 0xff }
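
/*
 * Worked example (for reference only): the DDS values used in
 * linda_serdes_defaults2 below, LINDA_DDS_VAL ( 31, 19, 12, 0, 29, 22,
 * 9, 0 ), expand to writes of 0x3f (register 0x00), 0x3b (0x01),
 * 0x9c (0x09), 0xb4 (0x0a), 0x39 (0x06) and 0x33 (0x07), all to
 * element 9 on all channels.
 */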
2059
2060 /**
2061 * Linda SerDes default parameters
2062 *
2063 * These magic start-of-day values are taken from the Linux driver.
2064 */
2065 static struct linda_serdes_param linda_serdes_defaults1[] = {
2066 /* RXHSCTRL0 */
2067 { LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 6, 0x00 ), 0xd4, 0xff },
2068 /* VCDL_DAC2 */
2069 { LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 6, 0x05 ), 0x2d, 0xff },
2070 /* VCDL_CTRL2 */
2071 { LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 6, 0x08 ), 0x03, 0x0f },
2072 /* START_EQ1 */
2073 { LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 7, 0x27 ), 0x10, 0xff },
2074 /* START_EQ2 */
2075 { LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 7, 0x28 ), 0x30, 0xff },
2076 /* BACTRL */
2077 { LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 6, 0x0e ), 0x40, 0xff },
2078 /* LDOUTCTRL1 */
2079 { LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 7, 0x06 ), 0x04, 0xff },
2080 /* RXHSSTATUS */
2081 { LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 6, 0x0f ), 0x04, 0xff },
2082 /* End of this block */
2083 LINDA_SERDES_PARAM_END
2084 };
2085 static struct linda_serdes_param linda_serdes_defaults2[] = {
2086 /* LDOUTCTRL1 */
2087 { LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 7, 0x06 ), 0x00, 0xff },
2088 /* DDS values */
2089 LINDA_DDS_VAL ( 31, 19, 12, 0, 29, 22, 9, 0 ),
2090 	/* Set Rcv Eq. to Preset mode */
2091 { LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 7, 0x27 ), 0x10, 0xff },
2092 /* DFELTHFDR */
2093 { LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 7, 0x08 ), 0x00, 0xff },
2094 /* DFELTHHDR */
2095 { LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 7, 0x21 ), 0x00, 0xff },
2096 /* TLTHFDR */
2097 { LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 7, 0x09 ), 0x02, 0xff },
2098 /* TLTHHDR */
2099 { LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 7, 0x23 ), 0x02, 0xff },
2100 /* ZFR */
2101 { LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 7, 0x1b ), 0x0c, 0xff },
2102 	/* ZCNT */
2103 { LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 7, 0x1c ), 0x0c, 0xff },
2104 /* GFR */
2105 { LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 7, 0x1e ), 0x10, 0xff },
2106 /* GHR */
2107 { LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 7, 0x1f ), 0x10, 0xff },
2108 /* VCDL_CTRL0 toggle */
2109 { LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 6, 0x06 ), 0x20, 0xff },
2110 { LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 6, 0x06 ), 0x00, 0xff },
2111 /* CMUCTRL5 */
2112 { LINDA_EPB_ADDRESS ( 7, 0, 0x15 ), 0x80, 0xff },
2113 /* End of this block */
2114 LINDA_SERDES_PARAM_END
2115 };
2116 static struct linda_serdes_param linda_serdes_defaults3[] = {
2117 /* START_EQ1 */
2118 { LINDA_EPB_ADDRESS ( LINDA_EPB_ALL_CHANNELS, 7, 0x27 ), 0x00, 0x38 },
2119 /* End of this block */
2120 LINDA_SERDES_PARAM_END
2121 };
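
/*
 * Note: these three blocks are applied in sequence by
 * linda_init_ib_serdes() below: defaults1, then (after a short delay)
 * defaults2, then the microcontroller firmware load and optional
 * verification, then defaults3, and finally the microcontroller-driven
 * link trim.
 */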
2122
2123 /**
2124 * Program the microcontroller RAM
2125 *
2126 * @v linda Linda device
2127 * @ret rc Return status code
2128 */
2129 static int linda_program_uc_ram ( struct linda *linda ) {
2130 int rc;
2131
2132 if ( ( rc = linda_ib_epb_ram_xfer ( linda, 0, linda_ib_fw, NULL,
2133 sizeof ( linda_ib_fw ) ) ) != 0 ){
2134 DBGC ( linda, "Linda %p could not load IB firmware: %s\n",
2135 linda, strerror ( rc ) );
2136 return rc;
2137 }
2138
2139 return 0;
2140 }
2141
2142 /**
2143 * Verify the microcontroller RAM
2144 *
2145 * @v linda Linda device
2146 * @ret rc Return status code
2147 */
2148 static int linda_verify_uc_ram ( struct linda *linda ) {
2149 uint8_t verify[LINDA_EPB_UC_CHUNK_SIZE];
2150 unsigned int offset;
2151 int rc;
2152
2153 for ( offset = 0 ; offset < sizeof ( linda_ib_fw );
2154 offset += sizeof ( verify ) ) {
2155 if ( ( rc = linda_ib_epb_ram_xfer ( linda, offset,
2156 NULL, verify,
2157 sizeof (verify) )) != 0 ){
2158 DBGC ( linda, "Linda %p could not read back IB "
2159 "firmware: %s\n", linda, strerror ( rc ) );
2160 return rc;
2161 }
2162 if ( memcmp ( ( linda_ib_fw + offset ), verify,
2163 sizeof ( verify ) ) != 0 ) {
2164 DBGC ( linda, "Linda %p firmware verification failed "
2165 "at offset %#x\n", linda, offset );
2166 DBGC_HDA ( linda, offset, ( linda_ib_fw + offset ),
2167 sizeof ( verify ) );
2168 DBGC_HDA ( linda, offset, verify, sizeof ( verify ) );
2169 return -EIO;
2170 }
2171 }
2172
2173 DBGC2 ( linda, "Linda %p firmware verified ok\n", linda );
2174 return 0;
2175 }
2176
2177 /**
2178 * Use the microcontroller to trim the IB link
2179 *
2180 * @v linda Linda device
2181 * @ret rc Return status code
2182 */
2183 static int linda_trim_ib ( struct linda *linda ) {
2184 struct QIB_7220_IBSerDesCtrl ctrl;
2185 struct QIB_7220_IntStatus intstatus;
2186 unsigned int i;
2187 int rc;
2188
2189 /* Bring the microcontroller out of reset */
2190 linda_readq ( linda, &ctrl, QIB_7220_IBSerDesCtrl_offset );
2191 BIT_SET ( &ctrl, ResetIB_uC_Core, 0 );
2192 linda_writeq ( linda, &ctrl, QIB_7220_IBSerDesCtrl_offset );
2193
2194 /* Wait for the "trim done" signal */
2195 for ( i = 0 ; i < LINDA_TRIM_DONE_MAX_WAIT_MS ; i++ ) {
2196 linda_readq ( linda, &intstatus, QIB_7220_IntStatus_offset );
2197 if ( BIT_GET ( &intstatus, IBSerdesTrimDone ) ) {
2198 rc = 0;
2199 goto out_reset;
2200 }
2201 mdelay ( 1 );
2202 }
2203
2204 DBGC ( linda, "Linda %p timed out waiting for trim done\n", linda );
2205 rc = -ETIMEDOUT;
2206 out_reset:
2207 /* Put the microcontroller back into reset */
2208 BIT_SET ( &ctrl, ResetIB_uC_Core, 1 );
2209 linda_writeq ( linda, &ctrl, QIB_7220_IBSerDesCtrl_offset );
2210
2211 return rc;
2212 }
2213
2214 /**
2215 * Initialise the IB SerDes
2216 *
2217 * @v linda Linda device
2218 * @ret rc Return status code
2219 */
2220 static int linda_init_ib_serdes ( struct linda *linda ) {
2221 struct QIB_7220_Control control;
2222 struct QIB_7220_IBCCtrl ibcctrl;
2223 struct QIB_7220_IBCDDRCtrl ibcddrctrl;
2224 struct QIB_7220_XGXSCfg xgxscfg;
2225 int rc;
2226
2227 /* Disable link */
2228 linda_readq ( linda, &control, QIB_7220_Control_offset );
2229 BIT_SET ( &control, LinkEn, 0 );
2230 linda_writeq ( linda, &control, QIB_7220_Control_offset );
2231
2232 /* Configure sensible defaults for IBC */
2233 memset ( &ibcctrl, 0, sizeof ( ibcctrl ) );
2234 BIT_FILL_6 ( &ibcctrl, /* Tuning values taken from Linux driver */
2235 FlowCtrlPeriod, 0x03,
2236 FlowCtrlWaterMark, 0x05,
2237 MaxPktLen, ( ( LINDA_RECV_HEADER_SIZE +
2238 LINDA_RECV_PAYLOAD_SIZE +
2239 4 /* ICRC */ ) >> 2 ),
2240 PhyerrThreshold, 0xf,
2241 OverrunThreshold, 0xf,
2242 CreditScale, 0x4 );
2243 linda_writeq ( linda, &ibcctrl, QIB_7220_IBCCtrl_offset );
2244
2245 /* Force SDR only to avoid needing all the DDR tuning,
2246 * Mellanox compatibility hacks etc. SDR is plenty for
2247 * boot-time operation.
2248 */
2249 linda_readq ( linda, &ibcddrctrl, QIB_7220_IBCDDRCtrl_offset );
2250 BIT_SET ( &ibcddrctrl, IB_ENHANCED_MODE, 0 );
2251 BIT_SET ( &ibcddrctrl, SD_SPEED_SDR, 1 );
2252 BIT_SET ( &ibcddrctrl, SD_SPEED_DDR, 0 );
2253 BIT_SET ( &ibcddrctrl, SD_SPEED_QDR, 0 );
2254 BIT_SET ( &ibcddrctrl, HRTBT_ENB, 0 );
2255 BIT_SET ( &ibcddrctrl, HRTBT_AUTO, 0 );
2256 linda_writeq ( linda, &ibcddrctrl, QIB_7220_IBCDDRCtrl_offset );
2257
2258 /* Set default SerDes parameters */
2259 if ( ( rc = linda_set_serdes_params ( linda,
2260 linda_serdes_defaults1 ) ) != 0 )
2261 return rc;
2262 udelay ( 415 ); /* Magic delay while SerDes sorts itself out */
2263 if ( ( rc = linda_set_serdes_params ( linda,
2264 linda_serdes_defaults2 ) ) != 0 )
2265 return rc;
2266
2267 /* Program the microcontroller RAM */
2268 if ( ( rc = linda_program_uc_ram ( linda ) ) != 0 )
2269 return rc;
2270
2271 /* Verify the microcontroller RAM contents */
2272 if ( DBGLVL_LOG ) {
2273 if ( ( rc = linda_verify_uc_ram ( linda ) ) != 0 )
2274 return rc;
2275 }
2276
2277 /* More SerDes tuning */
2278 if ( ( rc = linda_set_serdes_params ( linda,
2279 linda_serdes_defaults3 ) ) != 0 )
2280 return rc;
2281
2282 /* Use the microcontroller to trim the IB link */
2283 if ( ( rc = linda_trim_ib ( linda ) ) != 0 )
2284 return rc;
2285
2286 /* Bring XGXS out of reset */
2287 linda_readq ( linda, &xgxscfg, QIB_7220_XGXSCfg_offset );
2288 BIT_SET ( &xgxscfg, tx_rx_reset, 0 );
2289 BIT_SET ( &xgxscfg, xcv_reset, 0 );
2290 linda_writeq ( linda, &xgxscfg, QIB_7220_XGXSCfg_offset );
2291
2292 return rc;
2293 }
2294
2295 /***************************************************************************
2296 *
2297 * PCI layer interface
2298 *
2299 ***************************************************************************
2300 */
2301
2302 /**
2303 * Probe PCI device
2304 *
2305 * @v pci PCI device
2307 * @ret rc Return status code
2308 */
2309 static int linda_probe ( struct pci_device *pci ) {
2310 struct ib_device *ibdev;
2311 struct linda *linda;
2312 struct QIB_7220_Revision revision;
2313 int rc;
2314
2315 /* Allocate Infiniband device */
2316 ibdev = alloc_ibdev ( sizeof ( *linda ) );
2317 if ( ! ibdev ) {
2318 rc = -ENOMEM;
2319 goto err_alloc_ibdev;
2320 }
2321 pci_set_drvdata ( pci, ibdev );
2322 linda = ib_get_drvdata ( ibdev );
2323 ibdev->op = &linda_ib_operations;
2324 ibdev->dev = &pci->dev;
2325 ibdev->port = 1;
2326
2327 /* Fix up PCI device */
2328 adjust_pci_device ( pci );
2329
2330 /* Get PCI BARs */
2331 	linda->regs = ioremap ( pci->membase, LINDA_BAR0_SIZE );
	if ( ! linda->regs ) {
		rc = -ENODEV;
		goto err_ioremap;
	}
2332 DBGC2 ( linda, "Linda %p has BAR at %08lx\n", linda, pci->membase );
2333
2334 /* Print some general data */
2335 linda_readq ( linda, &revision, QIB_7220_Revision_offset );
2336 DBGC2 ( linda, "Linda %p board %02lx v%ld.%ld.%ld.%ld\n", linda,
2337 BIT_GET ( &revision, BoardID ),
2338 BIT_GET ( &revision, R_SW ),
2339 BIT_GET ( &revision, R_Arch ),
2340 BIT_GET ( &revision, R_ChipRevMajor ),
2341 BIT_GET ( &revision, R_ChipRevMinor ) );
2342
2343 /* Record link capabilities. Note that we force SDR only to
2344 * avoid having to carry extra code for DDR tuning etc.
2345 */
2346 ibdev->link_width_enabled = ibdev->link_width_supported =
2347 ( IB_LINK_WIDTH_4X | IB_LINK_WIDTH_1X );
2348 ibdev->link_speed_enabled = ibdev->link_speed_supported =
2349 IB_LINK_SPEED_SDR;
2350
2351 /* Initialise I2C subsystem */
2352 if ( ( rc = linda_init_i2c ( linda ) ) != 0 )
2353 goto err_init_i2c;
2354
2355 /* Read EEPROM parameters */
2356 if ( ( rc = linda_read_eeprom ( linda, &ibdev->node_guid ) ) != 0 )
2357 goto err_read_eeprom;
2358 memcpy ( &ibdev->gid.s.guid, &ibdev->node_guid,
2359 sizeof ( ibdev->gid.s.guid ) );
2360
2361 /* Initialise send datapath */
2362 if ( ( rc = linda_init_send ( linda ) ) != 0 )
2363 goto err_init_send;
2364
2365 /* Initialise receive datapath */
2366 if ( ( rc = linda_init_recv ( linda ) ) != 0 )
2367 goto err_init_recv;
2368
2369 /* Initialise the IB SerDes */
2370 if ( ( rc = linda_init_ib_serdes ( linda ) ) != 0 )
2371 goto err_init_ib_serdes;
2372
2373 /* Register Infiniband device */
2374 if ( ( rc = register_ibdev ( ibdev ) ) != 0 ) {
2375 DBGC ( linda, "Linda %p could not register IB "
2376 "device: %s\n", linda, strerror ( rc ) );
2377 goto err_register_ibdev;
2378 }
2379
2380 return 0;
2381
2382 unregister_ibdev ( ibdev );
2383 err_register_ibdev:
2384 linda_fini_recv ( linda );
2385 err_init_recv:
2386 linda_fini_send ( linda );
2387 err_init_send:
2388 err_init_ib_serdes:
2389 err_read_eeprom:
2390  err_init_i2c:
 err_ioremap:
2391 ibdev_put ( ibdev );
2392 err_alloc_ibdev:
2393 return rc;
2394 }
2395
2396 /**
2397 * Remove PCI device
2398 *
2399 * @v pci PCI device
2400 */
2401 static void linda_remove ( struct pci_device *pci ) {
2402 struct ib_device *ibdev = pci_get_drvdata ( pci );
2403 struct linda *linda = ib_get_drvdata ( ibdev );
2404
2405 unregister_ibdev ( ibdev );
2406 linda_fini_recv ( linda );
2407 linda_fini_send ( linda );
2408 ibdev_put ( ibdev );
2409 }
2410
2411 static struct pci_device_id linda_nics[] = {
2412 PCI_ROM ( 0x1077, 0x7220, "iba7220", "QLE7240/7280 HCA driver", 0 ),
2413 };
2414
2415 struct pci_driver linda_driver __pci_driver = {
2416 .ids = linda_nics,
2417 .id_count = ( sizeof ( linda_nics ) / sizeof ( linda_nics[0] ) ),
2418 .probe = linda_probe,
2419 .remove = linda_remove,
2420 };