[efi] Implement the EFI_PXE_BASE_CODE_PROTOCOL
[ipxe.git] / src / drivers / infiniband / qib7322.c
1 /*
2 * Copyright (C) 2009 Michael Brown <mbrown@fensystems.co.uk>.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License as
6 * published by the Free Software Foundation; either version 2 of the
7 * License, or any later version.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
17 * 02110-1301, USA.
18 *
19 * You can also choose to distribute this program under the terms of
20 * the Unmodified Binary Distribution Licence (as given in the file
21 * COPYING.UBDL), provided that you have satisfied its requirements.
22 */
23
24 FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
25
26 #include <stdint.h>
27 #include <stdlib.h>
28 #include <errno.h>
29 #include <unistd.h>
30 #include <assert.h>
31 #include <ipxe/io.h>
32 #include <ipxe/pci.h>
33 #include <ipxe/infiniband.h>
34 #include <ipxe/i2c.h>
35 #include <ipxe/bitbash.h>
36 #include <ipxe/malloc.h>
37 #include <ipxe/iobuf.h>
38 #include <ipxe/pcibackup.h>
39 #include "qib7322.h"
40
41 /**
42 * @file
43 *
44 * QLogic QIB7322 Infiniband HCA
45 *
46 */
47
/** A QIB7322 send buffer set
 *
 * Tracks a contiguous range of PIO send buffers within register
 * space.  Buffer availability is managed as a ring of buffer indices
 * (@c avail): @c prod and @c cons are free-running counters, masked
 * by ( @c count - 1 ) only when indexing into the ring, so @c count
 * must be a power of two.
 */
struct qib7322_send_buffers {
	/** Offset within register space of the first send buffer */
	unsigned long base;
	/** Send buffer size (in bytes) */
	unsigned int size;
	/** Index of first send buffer */
	unsigned int start;
	/** Number of send buffers
	 *
	 * Must be a power of two.
	 */
	unsigned int count;
	/** Send buffer availability producer counter
	 *
	 * Incremented when a buffer is returned to the set.
	 */
	unsigned int prod;
	/** Send buffer availability consumer counter
	 *
	 * Incremented when a buffer is taken from the set.
	 */
	unsigned int cons;
	/** Send buffer availability ring (one index per buffer) */
	uint16_t avail[0];
};
68
/** A QIB7322 send work queue
 *
 * Per-queue-pair send state.  Each work queue draws buffers from a
 * shared send buffer set; @c used records, per work queue entry,
 * which send buffer that entry occupies (allocated in
 * qib7322_create_send_wq(), sized to the number of WQEs).
 */
struct qib7322_send_work_queue {
	/** Send buffer set from which this queue allocates */
	struct qib7322_send_buffers *send_bufs;
	/** Send buffer usage (one entry per work queue entry) */
	uint16_t *used;
	/** Producer index */
	unsigned int prod;
	/** Consumer index */
	unsigned int cons;
};
80
/** A QIB7322 receive work queue
 *
 * Per-context receive state.  The hardware DMAs received packet
 * headers into @c header and advances @c header_prod (whose bus
 * address is programmed into RcvHdrTailAddr); software consumes
 * entries via @c header_cons.  Payloads land in buffers posted to
 * the eager array in register space.
 */
struct qib7322_recv_work_queue {
	/** Receive header ring */
	void *header;
	/** Receive header producer offset (written by hardware) */
	struct QIB_7322_scalar header_prod;
	/** Receive header consumer offset */
	unsigned int header_cons;
	/** Offset within register space of the eager array */
	unsigned long eager_array;
	/** Number of entries in eager array */
	unsigned int eager_entries;
	/** Eager array producer index */
	unsigned int eager_prod;
	/** Eager array consumer index */
	unsigned int eager_cons;
};
98
/** A QIB7322 HCA */
struct qib7322 {
	/** Memory-mapped registers */
	void *regs;

	/** In-use contexts
	 *
	 * Non-zero entries mark allocated contexts.  Even-numbered
	 * contexts serve port 0 and odd-numbered contexts serve
	 * port 1 (see qib7322_alloc_ctx()).
	 */
	uint8_t used_ctx[QIB7322_NUM_CONTEXTS];
	/** Send work queues (indexed by context) */
	struct qib7322_send_work_queue send_wq[QIB7322_NUM_CONTEXTS];
	/** Receive work queues (indexed by context) */
	struct qib7322_recv_work_queue recv_wq[QIB7322_NUM_CONTEXTS];

	/** Send buffer availability (DMAed to us by hardware) */
	struct QIB_7322_SendBufAvail *sendbufavail;
	/** Small send buffers (shared by non-SMI queue pairs) */
	struct qib7322_send_buffers *send_bufs_small;
	/** VL15 port 0 send buffers (for SMI traffic) */
	struct qib7322_send_buffers *send_bufs_vl15_port0;
	/** VL15 port 1 send buffers (for SMI traffic) */
	struct qib7322_send_buffers *send_bufs_vl15_port1;

	/** I2C bit-bashing interface */
	struct i2c_bit_basher i2c;
	/** I2C serial EEPROM */
	struct i2c_device eeprom;

	/** Base GUID */
	union ib_guid guid;
	/** Infiniband devices (one per physical port) */
	struct ib_device *ibdev[QIB7322_MAX_PORTS];
};
130
131 /***************************************************************************
132 *
133 * QIB7322 register access
134 *
135 ***************************************************************************
136 *
137 * This card requires atomic 64-bit accesses. Strange things happen
138 * if you try to use 32-bit accesses; sometimes they work, sometimes
139 * they don't, sometimes you get random data.
140 *
141 * These accessors use the "movq" MMX instruction, and so won't work
142 * on really old Pentiums (which won't have PCIe anyway, so this is
143 * something of a moot point).
144 */
145
/**
 * Read QIB7322 qword register
 *
 * @v qib7322		QIB7322 device
 * @v dwords		Register buffer to read into
 * @v offset		Register offset
 */
static void qib7322_readq ( struct qib7322 *qib7322, uint32_t *dwords,
			    unsigned long offset ) {
	void *addr = ( qib7322->regs + offset );

	/* Use MMX "movq" to guarantee a single atomic 64-bit MMIO
	 * read; 32-bit accesses to this device are unreliable (see
	 * note above).
	 */
	__asm__ __volatile__ ( "movq (%1), %%mm0\n\t"
			       "movq %%mm0, (%0)\n\t"
			       : : "r" ( dwords ), "r" ( addr ) : "memory" );

	DBGIO ( "[%08lx] => %08x%08x\n",
		virt_to_phys ( addr ), dwords[1], dwords[0] );
}
/* These macros deliberately shadow the function defined above: all
 * subsequent call sites expand through the macro, which extracts the
 * raw dword array from a register bitfield structure pointer.  The
 * array variants index into 8-byte, 64kB or 4kB register strides
 * respectively.
 */
#define qib7322_readq( _qib7322, _ptr, _offset ) \
	qib7322_readq ( (_qib7322), (_ptr)->u.dwords, (_offset) )
#define qib7322_readq_array8b( _qib7322, _ptr, _offset, _idx ) \
	qib7322_readq ( (_qib7322), (_ptr), ( (_offset) + ( (_idx) * 8 ) ) )
#define qib7322_readq_array64k( _qib7322, _ptr, _offset, _idx ) \
	qib7322_readq ( (_qib7322), (_ptr), ( (_offset) + ( (_idx) * 65536 ) ) )
#define qib7322_readq_port( _qib7322, _ptr, _offset, _port ) \
	qib7322_readq ( (_qib7322), (_ptr), ( (_offset) + ( (_port) * 4096 ) ) )
172
/**
 * Write QIB7322 qword register
 *
 * @v qib7322		QIB7322 device
 * @v dwords		Register buffer to write
 * @v offset		Register offset
 */
static void qib7322_writeq ( struct qib7322 *qib7322, const uint32_t *dwords,
			     unsigned long offset ) {
	void *addr = ( qib7322->regs + offset );

	DBGIO ( "[%08lx] <= %08x%08x\n",
		virt_to_phys ( addr ), dwords[1], dwords[0] );

	/* Use MMX "movq" to guarantee a single atomic 64-bit MMIO
	 * write; 32-bit accesses to this device are unreliable (see
	 * note above).
	 */
	__asm__ __volatile__ ( "movq (%0), %%mm0\n\t"
			       "movq %%mm0, (%1)\n\t"
			       : : "r" ( dwords ), "r" ( addr ) : "memory" );
}
/* These macros deliberately shadow the function defined above; see
 * the corresponding qib7322_readq macros for an explanation of the
 * stride variants.
 */
#define qib7322_writeq( _qib7322, _ptr, _offset ) \
	qib7322_writeq ( (_qib7322), (_ptr)->u.dwords, (_offset) )
#define qib7322_writeq_array8b( _qib7322, _ptr, _offset, _idx ) \
	qib7322_writeq ( (_qib7322), (_ptr), ( (_offset) + ( (_idx) * 8 ) ) )
#define qib7322_writeq_array64k( _qib7322, _ptr, _offset, _idx ) \
	qib7322_writeq ( (_qib7322), (_ptr), ( (_offset) + ( (_idx) * 65536 ) ))
#define qib7322_writeq_port( _qib7322, _ptr, _offset, _port ) \
	qib7322_writeq ( (_qib7322), (_ptr), ( (_offset) + ( (_port) * 4096 ) ))
199
200 /**
201 * Write QIB7322 dword register
202 *
203 * @v qib7322 QIB7322 device
204 * @v dword Value to write
205 * @v offset Register offset
206 */
207 static void qib7322_writel ( struct qib7322 *qib7322, uint32_t dword,
208 unsigned long offset ) {
209 writel ( dword, ( qib7322->regs + offset ) );
210 }
211
212 /***************************************************************************
213 *
214 * Link state management
215 *
216 ***************************************************************************
217 */
218
219 /**
220 * Textual representation of link state
221 *
222 * @v link_state Link state
223 * @ret link_text Link state text
224 */
225 static const char * qib7322_link_state_text ( unsigned int link_state ) {
226 switch ( link_state ) {
227 case QIB7322_LINK_STATE_DOWN: return "DOWN";
228 case QIB7322_LINK_STATE_INIT: return "INIT";
229 case QIB7322_LINK_STATE_ARM: return "ARM";
230 case QIB7322_LINK_STATE_ACTIVE: return "ACTIVE";
231 case QIB7322_LINK_STATE_ACT_DEFER: return "ACT_DEFER";
232 default: return "UNKNOWN";
233 }
234 }
235
/**
 * Handle link state change
 *
 * @v ibdev		Infiniband device
 */
static void qib7322_link_state_changed ( struct ib_device *ibdev ) {
	struct qib7322 *qib7322 = ib_get_drvdata ( ibdev );
	struct QIB_7322_IBCStatusA_0 ibcstatusa;
	struct QIB_7322_EXTCtrl extctrl;
	unsigned int port = ( ibdev->port - QIB7322_PORT_BASE );
	unsigned int link_training_state;
	unsigned int link_state;
	unsigned int link_width;
	unsigned int link_speed;
	unsigned int link_speed_qdr;
	unsigned int green;
	unsigned int yellow;

	/* Read link state from the per-port IBCStatusA register */
	qib7322_readq_port ( qib7322, &ibcstatusa,
			     QIB_7322_IBCStatusA_0_offset, port );
	link_training_state = BIT_GET ( &ibcstatusa, LinkTrainingState );
	link_state = BIT_GET ( &ibcstatusa, LinkState );
	link_width = BIT_GET ( &ibcstatusa, LinkWidthActive );
	link_speed = BIT_GET ( &ibcstatusa, LinkSpeedActive );
	link_speed_qdr = BIT_GET ( &ibcstatusa, LinkSpeedQDR );
	DBGC ( qib7322, "QIB7322 %p port %d training state %#x link state %s "
	       "(%s %s)\n", qib7322, port, link_training_state,
	       qib7322_link_state_text ( link_state ),
	       ( link_speed_qdr ? "QDR" : ( link_speed ? "DDR" : "SDR" ) ),
	       ( link_width ? "x4" : "x1" ) );

	/* Set LEDs according to link state: green once the link is at
	 * least INIT, yellow once it is ACTIVE.
	 */
	qib7322_readq ( qib7322, &extctrl, QIB_7322_EXTCtrl_offset );
	green = ( ( link_state >= QIB7322_LINK_STATE_INIT ) ? 1 : 0 );
	yellow = ( ( link_state >= QIB7322_LINK_STATE_ACTIVE ) ? 1 : 0 );
	if ( port == 0 ) {
		BIT_SET ( &extctrl, LEDPort0GreenOn, green );
		BIT_SET ( &extctrl, LEDPort0YellowOn, yellow );
	} else {
		BIT_SET ( &extctrl, LEDPort1GreenOn, green );
		BIT_SET ( &extctrl, LEDPort1YellowOn, yellow );
	}
	qib7322_writeq ( qib7322, &extctrl, QIB_7322_EXTCtrl_offset );

	/* Notify Infiniband core of link state change.  IB port
	 * states are numbered one higher than the corresponding
	 * QIB7322 link states, hence the "+ 1".
	 */
	ibdev->port_state = ( link_state + 1 );
	ibdev->link_width_active =
		( link_width ? IB_LINK_WIDTH_4X : IB_LINK_WIDTH_1X );
	ibdev->link_speed_active =
		( link_speed ? IB_LINK_SPEED_DDR : IB_LINK_SPEED_SDR );
	ib_link_state_changed ( ibdev );
}
289
290 /**
291 * Wait for link state change to take effect
292 *
293 * @v ibdev Infiniband device
294 * @v new_link_state Expected link state
295 * @ret rc Return status code
296 */
297 static int qib7322_link_state_check ( struct ib_device *ibdev,
298 unsigned int new_link_state ) {
299 struct qib7322 *qib7322 = ib_get_drvdata ( ibdev );
300 struct QIB_7322_IBCStatusA_0 ibcstatusa;
301 unsigned int port = ( ibdev->port - QIB7322_PORT_BASE );
302 unsigned int link_state;
303 unsigned int i;
304
305 for ( i = 0 ; i < QIB7322_LINK_STATE_MAX_WAIT_US ; i++ ) {
306 qib7322_readq_port ( qib7322, &ibcstatusa,
307 QIB_7322_IBCStatusA_0_offset, port );
308 link_state = BIT_GET ( &ibcstatusa, LinkState );
309 if ( link_state == new_link_state )
310 return 0;
311 udelay ( 1 );
312 }
313
314 DBGC ( qib7322, "QIB7322 %p port %d timed out waiting for link state "
315 "%s\n", qib7322, port, qib7322_link_state_text ( link_state ) );
316 return -ETIMEDOUT;
317 }
318
/**
 * Set port information
 *
 * @v ibdev		Infiniband device
 * @v mad		Set port information MAD
 * @ret rc		Return status code
 */
static int qib7322_set_port_info ( struct ib_device *ibdev,
				   union ib_mad *mad ) {
	struct qib7322 *qib7322 = ib_get_drvdata ( ibdev );
	struct ib_port_info *port_info = &mad->smp.smp_data.port_info;
	struct QIB_7322_IBCCtrlA_0 ibcctrla;
	unsigned int port = ( ibdev->port - QIB7322_PORT_BASE );
	unsigned int port_state;
	unsigned int link_state;

	/* Set new link state.  The requested IB port state occupies
	 * the low nibble; zero means "no state change requested".
	 * QIB7322 link states are numbered one lower than the
	 * corresponding IB port states.
	 */
	port_state = ( port_info->link_speed_supported__port_state & 0xf );
	if ( port_state ) {
		link_state = ( port_state - 1 );
		DBGC ( qib7322, "QIB7322 %p set link state to %s (%x)\n",
		       qib7322, qib7322_link_state_text ( link_state ),
		       link_state );
		qib7322_readq_port ( qib7322, &ibcctrla,
				     QIB_7322_IBCCtrlA_0_offset, port );
		BIT_SET ( &ibcctrla, LinkCmd, link_state );
		qib7322_writeq_port ( qib7322, &ibcctrla,
				      QIB_7322_IBCCtrlA_0_offset, port );

		/* Wait for link state change to take effect.  Ignore
		 * errors; the current link state will be returned via
		 * the GetResponse MAD.
		 */
		qib7322_link_state_check ( ibdev, link_state );
	}

	/* Detect and report link state change */
	qib7322_link_state_changed ( ibdev );

	return 0;
}
359
/**
 * Set partition key table
 *
 * @v ibdev		Infiniband device
 * @v mad		Set partition key table MAD
 * @ret rc		Return status code
 *
 * Partition key checking is disabled in hardware (see
 * RcvPartitionKeyDisable in qib7322_init_recv()), so there is
 * nothing to program here; accept the MAD unconditionally.
 */
static int qib7322_set_pkey_table ( struct ib_device *ibdev __unused,
				    union ib_mad *mad __unused ) {
	/* Nothing to do */
	return 0;
}
371
372 /***************************************************************************
373 *
374 * Context allocation
375 *
376 ***************************************************************************
377 */
378
379 /**
380 * Allocate a context and set queue pair number
381 *
382 * @v ibdev Infiniband device
383 * @v qp Queue pair
384 * @ret rc Return status code
385 */
386 static int qib7322_alloc_ctx ( struct ib_device *ibdev,
387 struct ib_queue_pair *qp ) {
388 struct qib7322 *qib7322 = ib_get_drvdata ( ibdev );
389 unsigned int port = ( ibdev->port - QIB7322_PORT_BASE );
390 unsigned int ctx;
391
392 for ( ctx = port ; ctx < QIB7322_NUM_CONTEXTS ; ctx += 2 ) {
393
394 if ( ! qib7322->used_ctx[ctx] ) {
395 qib7322->used_ctx[ctx] = 1;
396 qp->qpn = ( ctx & ~0x01 );
397 DBGC2 ( qib7322, "QIB7322 %p port %d QPN %ld is CTX "
398 "%d\n", qib7322, port, qp->qpn, ctx );
399 return 0;
400 }
401 }
402
403 DBGC ( qib7322, "QIB7322 %p port %d out of available contexts\n",
404 qib7322, port );
405 return -ENOENT;
406 }
407
408 /**
409 * Get queue pair context number
410 *
411 * @v ibdev Infiniband device
412 * @v qp Queue pair
413 * @ret ctx Context index
414 */
415 static unsigned int qib7322_ctx ( struct ib_device *ibdev,
416 struct ib_queue_pair *qp ) {
417 return ( qp->qpn + ( ibdev->port - QIB7322_PORT_BASE ) );
418 }
419
/**
 * Free a context
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 */
static void qib7322_free_ctx ( struct ib_device *ibdev,
			       struct ib_queue_pair *qp ) {
	struct qib7322 *qib7322 = ib_get_drvdata ( ibdev );
	unsigned int port = ( ibdev->port - QIB7322_PORT_BASE );
	unsigned int ctx = qib7322_ctx ( ibdev, qp );

	/* Mark context as available for reallocation */
	qib7322->used_ctx[ctx] = 0;
	DBGC2 ( qib7322, "QIB7322 %p port %d CTX %d freed\n",
		qib7322, port, ctx );
}
436
437 /***************************************************************************
438 *
439 * Send datapath
440 *
441 ***************************************************************************
442 */
443
444 /** Send buffer toggle bit
445 *
446 * We encode send buffers as 15 bits of send buffer index plus a
447 * single bit which should match the "check" bit in the SendBufAvail
448 * array.
449 */
450 #define QIB7322_SEND_BUF_TOGGLE 0x8000
451
452 /**
453 * Create send buffer set
454 *
455 * @v qib7322 QIB7322 device
456 * @v base Send buffer base offset
457 * @v size Send buffer size
458 * @v start Index of first send buffer
459 * @v count Number of send buffers
460 * @ret send_bufs Send buffer set
461 */
462 static struct qib7322_send_buffers *
463 qib7322_create_send_bufs ( struct qib7322 *qib7322, unsigned long base,
464 unsigned int size, unsigned int start,
465 unsigned int count ) {
466 struct qib7322_send_buffers *send_bufs;
467 unsigned int i;
468
469 /* Allocate send buffer set */
470 send_bufs = zalloc ( sizeof ( *send_bufs ) +
471 ( count * sizeof ( send_bufs->avail[0] ) ) );
472 if ( ! send_bufs )
473 return NULL;
474
475 /* Populate send buffer set */
476 send_bufs->base = base;
477 send_bufs->size = size;
478 send_bufs->start = start;
479 send_bufs->count = count;
480 for ( i = 0 ; i < count ; i++ )
481 send_bufs->avail[i] = ( start + i );
482
483 DBGC2 ( qib7322, "QIB7322 %p send buffer set %p [%d,%d] at %lx\n",
484 qib7322, send_bufs, start, ( start + count - 1 ),
485 send_bufs->base );
486
487 return send_bufs;
488 }
489
/**
 * Destroy send buffer set
 *
 * @v qib7322		QIB7322 device
 * @v send_bufs		Send buffer set (may not be reused afterwards)
 */
static void
qib7322_destroy_send_bufs ( struct qib7322 *qib7322 __unused,
			    struct qib7322_send_buffers *send_bufs ) {
	free ( send_bufs );
}
501
502 /**
503 * Allocate a send buffer
504 *
505 * @v qib7322 QIB7322 device
506 * @v send_bufs Send buffer set
507 * @ret send_buf Send buffer, or negative error
508 */
509 static int qib7322_alloc_send_buf ( struct qib7322 *qib7322,
510 struct qib7322_send_buffers *send_bufs ) {
511 unsigned int used;
512 unsigned int mask;
513 unsigned int send_buf;
514
515 used = ( send_bufs->cons - send_bufs->prod );
516 if ( used >= send_bufs->count ) {
517 DBGC ( qib7322, "QIB7322 %p send buffer set %p out of "
518 "buffers\n", qib7322, send_bufs );
519 return -ENOBUFS;
520 }
521
522 mask = ( send_bufs->count - 1 );
523 send_buf = send_bufs->avail[ send_bufs->cons++ & mask ];
524 send_buf ^= QIB7322_SEND_BUF_TOGGLE;
525 return send_buf;
526 }
527
528 /**
529 * Free a send buffer
530 *
531 * @v qib7322 QIB7322 device
532 * @v send_bufs Send buffer set
533 * @v send_buf Send buffer
534 */
535 static void qib7322_free_send_buf ( struct qib7322 *qib7322 __unused,
536 struct qib7322_send_buffers *send_bufs,
537 unsigned int send_buf ) {
538 unsigned int mask;
539
540 mask = ( send_bufs->count - 1 );
541 send_bufs->avail[ send_bufs->prod++ & mask ] = send_buf;
542 }
543
/**
 * Check to see if send buffer is in use
 *
 * @v qib7322		QIB7322 device
 * @v send_buf		Send buffer
 * @ret in_use		Send buffer is in use
 */
static int qib7322_send_buf_in_use ( struct qib7322 *qib7322,
				     unsigned int send_buf ) {
	unsigned int send_idx;
	unsigned int send_check;
	unsigned int inusecheck;
	unsigned int inuse;
	unsigned int check;

	/* Split our encoded buffer into its 15-bit index and its
	 * toggle ("check") bit.
	 */
	send_idx = ( send_buf & ~QIB7322_SEND_BUF_TOGGLE );
	send_check = ( !! ( send_buf & QIB7322_SEND_BUF_TOGGLE ) );
	/* Each InUseCheck entry is a two-bit field: bit 1 is the
	 * in-use flag, bit 0 is the hardware's check bit.
	 */
	inusecheck = BIT_GET ( qib7322->sendbufavail, InUseCheck[send_idx] );
	inuse = ( !! ( inusecheck & 0x02 ) );
	check = ( !! ( inusecheck & 0x01 ) );
	/* Buffer is busy if the hardware reports it in use, or if the
	 * check bit has not yet caught up with our toggle bit.
	 */
	return ( inuse || ( check != send_check ) );
}
566
567 /**
568 * Calculate starting offset for send buffer
569 *
570 * @v qib7322 QIB7322 device
571 * @v send_buf Send buffer
572 * @ret offset Starting offset
573 */
574 static unsigned long
575 qib7322_send_buffer_offset ( struct qib7322 *qib7322 __unused,
576 struct qib7322_send_buffers *send_bufs,
577 unsigned int send_buf ) {
578 unsigned int index;
579
580 index = ( ( send_buf & ~QIB7322_SEND_BUF_TOGGLE ) - send_bufs->start );
581 return ( send_bufs->base + ( index * send_bufs->size ) );
582 }
583
584 /**
585 * Create send work queue
586 *
587 * @v ibdev Infiniband device
588 * @v qp Queue pair
589 */
590 static int qib7322_create_send_wq ( struct ib_device *ibdev,
591 struct ib_queue_pair *qp ) {
592 struct qib7322 *qib7322 = ib_get_drvdata ( ibdev );
593 struct ib_work_queue *wq = &qp->send;
594 struct qib7322_send_work_queue *qib7322_wq = ib_wq_get_drvdata ( wq );
595 unsigned int port = ( ibdev->port - QIB7322_PORT_BASE );
596
597 /* Select send buffer set */
598 if ( qp->type == IB_QPT_SMI ) {
599 if ( port == 0 ) {
600 qib7322_wq->send_bufs = qib7322->send_bufs_vl15_port0;
601 } else {
602 qib7322_wq->send_bufs = qib7322->send_bufs_vl15_port1;
603 }
604 } else {
605 qib7322_wq->send_bufs = qib7322->send_bufs_small;
606 }
607
608 /* Allocate space for send buffer usage list */
609 qib7322_wq->used = zalloc ( qp->send.num_wqes *
610 sizeof ( qib7322_wq->used[0] ) );
611 if ( ! qib7322_wq->used )
612 return -ENOMEM;
613
614 /* Reset work queue */
615 qib7322_wq->prod = 0;
616 qib7322_wq->cons = 0;
617
618 return 0;
619 }
620
/**
 * Destroy send work queue
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 */
static void qib7322_destroy_send_wq ( struct ib_device *ibdev __unused,
				      struct ib_queue_pair *qp ) {
	struct ib_work_queue *wq = &qp->send;
	struct qib7322_send_work_queue *qib7322_wq = ib_wq_get_drvdata ( wq );

	/* Free the per-WQE usage list allocated in qib7322_create_send_wq() */
	free ( qib7322_wq->used );
}
634
635 /**
636 * Initialise send datapath
637 *
638 * @v qib7322 QIB7322 device
639 * @ret rc Return status code
640 */
641 static int qib7322_init_send ( struct qib7322 *qib7322 ) {
642 struct QIB_7322_SendBufBase sendbufbase;
643 struct QIB_7322_SendBufAvailAddr sendbufavailaddr;
644 struct QIB_7322_SendCtrl sendctrl;
645 struct QIB_7322_SendCtrl_0 sendctrlp;
646 unsigned long baseaddr_smallpio;
647 unsigned long baseaddr_largepio;
648 unsigned long baseaddr_vl15_port0;
649 unsigned long baseaddr_vl15_port1;
650 int rc;
651
652 /* Create send buffer sets */
653 qib7322_readq ( qib7322, &sendbufbase, QIB_7322_SendBufBase_offset );
654 baseaddr_smallpio = BIT_GET ( &sendbufbase, BaseAddr_SmallPIO );
655 baseaddr_largepio = BIT_GET ( &sendbufbase, BaseAddr_LargePIO );
656 baseaddr_vl15_port0 = ( baseaddr_largepio +
657 ( QIB7322_LARGE_SEND_BUF_SIZE *
658 QIB7322_LARGE_SEND_BUF_COUNT ) );
659 baseaddr_vl15_port1 = ( baseaddr_vl15_port0 +
660 QIB7322_VL15_PORT0_SEND_BUF_SIZE );
661 qib7322->send_bufs_small =
662 qib7322_create_send_bufs ( qib7322, baseaddr_smallpio,
663 QIB7322_SMALL_SEND_BUF_SIZE,
664 QIB7322_SMALL_SEND_BUF_START,
665 QIB7322_SMALL_SEND_BUF_USED );
666 if ( ! qib7322->send_bufs_small ) {
667 rc = -ENOMEM;
668 goto err_create_send_bufs_small;
669 }
670 qib7322->send_bufs_vl15_port0 =
671 qib7322_create_send_bufs ( qib7322, baseaddr_vl15_port0,
672 QIB7322_VL15_PORT0_SEND_BUF_SIZE,
673 QIB7322_VL15_PORT0_SEND_BUF_START,
674 QIB7322_VL15_PORT0_SEND_BUF_COUNT );
675 if ( ! qib7322->send_bufs_vl15_port0 ) {
676 rc = -ENOMEM;
677 goto err_create_send_bufs_vl15_port0;
678 }
679 qib7322->send_bufs_vl15_port1 =
680 qib7322_create_send_bufs ( qib7322, baseaddr_vl15_port1,
681 QIB7322_VL15_PORT1_SEND_BUF_SIZE,
682 QIB7322_VL15_PORT1_SEND_BUF_START,
683 QIB7322_VL15_PORT1_SEND_BUF_COUNT );
684 if ( ! qib7322->send_bufs_vl15_port1 ) {
685 rc = -ENOMEM;
686 goto err_create_send_bufs_vl15_port1;
687 }
688
689 /* Allocate space for the SendBufAvail array */
690 qib7322->sendbufavail = malloc_dma ( sizeof ( *qib7322->sendbufavail ),
691 QIB7322_SENDBUFAVAIL_ALIGN );
692 if ( ! qib7322->sendbufavail ) {
693 rc = -ENOMEM;
694 goto err_alloc_sendbufavail;
695 }
696 memset ( qib7322->sendbufavail, 0, sizeof ( qib7322->sendbufavail ) );
697
698 /* Program SendBufAvailAddr into the hardware */
699 memset ( &sendbufavailaddr, 0, sizeof ( sendbufavailaddr ) );
700 BIT_FILL_1 ( &sendbufavailaddr, SendBufAvailAddr,
701 ( virt_to_bus ( qib7322->sendbufavail ) >> 6 ) );
702 qib7322_writeq ( qib7322, &sendbufavailaddr,
703 QIB_7322_SendBufAvailAddr_offset );
704
705 /* Enable sending */
706 memset ( &sendctrlp, 0, sizeof ( sendctrlp ) );
707 BIT_FILL_1 ( &sendctrlp, SendEnable, 1 );
708 qib7322_writeq ( qib7322, &sendctrlp, QIB_7322_SendCtrl_0_offset );
709 qib7322_writeq ( qib7322, &sendctrlp, QIB_7322_SendCtrl_1_offset );
710
711 /* Enable DMA of SendBufAvail */
712 memset ( &sendctrl, 0, sizeof ( sendctrl ) );
713 BIT_FILL_1 ( &sendctrl, SendBufAvailUpd, 1 );
714 qib7322_writeq ( qib7322, &sendctrl, QIB_7322_SendCtrl_offset );
715
716 return 0;
717
718 free_dma ( qib7322->sendbufavail, sizeof ( *qib7322->sendbufavail ) );
719 err_alloc_sendbufavail:
720 qib7322_destroy_send_bufs ( qib7322, qib7322->send_bufs_vl15_port1 );
721 err_create_send_bufs_vl15_port1:
722 qib7322_destroy_send_bufs ( qib7322, qib7322->send_bufs_vl15_port0 );
723 err_create_send_bufs_vl15_port0:
724 qib7322_destroy_send_bufs ( qib7322, qib7322->send_bufs_small );
725 err_create_send_bufs_small:
726 return rc;
727 }
728
/**
 * Shut down send datapath
 *
 * @v qib7322		QIB7322 device
 */
static void qib7322_fini_send ( struct qib7322 *qib7322 ) {
	struct QIB_7322_SendCtrl sendctrl;

	/* Disable sending and DMA of SendBufAvail by clearing the
	 * whole SendCtrl register.
	 */
	memset ( &sendctrl, 0, sizeof ( sendctrl ) );
	qib7322_writeq ( qib7322, &sendctrl, QIB_7322_SendCtrl_offset );
	mb();

	/* Ensure hardware has seen this disable before freeing the
	 * DMA buffer underneath it (the read-back flushes the posted
	 * write).
	 */
	qib7322_readq ( qib7322, &sendctrl, QIB_7322_SendCtrl_offset );

	free_dma ( qib7322->sendbufavail, sizeof ( *qib7322->sendbufavail ) );
	qib7322_destroy_send_bufs ( qib7322, qib7322->send_bufs_vl15_port1 );
	qib7322_destroy_send_bufs ( qib7322, qib7322->send_bufs_vl15_port0 );
	qib7322_destroy_send_bufs ( qib7322, qib7322->send_bufs_small );
}
750
751 /***************************************************************************
752 *
753 * Receive datapath
754 *
755 ***************************************************************************
756 */
757
/**
 * Create receive work queue
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @ret rc		Return status code
 */
static int qib7322_create_recv_wq ( struct ib_device *ibdev,
				    struct ib_queue_pair *qp ) {
	struct qib7322 *qib7322 = ib_get_drvdata ( ibdev );
	struct ib_work_queue *wq = &qp->recv;
	struct qib7322_recv_work_queue *qib7322_wq = ib_wq_get_drvdata ( wq );
	struct QIB_7322_RcvHdrAddr0 rcvhdraddr;
	struct QIB_7322_RcvHdrTailAddr0 rcvhdrtailaddr;
	struct QIB_7322_RcvHdrHead0 rcvhdrhead;
	struct QIB_7322_scalar rcvegrindexhead;
	struct QIB_7322_RcvCtrl rcvctrl;
	struct QIB_7322_RcvCtrl_P rcvctrlp;
	unsigned int port = ( ibdev->port - QIB7322_PORT_BASE );
	unsigned int ctx = qib7322_ctx ( ibdev, qp );
	int rc;

	/* Reset context information */
	memset ( &qib7322_wq->header_prod, 0,
		 sizeof ( qib7322_wq->header_prod ) );
	qib7322_wq->header_cons = 0;
	qib7322_wq->eager_prod = 0;
	qib7322_wq->eager_cons = 0;

	/* Allocate DMA-able receive header buffer */
	qib7322_wq->header = malloc_dma ( QIB7322_RECV_HEADERS_SIZE,
					  QIB7322_RECV_HEADERS_ALIGN );
	if ( ! qib7322_wq->header ) {
		rc = -ENOMEM;
		goto err_alloc_header;
	}

	/* Enable context in hardware.  The address registers take
	 * 4-byte-aligned bus addresses, hence the ">> 2" shifts; the
	 * hardware writes the producer offset directly into
	 * qib7322_wq->header_prod via RcvHdrTailAddr.
	 */
	memset ( &rcvhdraddr, 0, sizeof ( rcvhdraddr ) );
	BIT_FILL_1 ( &rcvhdraddr, RcvHdrAddr,
		     ( virt_to_bus ( qib7322_wq->header ) >> 2 ) );
	qib7322_writeq_array8b ( qib7322, &rcvhdraddr,
				 QIB_7322_RcvHdrAddr0_offset, ctx );
	memset ( &rcvhdrtailaddr, 0, sizeof ( rcvhdrtailaddr ) );
	BIT_FILL_1 ( &rcvhdrtailaddr, RcvHdrTailAddr,
		     ( virt_to_bus ( &qib7322_wq->header_prod ) >> 2 ) );
	qib7322_writeq_array8b ( qib7322, &rcvhdrtailaddr,
				 QIB_7322_RcvHdrTailAddr0_offset, ctx );
	memset ( &rcvhdrhead, 0, sizeof ( rcvhdrhead ) );
	BIT_FILL_1 ( &rcvhdrhead, counter, 1 );
	qib7322_writeq_array64k ( qib7322, &rcvhdrhead,
				  QIB_7322_RcvHdrHead0_offset, ctx );
	memset ( &rcvegrindexhead, 0, sizeof ( rcvegrindexhead ) );
	BIT_FILL_1 ( &rcvegrindexhead, Value, 1 );
	qib7322_writeq_array64k ( qib7322, &rcvegrindexhead,
				  QIB_7322_RcvEgrIndexHead0_offset, ctx );
	/* Enable the context on its port, then enable its interrupt */
	qib7322_readq_port ( qib7322, &rcvctrlp,
			     QIB_7322_RcvCtrl_0_offset, port );
	BIT_SET ( &rcvctrlp, ContextEnable[ctx], 1 );
	qib7322_writeq_port ( qib7322, &rcvctrlp,
			      QIB_7322_RcvCtrl_0_offset, port );
	qib7322_readq ( qib7322, &rcvctrl, QIB_7322_RcvCtrl_offset );
	BIT_SET ( &rcvctrl, IntrAvail[ctx], 1 );
	qib7322_writeq ( qib7322, &rcvctrl, QIB_7322_RcvCtrl_offset );

	DBGC ( qib7322, "QIB7322 %p port %d QPN %ld CTX %d hdrs [%lx,%lx) prod "
	       "%lx\n", qib7322, port, qp->qpn, ctx,
	       virt_to_bus ( qib7322_wq->header ),
	       ( virt_to_bus ( qib7322_wq->header )
		 + QIB7322_RECV_HEADERS_SIZE ),
	       virt_to_bus ( &qib7322_wq->header_prod ) );
	return 0;

	/* Unreachable on the success path; documents the teardown
	 * sequence for the error exit below.
	 */
	free_dma ( qib7322_wq->header, QIB7322_RECV_HEADERS_SIZE );
 err_alloc_header:
	return rc;
}
835
/**
 * Destroy receive work queue
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 */
static void qib7322_destroy_recv_wq ( struct ib_device *ibdev,
				      struct ib_queue_pair *qp ) {
	struct qib7322 *qib7322 = ib_get_drvdata ( ibdev );
	struct ib_work_queue *wq = &qp->recv;
	struct qib7322_recv_work_queue *qib7322_wq = ib_wq_get_drvdata ( wq );
	struct QIB_7322_RcvCtrl rcvctrl;
	struct QIB_7322_RcvCtrl_P rcvctrlp;
	unsigned int port = ( ibdev->port - QIB7322_PORT_BASE );
	unsigned int ctx = qib7322_ctx ( ibdev, qp );

	/* Disable context in hardware (mirror image of the enable
	 * sequence in qib7322_create_recv_wq()).
	 */
	qib7322_readq_port ( qib7322, &rcvctrlp,
			     QIB_7322_RcvCtrl_0_offset, port );
	BIT_SET ( &rcvctrlp, ContextEnable[ctx], 0 );
	qib7322_writeq_port ( qib7322, &rcvctrlp,
			      QIB_7322_RcvCtrl_0_offset, port );
	qib7322_readq ( qib7322, &rcvctrl, QIB_7322_RcvCtrl_offset );
	BIT_SET ( &rcvctrl, IntrAvail[ctx], 0 );
	qib7322_writeq ( qib7322, &rcvctrl, QIB_7322_RcvCtrl_offset );

	/* Make sure the hardware has seen that the context is
	 * disabled before freeing the header ring it may still be
	 * DMAing into (the read-back flushes the posted write).
	 */
	qib7322_readq ( qib7322, &rcvctrl, QIB_7322_RcvCtrl_offset );
	mb();

	/* Free headers ring */
	free_dma ( qib7322_wq->header, QIB7322_RECV_HEADERS_SIZE );
}
869
870 /**
871 * Initialise receive datapath
872 *
873 * @v qib7322 QIB7322 device
874 * @ret rc Return status code
875 */
876 static int qib7322_init_recv ( struct qib7322 *qib7322 ) {
877 struct QIB_7322_RcvCtrl rcvctrl;
878 struct QIB_7322_RcvCtrl_0 rcvctrlp;
879 struct QIB_7322_RcvQPMapTableA_0 rcvqpmaptablea0;
880 struct QIB_7322_RcvQPMapTableB_0 rcvqpmaptableb0;
881 struct QIB_7322_RcvQPMapTableA_1 rcvqpmaptablea1;
882 struct QIB_7322_RcvQPMapTableB_1 rcvqpmaptableb1;
883 struct QIB_7322_RcvQPMulticastContext_0 rcvqpmcastctx0;
884 struct QIB_7322_RcvQPMulticastContext_1 rcvqpmcastctx1;
885 struct QIB_7322_scalar rcvegrbase;
886 struct QIB_7322_scalar rcvhdrentsize;
887 struct QIB_7322_scalar rcvhdrcnt;
888 struct QIB_7322_RcvBTHQP_0 rcvbthqp;
889 struct QIB_7322_RxCreditVL0_0 rxcreditvl;
890 unsigned int contextcfg;
891 unsigned long egrbase;
892 unsigned int eager_array_size_kernel;
893 unsigned int eager_array_size_user;
894 unsigned int ctx;
895
896 /* Select configuration based on number of contexts */
897 switch ( QIB7322_NUM_CONTEXTS ) {
898 case 6:
899 contextcfg = QIB7322_CONTEXTCFG_6CTX;
900 eager_array_size_kernel = QIB7322_EAGER_ARRAY_SIZE_6CTX_KERNEL;
901 eager_array_size_user = QIB7322_EAGER_ARRAY_SIZE_6CTX_USER;
902 break;
903 case 10:
904 contextcfg = QIB7322_CONTEXTCFG_10CTX;
905 eager_array_size_kernel = QIB7322_EAGER_ARRAY_SIZE_10CTX_KERNEL;
906 eager_array_size_user = QIB7322_EAGER_ARRAY_SIZE_10CTX_USER;
907 break;
908 case 18:
909 contextcfg = QIB7322_CONTEXTCFG_18CTX;
910 eager_array_size_kernel = QIB7322_EAGER_ARRAY_SIZE_18CTX_KERNEL;
911 eager_array_size_user = QIB7322_EAGER_ARRAY_SIZE_18CTX_USER;
912 break;
913 default:
914 linker_assert ( 0, invalid_QIB7322_NUM_CONTEXTS );
915 return -EINVAL;
916 }
917
918 /* Configure number of contexts */
919 memset ( &rcvctrl, 0, sizeof ( rcvctrl ) );
920 BIT_FILL_2 ( &rcvctrl,
921 TailUpd, 1,
922 ContextCfg, contextcfg );
923 qib7322_writeq ( qib7322, &rcvctrl, QIB_7322_RcvCtrl_offset );
924
925 /* Map QPNs to contexts */
926 memset ( &rcvctrlp, 0, sizeof ( rcvctrlp ) );
927 BIT_FILL_3 ( &rcvctrlp,
928 RcvIBPortEnable, 1,
929 RcvQPMapEnable, 1,
930 RcvPartitionKeyDisable, 1 );
931 qib7322_writeq ( qib7322, &rcvctrlp, QIB_7322_RcvCtrl_0_offset );
932 qib7322_writeq ( qib7322, &rcvctrlp, QIB_7322_RcvCtrl_1_offset );
933 memset ( &rcvqpmaptablea0, 0, sizeof ( rcvqpmaptablea0 ) );
934 BIT_FILL_6 ( &rcvqpmaptablea0,
935 RcvQPMapContext0, 0,
936 RcvQPMapContext1, 2,
937 RcvQPMapContext2, 4,
938 RcvQPMapContext3, 6,
939 RcvQPMapContext4, 8,
940 RcvQPMapContext5, 10 );
941 qib7322_writeq ( qib7322, &rcvqpmaptablea0,
942 QIB_7322_RcvQPMapTableA_0_offset );
943 memset ( &rcvqpmaptableb0, 0, sizeof ( rcvqpmaptableb0 ) );
944 BIT_FILL_3 ( &rcvqpmaptableb0,
945 RcvQPMapContext6, 12,
946 RcvQPMapContext7, 14,
947 RcvQPMapContext8, 16 );
948 qib7322_writeq ( qib7322, &rcvqpmaptableb0,
949 QIB_7322_RcvQPMapTableB_0_offset );
950 memset ( &rcvqpmaptablea1, 0, sizeof ( rcvqpmaptablea1 ) );
951 BIT_FILL_6 ( &rcvqpmaptablea1,
952 RcvQPMapContext0, 1,
953 RcvQPMapContext1, 3,
954 RcvQPMapContext2, 5,
955 RcvQPMapContext3, 7,
956 RcvQPMapContext4, 9,
957 RcvQPMapContext5, 11 );
958 qib7322_writeq ( qib7322, &rcvqpmaptablea1,
959 QIB_7322_RcvQPMapTableA_1_offset );
960 memset ( &rcvqpmaptableb1, 0, sizeof ( rcvqpmaptableb1 ) );
961 BIT_FILL_3 ( &rcvqpmaptableb1,
962 RcvQPMapContext6, 13,
963 RcvQPMapContext7, 15,
964 RcvQPMapContext8, 17 );
965 qib7322_writeq ( qib7322, &rcvqpmaptableb1,
966 QIB_7322_RcvQPMapTableB_1_offset );
967
968 /* Map multicast QPNs to contexts */
969 memset ( &rcvqpmcastctx0, 0, sizeof ( rcvqpmcastctx0 ) );
970 BIT_FILL_1 ( &rcvqpmcastctx0, RcvQpMcContext, 0 );
971 qib7322_writeq ( qib7322, &rcvqpmcastctx0,
972 QIB_7322_RcvQPMulticastContext_0_offset );
973 memset ( &rcvqpmcastctx1, 0, sizeof ( rcvqpmcastctx1 ) );
974 BIT_FILL_1 ( &rcvqpmcastctx1, RcvQpMcContext, 1 );
975 qib7322_writeq ( qib7322, &rcvqpmcastctx1,
976 QIB_7322_RcvQPMulticastContext_1_offset );
977
978 /* Configure receive header buffer sizes */
979 memset ( &rcvhdrcnt, 0, sizeof ( rcvhdrcnt ) );
980 BIT_FILL_1 ( &rcvhdrcnt, Value, QIB7322_RECV_HEADER_COUNT );
981 qib7322_writeq ( qib7322, &rcvhdrcnt, QIB_7322_RcvHdrCnt_offset );
982 memset ( &rcvhdrentsize, 0, sizeof ( rcvhdrentsize ) );
983 BIT_FILL_1 ( &rcvhdrentsize, Value, ( QIB7322_RECV_HEADER_SIZE >> 2 ) );
984 qib7322_writeq ( qib7322, &rcvhdrentsize,
985 QIB_7322_RcvHdrEntSize_offset );
986
987 /* Calculate eager array start addresses for each context */
988 qib7322_readq ( qib7322, &rcvegrbase, QIB_7322_RcvEgrBase_offset );
989 egrbase = BIT_GET ( &rcvegrbase, Value );
990 for ( ctx = 0 ; ctx < QIB7322_MAX_PORTS ; ctx++ ) {
991 qib7322->recv_wq[ctx].eager_array = egrbase;
992 qib7322->recv_wq[ctx].eager_entries = eager_array_size_kernel;
993 egrbase += ( eager_array_size_kernel *
994 sizeof ( struct QIB_7322_RcvEgr ) );
995 }
996 for ( ; ctx < QIB7322_NUM_CONTEXTS ; ctx++ ) {
997 qib7322->recv_wq[ctx].eager_array = egrbase;
998 qib7322->recv_wq[ctx].eager_entries = eager_array_size_user;
999 egrbase += ( eager_array_size_user *
1000 sizeof ( struct QIB_7322_RcvEgr ) );
1001 }
1002 for ( ctx = 0 ; ctx < QIB7322_NUM_CONTEXTS ; ctx++ ) {
1003 DBGC ( qib7322, "QIB7322 %p CTX %d eager array at %lx (%d "
1004 "entries)\n", qib7322, ctx,
1005 qib7322->recv_wq[ctx].eager_array,
1006 qib7322->recv_wq[ctx].eager_entries );
1007 }
1008
1009 /* Set the BTH QP for Infinipath packets to an unused value */
1010 memset ( &rcvbthqp, 0, sizeof ( rcvbthqp ) );
1011 BIT_FILL_1 ( &rcvbthqp, RcvBTHQP, QIB7322_QP_IDETH );
1012 qib7322_writeq ( qib7322, &rcvbthqp, QIB_7322_RcvBTHQP_0_offset );
1013 qib7322_writeq ( qib7322, &rcvbthqp, QIB_7322_RcvBTHQP_1_offset );
1014
1015 /* Assign initial credits */
1016 memset ( &rxcreditvl, 0, sizeof ( rxcreditvl ) );
1017 BIT_FILL_1 ( &rxcreditvl, RxMaxCreditVL, QIB7322_MAX_CREDITS_VL0 );
1018 qib7322_writeq_array8b ( qib7322, &rxcreditvl,
1019 QIB_7322_RxCreditVL0_0_offset, 0 );
1020 qib7322_writeq_array8b ( qib7322, &rxcreditvl,
1021 QIB_7322_RxCreditVL0_1_offset, 0 );
1022 BIT_FILL_1 ( &rxcreditvl, RxMaxCreditVL, QIB7322_MAX_CREDITS_VL15 );
1023 qib7322_writeq_array8b ( qib7322, &rxcreditvl,
1024 QIB_7322_RxCreditVL0_0_offset, 15 );
1025 qib7322_writeq_array8b ( qib7322, &rxcreditvl,
1026 QIB_7322_RxCreditVL0_1_offset, 15 );
1027
1028 return 0;
1029 }
1030
1031 /**
1032 * Shut down receive datapath
1033 *
1034 * @v qib7322 QIB7322 device
1035 */
1036 static void qib7322_fini_recv ( struct qib7322 *qib7322 __unused ) {
1037 /* Nothing to do; all contexts were already disabled when the
1038 * queue pairs were destroyed
1039 */
1040 }
1041
1042 /***************************************************************************
1043 *
1044 * Completion queue operations
1045 *
1046 ***************************************************************************
1047 */
1048
1049 /**
1050 * Create completion queue
1051 *
1052 * @v ibdev Infiniband device
1053 * @v cq Completion queue
1054 * @ret rc Return status code
1055 */
1056 static int qib7322_create_cq ( struct ib_device *ibdev,
1057 struct ib_completion_queue *cq ) {
1058 struct qib7322 *qib7322 = ib_get_drvdata ( ibdev );
1059 static int cqn;
1060
1061 /* The hardware has no concept of completion queues. We
1062 * simply use the association between CQs and WQs (already
1063 * handled by the IB core) to decide which WQs to poll.
1064 *
1065 * We do set a CQN, just to avoid confusing debug messages
1066 * from the IB core.
1067 */
1068 cq->cqn = ++cqn;
1069 DBGC ( qib7322, "QIB7322 %p CQN %ld created\n", qib7322, cq->cqn );
1070
1071 return 0;
1072 }
1073
1074 /**
1075 * Destroy completion queue
1076 *
1077 * @v ibdev Infiniband device
1078 * @v cq Completion queue
1079 */
1080 static void qib7322_destroy_cq ( struct ib_device *ibdev,
1081 struct ib_completion_queue *cq ) {
1082 struct qib7322 *qib7322 = ib_get_drvdata ( ibdev );
1083
1084 /* Nothing to do */
1085 DBGC ( qib7322, "QIB7322 %p CQN %ld destroyed\n", qib7322, cq->cqn );
1086 }
1087
1088 /***************************************************************************
1089 *
1090 * Queue pair operations
1091 *
1092 ***************************************************************************
1093 */
1094
1095 /**
1096 * Create queue pair
1097 *
1098 * @v ibdev Infiniband device
1099 * @v qp Queue pair
1100 * @ret rc Return status code
1101 */
1102 static int qib7322_create_qp ( struct ib_device *ibdev,
1103 struct ib_queue_pair *qp ) {
1104 struct qib7322 *qib7322 = ib_get_drvdata ( ibdev );
1105 unsigned int ctx;
1106 int rc;
1107
1108 /* Allocate a context and QPN */
1109 if ( ( rc = qib7322_alloc_ctx ( ibdev, qp ) ) != 0 )
1110 goto err_alloc_ctx;
1111 ctx = qib7322_ctx ( ibdev, qp );
1112
1113 /* Set work-queue private data pointers */
1114 ib_wq_set_drvdata ( &qp->send, &qib7322->send_wq[ctx] );
1115 ib_wq_set_drvdata ( &qp->recv, &qib7322->recv_wq[ctx] );
1116
1117 /* Create receive work queue */
1118 if ( ( rc = qib7322_create_recv_wq ( ibdev, qp ) ) != 0 )
1119 goto err_create_recv_wq;
1120
1121 /* Create send work queue */
1122 if ( ( rc = qib7322_create_send_wq ( ibdev, qp ) ) != 0 )
1123 goto err_create_send_wq;
1124
1125 return 0;
1126
1127 qib7322_destroy_send_wq ( ibdev, qp );
1128 err_create_send_wq:
1129 qib7322_destroy_recv_wq ( ibdev, qp );
1130 err_create_recv_wq:
1131 qib7322_free_ctx ( ibdev, qp );
1132 err_alloc_ctx:
1133 return rc;
1134 }
1135
1136 /**
1137 * Modify queue pair
1138 *
1139 * @v ibdev Infiniband device
1140 * @v qp Queue pair
1141 * @ret rc Return status code
1142 */
1143 static int qib7322_modify_qp ( struct ib_device *ibdev,
1144 struct ib_queue_pair *qp ) {
1145 struct qib7322 *qib7322 = ib_get_drvdata ( ibdev );
1146
1147 /* Nothing to do; the hardware doesn't have a notion of queue
1148 * keys
1149 */
1150 DBGC2 ( qib7322, "QIB7322 %p QPN %ld modified\n", qib7322, qp->qpn );
1151 return 0;
1152 }
1153
1154 /**
1155 * Destroy queue pair
1156 *
1157 * @v ibdev Infiniband device
1158 * @v qp Queue pair
1159 */
1160 static void qib7322_destroy_qp ( struct ib_device *ibdev,
1161 struct ib_queue_pair *qp ) {
1162
1163 qib7322_destroy_send_wq ( ibdev, qp );
1164 qib7322_destroy_recv_wq ( ibdev, qp );
1165 qib7322_free_ctx ( ibdev, qp );
1166 }
1167
1168 /***************************************************************************
1169 *
1170 * Work request operations
1171 *
1172 ***************************************************************************
1173 */
1174
1175 /**
1176 * Post send work queue entry
1177 *
1178 * @v ibdev Infiniband device
1179 * @v qp Queue pair
1180 * @v dest Destination address vector
1181 * @v iobuf I/O buffer
1182 * @ret rc Return status code
1183 */
1184 static int qib7322_post_send ( struct ib_device *ibdev,
1185 struct ib_queue_pair *qp,
1186 struct ib_address_vector *dest,
1187 struct io_buffer *iobuf ) {
1188 struct qib7322 *qib7322 = ib_get_drvdata ( ibdev );
1189 struct ib_work_queue *wq = &qp->send;
1190 struct qib7322_send_work_queue *qib7322_wq = ib_wq_get_drvdata ( wq );
1191 struct QIB_7322_SendPbc sendpbc;
1192 unsigned int port = ( ibdev->port - QIB7322_PORT_BASE );
1193 uint8_t header_buf[IB_MAX_HEADER_SIZE];
1194 struct io_buffer headers;
1195 int send_buf;
1196 unsigned long start_offset;
1197 unsigned long offset;
1198 size_t len;
1199 ssize_t frag_len;
1200 uint32_t *data;
1201
1202 /* Allocate send buffer and calculate offset */
1203 send_buf = qib7322_alloc_send_buf ( qib7322, qib7322_wq->send_bufs );
1204 if ( send_buf < 0 )
1205 return send_buf;
1206 start_offset = offset =
1207 qib7322_send_buffer_offset ( qib7322, qib7322_wq->send_bufs,
1208 send_buf );
1209
1210 /* Store I/O buffer and send buffer index */
1211 assert ( wq->iobufs[qib7322_wq->prod] == NULL );
1212 wq->iobufs[qib7322_wq->prod] = iobuf;
1213 qib7322_wq->used[qib7322_wq->prod] = send_buf;
1214
1215 /* Construct headers */
1216 iob_populate ( &headers, header_buf, 0, sizeof ( header_buf ) );
1217 iob_reserve ( &headers, sizeof ( header_buf ) );
1218 ib_push ( ibdev, &headers, qp, iob_len ( iobuf ), dest );
1219
1220 /* Calculate packet length */
1221 len = ( ( sizeof ( sendpbc ) + iob_len ( &headers ) +
1222 iob_len ( iobuf ) + 3 ) & ~3 );
1223
1224 /* Construct send per-buffer control word */
1225 memset ( &sendpbc, 0, sizeof ( sendpbc ) );
1226 BIT_FILL_3 ( &sendpbc,
1227 LengthP1_toibc, ( ( len >> 2 ) - 1 ),
1228 Port, port,
1229 VL15, ( ( qp->type == IB_QPT_SMI ) ? 1 : 0 ) );
1230
1231 /* Write SendPbc */
1232 DBG_DISABLE ( DBGLVL_IO );
1233 qib7322_writeq ( qib7322, &sendpbc, offset );
1234 offset += sizeof ( sendpbc );
1235
1236 /* Write headers */
1237 for ( data = headers.data, frag_len = iob_len ( &headers ) ;
1238 frag_len > 0 ; data++, offset += 4, frag_len -= 4 ) {
1239 qib7322_writel ( qib7322, *data, offset );
1240 }
1241
1242 /* Write data */
1243 for ( data = iobuf->data, frag_len = iob_len ( iobuf ) ;
1244 frag_len > 0 ; data++, offset += 4, frag_len -= 4 ) {
1245 qib7322_writel ( qib7322, *data, offset );
1246 }
1247 DBG_ENABLE ( DBGLVL_IO );
1248
1249 assert ( ( start_offset + len ) == offset );
1250 DBGC2 ( qib7322, "QIB7322 %p QPN %ld TX %04x(%04x) posted [%lx,%lx)\n",
1251 qib7322, qp->qpn, send_buf, qib7322_wq->prod,
1252 start_offset, offset );
1253
1254 /* Increment producer counter */
1255 qib7322_wq->prod = ( ( qib7322_wq->prod + 1 ) & ( wq->num_wqes - 1 ) );
1256
1257 return 0;
1258 }
1259
1260 /**
1261 * Complete send work queue entry
1262 *
1263 * @v ibdev Infiniband device
1264 * @v qp Queue pair
1265 * @v wqe_idx Work queue entry index
1266 */
1267 static void qib7322_complete_send ( struct ib_device *ibdev,
1268 struct ib_queue_pair *qp,
1269 unsigned int wqe_idx ) {
1270 struct qib7322 *qib7322 = ib_get_drvdata ( ibdev );
1271 struct ib_work_queue *wq = &qp->send;
1272 struct qib7322_send_work_queue *qib7322_wq = ib_wq_get_drvdata ( wq );
1273 struct io_buffer *iobuf;
1274 unsigned int send_buf;
1275
1276 /* Parse completion */
1277 send_buf = qib7322_wq->used[wqe_idx];
1278 DBGC2 ( qib7322, "QIB7322 %p QPN %ld TX %04x(%04x) complete\n",
1279 qib7322, qp->qpn, send_buf, wqe_idx );
1280
1281 /* Complete work queue entry */
1282 iobuf = wq->iobufs[wqe_idx];
1283 assert ( iobuf != NULL );
1284 ib_complete_send ( ibdev, qp, iobuf, 0 );
1285 wq->iobufs[wqe_idx] = NULL;
1286
1287 /* Free send buffer */
1288 qib7322_free_send_buf ( qib7322, qib7322_wq->send_bufs, send_buf );
1289 }
1290
1291 /**
1292 * Poll send work queue
1293 *
1294 * @v ibdev Infiniband device
1295 * @v qp Queue pair
1296 */
1297 static void qib7322_poll_send_wq ( struct ib_device *ibdev,
1298 struct ib_queue_pair *qp ) {
1299 struct qib7322 *qib7322 = ib_get_drvdata ( ibdev );
1300 struct ib_work_queue *wq = &qp->send;
1301 struct qib7322_send_work_queue *qib7322_wq = ib_wq_get_drvdata ( wq );
1302 unsigned int send_buf;
1303
1304 /* Look for completions */
1305 while ( wq->fill ) {
1306
1307 /* Check to see if send buffer has completed */
1308 send_buf = qib7322_wq->used[qib7322_wq->cons];
1309 if ( qib7322_send_buf_in_use ( qib7322, send_buf ) )
1310 break;
1311
1312 /* Complete this buffer */
1313 qib7322_complete_send ( ibdev, qp, qib7322_wq->cons );
1314
1315 /* Increment consumer counter */
1316 qib7322_wq->cons = ( ( qib7322_wq->cons + 1 ) &
1317 ( wq->num_wqes - 1 ) );
1318 }
1319 }
1320
1321 /**
1322 * Post receive work queue entry
1323 *
1324 * @v ibdev Infiniband device
1325 * @v qp Queue pair
1326 * @v iobuf I/O buffer
1327 * @ret rc Return status code
1328 */
1329 static int qib7322_post_recv ( struct ib_device *ibdev,
1330 struct ib_queue_pair *qp,
1331 struct io_buffer *iobuf ) {
1332 struct qib7322 *qib7322 = ib_get_drvdata ( ibdev );
1333 struct ib_work_queue *wq = &qp->recv;
1334 struct qib7322_recv_work_queue *qib7322_wq = ib_wq_get_drvdata ( wq );
1335 struct QIB_7322_RcvEgr rcvegr;
1336 struct QIB_7322_scalar rcvegrindexhead;
1337 unsigned int ctx = qib7322_ctx ( ibdev, qp );
1338 physaddr_t addr;
1339 size_t len;
1340 unsigned int wqe_idx;
1341 unsigned int bufsize;
1342
1343 /* Sanity checks */
1344 addr = virt_to_bus ( iobuf->data );
1345 len = iob_tailroom ( iobuf );
1346 if ( addr & ( QIB7322_EAGER_BUFFER_ALIGN - 1 ) ) {
1347 DBGC ( qib7322, "QIB7322 %p QPN %ld misaligned RX buffer "
1348 "(%08lx)\n", qib7322, qp->qpn, addr );
1349 return -EINVAL;
1350 }
1351 if ( len != QIB7322_RECV_PAYLOAD_SIZE ) {
1352 DBGC ( qib7322, "QIB7322 %p QPN %ld wrong RX buffer size "
1353 "(%zd)\n", qib7322, qp->qpn, len );
1354 return -EINVAL;
1355 }
1356
1357 /* Calculate eager producer index and WQE index */
1358 wqe_idx = ( qib7322_wq->eager_prod & ( wq->num_wqes - 1 ) );
1359 assert ( wq->iobufs[wqe_idx] == NULL );
1360
1361 /* Store I/O buffer */
1362 wq->iobufs[wqe_idx] = iobuf;
1363
1364 /* Calculate buffer size */
1365 switch ( QIB7322_RECV_PAYLOAD_SIZE ) {
1366 case 2048: bufsize = QIB7322_EAGER_BUFFER_2K; break;
1367 case 4096: bufsize = QIB7322_EAGER_BUFFER_4K; break;
1368 case 8192: bufsize = QIB7322_EAGER_BUFFER_8K; break;
1369 case 16384: bufsize = QIB7322_EAGER_BUFFER_16K; break;
1370 case 32768: bufsize = QIB7322_EAGER_BUFFER_32K; break;
1371 case 65536: bufsize = QIB7322_EAGER_BUFFER_64K; break;
1372 default: linker_assert ( 0, invalid_rx_payload_size );
1373 bufsize = QIB7322_EAGER_BUFFER_NONE;
1374 }
1375
1376 /* Post eager buffer */
1377 memset ( &rcvegr, 0, sizeof ( rcvegr ) );
1378 BIT_FILL_2 ( &rcvegr,
1379 Addr, ( addr >> 11 ),
1380 BufSize, bufsize );
1381 qib7322_writeq_array8b ( qib7322, &rcvegr, qib7322_wq->eager_array,
1382 qib7322_wq->eager_prod );
1383 DBGC2 ( qib7322, "QIB7322 %p QPN %ld RX egr %04x(%04x) posted "
1384 "[%lx,%lx)\n", qib7322, qp->qpn, qib7322_wq->eager_prod,
1385 wqe_idx, addr, ( addr + len ) );
1386
1387 /* Increment producer index */
1388 qib7322_wq->eager_prod = ( ( qib7322_wq->eager_prod + 1 ) &
1389 ( qib7322_wq->eager_entries - 1 ) );
1390
1391 /* Update head index */
1392 memset ( &rcvegrindexhead, 0, sizeof ( rcvegrindexhead ) );
1393 BIT_FILL_1 ( &rcvegrindexhead,
1394 Value, ( ( qib7322_wq->eager_prod + 1 ) &
1395 ( qib7322_wq->eager_entries - 1 ) ) );
1396 qib7322_writeq_array64k ( qib7322, &rcvegrindexhead,
1397 QIB_7322_RcvEgrIndexHead0_offset, ctx );
1398
1399 return 0;
1400 }
1401
1402 /**
1403 * Complete receive work queue entry
1404 *
1405 * @v ibdev Infiniband device
1406 * @v qp Queue pair
1407 * @v header_offs Header offset
1408 */
1409 static void qib7322_complete_recv ( struct ib_device *ibdev,
1410 struct ib_queue_pair *qp,
1411 unsigned int header_offs ) {
1412 struct qib7322 *qib7322 = ib_get_drvdata ( ibdev );
1413 struct ib_work_queue *wq = &qp->recv;
1414 struct qib7322_recv_work_queue *qib7322_wq = ib_wq_get_drvdata ( wq );
1415 struct QIB_7322_RcvHdrFlags *rcvhdrflags;
1416 struct QIB_7322_RcvEgr rcvegr;
1417 struct io_buffer headers;
1418 struct io_buffer *iobuf;
1419 struct ib_queue_pair *intended_qp;
1420 struct ib_address_vector dest;
1421 struct ib_address_vector source;
1422 unsigned int rcvtype;
1423 unsigned int pktlen;
1424 unsigned int egrindex;
1425 unsigned int useegrbfr;
1426 unsigned int iberr, mkerr, tiderr, khdrerr, mtuerr;
1427 unsigned int lenerr, parityerr, vcrcerr, icrcerr;
1428 unsigned int err;
1429 unsigned int hdrqoffset;
1430 unsigned int header_len;
1431 unsigned int padded_payload_len;
1432 unsigned int wqe_idx;
1433 size_t payload_len;
1434 int qp0;
1435 int rc;
1436
1437 /* RcvHdrFlags are at the end of the header entry */
1438 rcvhdrflags = ( qib7322_wq->header + header_offs +
1439 QIB7322_RECV_HEADER_SIZE - sizeof ( *rcvhdrflags ) );
1440 rcvtype = BIT_GET ( rcvhdrflags, RcvType );
1441 pktlen = ( BIT_GET ( rcvhdrflags, PktLen ) << 2 );
1442 egrindex = BIT_GET ( rcvhdrflags, EgrIndex );
1443 useegrbfr = BIT_GET ( rcvhdrflags, UseEgrBfr );
1444 hdrqoffset = ( BIT_GET ( rcvhdrflags, HdrqOffset ) << 2 );
1445 iberr = BIT_GET ( rcvhdrflags, IBErr );
1446 mkerr = BIT_GET ( rcvhdrflags, MKErr );
1447 tiderr = BIT_GET ( rcvhdrflags, TIDErr );
1448 khdrerr = BIT_GET ( rcvhdrflags, KHdrErr );
1449 mtuerr = BIT_GET ( rcvhdrflags, MTUErr );
1450 lenerr = BIT_GET ( rcvhdrflags, LenErr );
1451 parityerr = BIT_GET ( rcvhdrflags, ParityErr );
1452 vcrcerr = BIT_GET ( rcvhdrflags, VCRCErr );
1453 icrcerr = BIT_GET ( rcvhdrflags, ICRCErr );
1454 header_len = ( QIB7322_RECV_HEADER_SIZE - hdrqoffset -
1455 sizeof ( *rcvhdrflags ) );
1456 padded_payload_len = ( pktlen - header_len - 4 /* ICRC */ );
1457 err = ( iberr | mkerr | tiderr | khdrerr | mtuerr |
1458 lenerr | parityerr | vcrcerr | icrcerr );
1459 /* IB header is placed immediately before RcvHdrFlags */
1460 iob_populate ( &headers, ( ( ( void * ) rcvhdrflags ) - header_len ),
1461 header_len, header_len );
1462
1463 /* Dump diagnostic information */
1464 DBGC2 ( qib7322, "QIB7322 %p QPN %ld RX egr %04x%s hdr %d type %d len "
1465 "%d(%d+%d+4)%s%s%s%s%s%s%s%s%s%s%s\n", qib7322, qp->qpn,
1466 egrindex, ( useegrbfr ? "" : "(unused)" ),
1467 ( header_offs / QIB7322_RECV_HEADER_SIZE ),
1468 rcvtype, pktlen, header_len, padded_payload_len,
1469 ( err ? " [Err" : "" ), ( iberr ? " IB" : "" ),
1470 ( mkerr ? " MK" : "" ), ( tiderr ? " TID" : "" ),
1471 ( khdrerr ? " KHdr" : "" ), ( mtuerr ? " MTU" : "" ),
1472 ( lenerr ? " Len" : "" ), ( parityerr ? " Parity" : ""),
1473 ( vcrcerr ? " VCRC" : "" ), ( icrcerr ? " ICRC" : "" ),
1474 ( err ? "]" : "" ) );
1475 DBGCP_HDA ( qib7322, hdrqoffset, headers.data,
1476 ( header_len + sizeof ( *rcvhdrflags ) ) );
1477
1478 /* Parse header to generate address vector */
1479 qp0 = ( qp->qpn == 0 );
1480 intended_qp = NULL;
1481 if ( ( rc = ib_pull ( ibdev, &headers, ( qp0 ? &intended_qp : NULL ),
1482 &payload_len, &dest, &source ) ) != 0 ) {
1483 DBGC ( qib7322, "QIB7322 %p could not parse headers: %s\n",
1484 qib7322, strerror ( rc ) );
1485 err = 1;
1486 }
1487 if ( ! intended_qp )
1488 intended_qp = qp;
1489
1490 /* Complete this buffer and any skipped buffers. Note that
1491 * when the hardware runs out of buffers, it will repeatedly
1492 * report the same buffer (the tail) as a TID error, and that
1493 * it also has a habit of sometimes skipping over several
1494 * buffers at once.
1495 */
1496 while ( 1 ) {
1497
1498 /* If we have caught up to the producer counter, stop.
1499 * This will happen when the hardware first runs out
1500 * of buffers and starts reporting TID errors against
1501 * the eager buffer it wants to use next.
1502 */
1503 if ( qib7322_wq->eager_cons == qib7322_wq->eager_prod )
1504 break;
1505
1506 /* If we have caught up to where we should be after
1507 * completing this egrindex, stop. We phrase the test
1508 * this way to avoid completing the entire ring when
1509 * we receive the same egrindex twice in a row.
1510 */
1511 if ( ( qib7322_wq->eager_cons ==
1512 ( ( egrindex + 1 ) & ( qib7322_wq->eager_entries - 1 ))))
1513 break;
1514
1515 /* Identify work queue entry and corresponding I/O
1516 * buffer.
1517 */
1518 wqe_idx = ( qib7322_wq->eager_cons & ( wq->num_wqes - 1 ) );
1519 iobuf = wq->iobufs[wqe_idx];
1520 assert ( iobuf != NULL );
1521 wq->iobufs[wqe_idx] = NULL;
1522
1523 /* Complete the eager buffer */
1524 if ( qib7322_wq->eager_cons == egrindex ) {
1525 /* Completing the eager buffer described in
1526 * this header entry.
1527 */
1528 iob_put ( iobuf, payload_len );
1529 rc = ( err ? -EIO : ( useegrbfr ? 0 : -ECANCELED ) );
1530 /* Redirect to target QP if necessary */
1531 if ( qp != intended_qp ) {
1532 DBGC2 ( qib7322, "QIB7322 %p redirecting QPN "
1533 "%ld => %ld\n",
1534 qib7322, qp->qpn, intended_qp->qpn );
1535 /* Compensate for incorrect fill levels */
1536 qp->recv.fill--;
1537 intended_qp->recv.fill++;
1538 }
1539 ib_complete_recv ( ibdev, intended_qp, &dest, &source,
1540 iobuf, rc);
1541 } else {
1542 /* Completing on a skipped-over eager buffer */
1543 ib_complete_recv ( ibdev, qp, &dest, &source, iobuf,
1544 -ECANCELED );
1545 }
1546
1547 /* Clear eager buffer */
1548 memset ( &rcvegr, 0, sizeof ( rcvegr ) );
1549 qib7322_writeq_array8b ( qib7322, &rcvegr,
1550 qib7322_wq->eager_array,
1551 qib7322_wq->eager_cons );
1552
1553 /* Increment consumer index */
1554 qib7322_wq->eager_cons = ( ( qib7322_wq->eager_cons + 1 ) &
1555 ( qib7322_wq->eager_entries - 1 ) );
1556 }
1557 }
1558
1559 /**
1560 * Poll receive work queue
1561 *
1562 * @v ibdev Infiniband device
1563 * @v qp Queue pair
1564 */
1565 static void qib7322_poll_recv_wq ( struct ib_device *ibdev,
1566 struct ib_queue_pair *qp ) {
1567 struct qib7322 *qib7322 = ib_get_drvdata ( ibdev );
1568 struct ib_work_queue *wq = &qp->recv;
1569 struct qib7322_recv_work_queue *qib7322_wq = ib_wq_get_drvdata ( wq );
1570 struct QIB_7322_RcvHdrHead0 rcvhdrhead;
1571 unsigned int ctx = qib7322_ctx ( ibdev, qp );
1572 unsigned int header_prod;
1573
1574 /* Check for received packets */
1575 header_prod = ( BIT_GET ( &qib7322_wq->header_prod, Value ) << 2 );
1576 if ( header_prod == qib7322_wq->header_cons )
1577 return;
1578
1579 /* Process all received packets */
1580 while ( qib7322_wq->header_cons != header_prod ) {
1581
1582 /* Complete the receive */
1583 qib7322_complete_recv ( ibdev, qp, qib7322_wq->header_cons );
1584
1585 /* Increment the consumer offset */
1586 qib7322_wq->header_cons += QIB7322_RECV_HEADER_SIZE;
1587 qib7322_wq->header_cons %= QIB7322_RECV_HEADERS_SIZE;
1588
1589 /* QIB7322 has only one send buffer per port for VL15,
1590 * which almost always leads to send buffer exhaustion
1591 * and dropped MADs. Mitigate this by refusing to
1592 * process more than one VL15 MAD per poll, which will
1593 * enforce interleaved TX/RX polls.
1594 */
1595 if ( qp->type == IB_QPT_SMI )
1596 break;
1597 }
1598
1599 /* Update consumer offset */
1600 memset ( &rcvhdrhead, 0, sizeof ( rcvhdrhead ) );
1601 BIT_FILL_2 ( &rcvhdrhead,
1602 RcvHeadPointer, ( qib7322_wq->header_cons >> 2 ),
1603 counter, 1 );
1604 qib7322_writeq_array64k ( qib7322, &rcvhdrhead,
1605 QIB_7322_RcvHdrHead0_offset, ctx );
1606 }
1607
1608 /**
1609 * Poll completion queue
1610 *
1611 * @v ibdev Infiniband device
1612 * @v cq Completion queue
1613 */
1614 static void qib7322_poll_cq ( struct ib_device *ibdev,
1615 struct ib_completion_queue *cq ) {
1616 struct ib_work_queue *wq;
1617
1618 /* Poll associated send and receive queues */
1619 list_for_each_entry ( wq, &cq->work_queues, list ) {
1620 if ( wq->is_send ) {
1621 qib7322_poll_send_wq ( ibdev, wq->qp );
1622 } else {
1623 qib7322_poll_recv_wq ( ibdev, wq->qp );
1624 }
1625 }
1626 }
1627
1628 /***************************************************************************
1629 *
1630 * Event queues
1631 *
1632 ***************************************************************************
1633 */
1634
1635 /**
1636 * Poll event queue
1637 *
1638 * @v ibdev Infiniband device
1639 */
1640 static void qib7322_poll_eq ( struct ib_device *ibdev ) {
1641 struct qib7322 *qib7322 = ib_get_drvdata ( ibdev );
1642 struct QIB_7322_ErrStatus_0 errstatus;
1643 unsigned int port = ( ibdev->port - QIB7322_PORT_BASE );
1644
1645 /* Check for and clear status bits */
1646 DBG_DISABLE ( DBGLVL_IO );
1647 qib7322_readq_port ( qib7322, &errstatus,
1648 QIB_7322_ErrStatus_0_offset, port );
1649 if ( errstatus.u.qwords[0] ) {
1650 DBGC ( qib7322, "QIB7322 %p port %d status %08x%08x\n", qib7322,
1651 port, errstatus.u.dwords[1], errstatus.u.dwords[0] );
1652 qib7322_writeq_port ( qib7322, &errstatus,
1653 QIB_7322_ErrClear_0_offset, port );
1654 }
1655 DBG_ENABLE ( DBGLVL_IO );
1656
1657 /* Check for link status changes */
1658 if ( BIT_GET ( &errstatus, IBStatusChanged ) )
1659 qib7322_link_state_changed ( ibdev );
1660 }
1661
1662 /***************************************************************************
1663 *
1664 * Infiniband link-layer operations
1665 *
1666 ***************************************************************************
1667 */
1668
1669 /**
1670 * Determine supported link speeds
1671 *
1672 * @v qib7322 QIB7322 device
1673 * @ret supported Supported link speeds
1674 */
1675 static unsigned int qib7322_link_speed_supported ( struct qib7322 *qib7322,
1676 unsigned int port ) {
1677 struct QIB_7322_feature_mask features;
1678 struct QIB_7322_Revision revision;
1679 unsigned int supported;
1680 unsigned int boardid;
1681
1682 /* Read the active feature mask */
1683 qib7322_readq ( qib7322, &features,
1684 QIB_7322_active_feature_mask_offset );
1685 switch ( port ) {
1686 case 0 :
1687 supported = BIT_GET ( &features, Port0_Link_Speed_Supported );
1688 break;
1689 case 1 :
1690 supported = BIT_GET ( &features, Port1_Link_Speed_Supported );
1691 break;
1692 default:
1693 DBGC ( qib7322, "QIB7322 %p port %d is invalid\n",
1694 qib7322, port );
1695 supported = 0;
1696 break;
1697 }
1698
1699 /* Apply hacks for specific board IDs */
1700 qib7322_readq ( qib7322, &revision, QIB_7322_Revision_offset );
1701 boardid = BIT_GET ( &revision, BoardID );
1702 switch ( boardid ) {
1703 case QIB7322_BOARD_QMH7342 :
1704 DBGC2 ( qib7322, "QIB7322 %p is a QMH7342; forcing QDR-only\n",
1705 qib7322 );
1706 supported = IB_LINK_SPEED_QDR;
1707 break;
1708 default:
1709 /* Do nothing */
1710 break;
1711 }
1712
1713 DBGC2 ( qib7322, "QIB7322 %p port %d %s%s%s%s\n", qib7322, port,
1714 ( supported ? "supports" : "disabled" ),
1715 ( ( supported & IB_LINK_SPEED_SDR ) ? " SDR" : "" ),
1716 ( ( supported & IB_LINK_SPEED_DDR ) ? " DDR" : "" ),
1717 ( ( supported & IB_LINK_SPEED_QDR ) ? " QDR" : "" ) );
1718 return supported;
1719 }
1720
1721 /**
1722 * Initialise Infiniband link
1723 *
1724 * @v ibdev Infiniband device
1725 * @ret rc Return status code
1726 */
1727 static int qib7322_open ( struct ib_device *ibdev ) {
1728 struct qib7322 *qib7322 = ib_get_drvdata ( ibdev );
1729 struct QIB_7322_IBCCtrlA_0 ibcctrla;
1730 unsigned int port = ( ibdev->port - QIB7322_PORT_BASE );
1731
1732 /* Enable link */
1733 qib7322_readq_port ( qib7322, &ibcctrla,
1734 QIB_7322_IBCCtrlA_0_offset, port );
1735 BIT_SET ( &ibcctrla, IBLinkEn, 1 );
1736 qib7322_writeq_port ( qib7322, &ibcctrla,
1737 QIB_7322_IBCCtrlA_0_offset, port );
1738
1739 return 0;
1740 }
1741
1742 /**
1743 * Close Infiniband link
1744 *
1745 * @v ibdev Infiniband device
1746 */
1747 static void qib7322_close ( struct ib_device *ibdev ) {
1748 struct qib7322 *qib7322 = ib_get_drvdata ( ibdev );
1749 struct QIB_7322_IBCCtrlA_0 ibcctrla;
1750 unsigned int port = ( ibdev->port - QIB7322_PORT_BASE );
1751
1752 /* Disable link */
1753 qib7322_readq_port ( qib7322, &ibcctrla,
1754 QIB_7322_IBCCtrlA_0_offset, port );
1755 BIT_SET ( &ibcctrla, IBLinkEn, 0 );
1756 qib7322_writeq_port ( qib7322, &ibcctrla,
1757 QIB_7322_IBCCtrlA_0_offset, port );
1758 }
1759
1760 /***************************************************************************
1761 *
1762 * Multicast group operations
1763 *
1764 ***************************************************************************
1765 */
1766
1767 /**
1768 * Attach to multicast group
1769 *
1770 * @v ibdev Infiniband device
1771 * @v qp Queue pair
1772 * @v gid Multicast GID
1773 * @ret rc Return status code
1774 */
1775 static int qib7322_mcast_attach ( struct ib_device *ibdev,
1776 struct ib_queue_pair *qp,
1777 union ib_gid *gid ) {
1778 struct qib7322 *qib7322 = ib_get_drvdata ( ibdev );
1779
1780 ( void ) qib7322;
1781 ( void ) qp;
1782 ( void ) gid;
1783 return 0;
1784 }
1785
1786 /**
1787 * Detach from multicast group
1788 *
1789 * @v ibdev Infiniband device
1790 * @v qp Queue pair
1791 * @v gid Multicast GID
1792 */
1793 static void qib7322_mcast_detach ( struct ib_device *ibdev,
1794 struct ib_queue_pair *qp,
1795 union ib_gid *gid ) {
1796 struct qib7322 *qib7322 = ib_get_drvdata ( ibdev );
1797
1798 ( void ) qib7322;
1799 ( void ) qp;
1800 ( void ) gid;
1801 }
1802
/** QIB7322 Infiniband operations
 *
 * Method table handed to the IB core; qib7322_set_port_info and
 * qib7322_set_pkey_table are defined elsewhere in this driver.
 */
static struct ib_device_operations qib7322_ib_operations = {
	.create_cq	= qib7322_create_cq,
	.destroy_cq	= qib7322_destroy_cq,
	.create_qp	= qib7322_create_qp,
	.modify_qp	= qib7322_modify_qp,
	.destroy_qp	= qib7322_destroy_qp,
	.post_send	= qib7322_post_send,
	.post_recv	= qib7322_post_recv,
	.poll_cq	= qib7322_poll_cq,
	.poll_eq	= qib7322_poll_eq,
	.open		= qib7322_open,
	.close		= qib7322_close,
	.mcast_attach	= qib7322_mcast_attach,
	.mcast_detach	= qib7322_mcast_detach,
	.set_port_info	= qib7322_set_port_info,
	.set_pkey_table	= qib7322_set_pkey_table,
};
1821
1822 /***************************************************************************
1823 *
1824 * I2C bus operations
1825 *
1826 ***************************************************************************
1827 */
1828
/** QIB7322 I2C bit to GPIO mappings
 *
 * Maps the abstract I2C bit identifiers (SCL/SDA) onto the GPIO pin
 * masks used in the EXTCtrl/EXTStatus/GPIOOut registers.
 */
static unsigned int qib7322_i2c_bits[] = {
	[I2C_BIT_SCL] = ( 1 << QIB7322_GPIO_SCL ),
	[I2C_BIT_SDA] = ( 1 << QIB7322_GPIO_SDA ),
};
1834
1835 /**
1836 * Read QIB7322 I2C line status
1837 *
1838 * @v basher Bit-bashing interface
1839 * @v bit_id Bit number
1840 * @ret zero Input is a logic 0
1841 * @ret non-zero Input is a logic 1
1842 */
1843 static int qib7322_i2c_read_bit ( struct bit_basher *basher,
1844 unsigned int bit_id ) {
1845 struct qib7322 *qib7322 =
1846 container_of ( basher, struct qib7322, i2c.basher );
1847 struct QIB_7322_EXTStatus extstatus;
1848 unsigned int status;
1849
1850 DBG_DISABLE ( DBGLVL_IO );
1851
1852 qib7322_readq ( qib7322, &extstatus, QIB_7322_EXTStatus_offset );
1853 status = ( BIT_GET ( &extstatus, GPIOIn ) & qib7322_i2c_bits[bit_id] );
1854
1855 DBG_ENABLE ( DBGLVL_IO );
1856
1857 return status;
1858 }
1859
1860 /**
1861 * Write QIB7322 I2C line status
1862 *
1863 * @v basher Bit-bashing interface
1864 * @v bit_id Bit number
1865 * @v data Value to write
1866 */
1867 static void qib7322_i2c_write_bit ( struct bit_basher *basher,
1868 unsigned int bit_id, unsigned long data ) {
1869 struct qib7322 *qib7322 =
1870 container_of ( basher, struct qib7322, i2c.basher );
1871 struct QIB_7322_EXTCtrl extctrl;
1872 struct QIB_7322_GPIO gpioout;
1873 unsigned int bit = qib7322_i2c_bits[bit_id];
1874 unsigned int outputs = 0;
1875 unsigned int output_enables = 0;
1876
1877 DBG_DISABLE ( DBGLVL_IO );
1878
1879 /* Read current GPIO mask and outputs */
1880 qib7322_readq ( qib7322, &extctrl, QIB_7322_EXTCtrl_offset );
1881 qib7322_readq ( qib7322, &gpioout, QIB_7322_GPIOOut_offset );
1882
1883 /* Update outputs and output enables. I2C lines are tied
1884 * high, so we always set the output to 0 and use the output
1885 * enable to control the line.
1886 */
1887 output_enables = BIT_GET ( &extctrl, GPIOOe );
1888 output_enables = ( ( output_enables & ~bit ) | ( ~data & bit ) );
1889 outputs = BIT_GET ( &gpioout, GPIO );
1890 outputs = ( outputs & ~bit );
1891 BIT_SET ( &extctrl, GPIOOe, output_enables );
1892 BIT_SET ( &gpioout, GPIO, outputs );
1893
1894 /* Write the output enable first; that way we avoid logic
1895 * hazards.
1896 */
1897 qib7322_writeq ( qib7322, &extctrl, QIB_7322_EXTCtrl_offset );
1898 qib7322_writeq ( qib7322, &gpioout, QIB_7322_GPIOOut_offset );
1899 mb();
1900
1901 DBG_ENABLE ( DBGLVL_IO );
1902 }
1903
/** QIB7322 I2C bit-bashing interface operations
 *
 * Lets the generic I2C bit-bashing code drive the bus lines via the
 * QIB7322 GPIO registers (see qib7322_i2c_read_bit() and
 * qib7322_i2c_write_bit()).
 */
static struct bit_basher_operations qib7322_i2c_basher_ops = {
	.read = qib7322_i2c_read_bit,
	.write = qib7322_i2c_write_bit,
};
1909
1910 /**
1911 * Initialise QIB7322 I2C subsystem
1912 *
1913 * @v qib7322 QIB7322 device
1914 * @ret rc Return status code
1915 */
1916 static int qib7322_init_i2c ( struct qib7322 *qib7322 ) {
1917 static int try_eeprom_address[] = { 0x51, 0x50 };
1918 unsigned int i;
1919 int rc;
1920
1921 /* Initialise bus */
1922 if ( ( rc = init_i2c_bit_basher ( &qib7322->i2c,
1923 &qib7322_i2c_basher_ops ) ) != 0 ) {
1924 DBGC ( qib7322, "QIB7322 %p could not initialise I2C bus: %s\n",
1925 qib7322, strerror ( rc ) );
1926 return rc;
1927 }
1928
1929 /* Probe for devices */
1930 for ( i = 0 ; i < ( sizeof ( try_eeprom_address ) /
1931 sizeof ( try_eeprom_address[0] ) ) ; i++ ) {
1932 init_i2c_eeprom ( &qib7322->eeprom, try_eeprom_address[i] );
1933 if ( ( rc = i2c_check_presence ( &qib7322->i2c.i2c,
1934 &qib7322->eeprom ) ) == 0 ) {
1935 DBGC2 ( qib7322, "QIB7322 %p found EEPROM at %02x\n",
1936 qib7322, try_eeprom_address[i] );
1937 return 0;
1938 }
1939 }
1940
1941 DBGC ( qib7322, "QIB7322 %p could not find EEPROM\n", qib7322 );
1942 return -ENODEV;
1943 }
1944
/**
 * Read EEPROM parameters
 *
 * @v qib7322		QIB7322 device
 * @ret rc		Return status code
 *
 * Reads the node GUID into qib7322->guid, and (when debug logging is
 * enabled) the board serial number.
 */
static int qib7322_read_eeprom ( struct qib7322 *qib7322 ) {
	struct i2c_interface *i2c = &qib7322->i2c.i2c;
	union ib_guid *guid = &qib7322->guid;
	int rc;

	/* Read GUID */
	if ( ( rc = i2c->read ( i2c, &qib7322->eeprom,
				QIB7322_EEPROM_GUID_OFFSET, guid->bytes,
				sizeof ( *guid ) ) ) != 0 ) {
		DBGC ( qib7322, "QIB7322 %p could not read GUID: %s\n",
		       qib7322, strerror ( rc ) );
		return rc;
	}
	DBGC2 ( qib7322, "QIB7322 %p has GUID " IB_GUID_FMT "\n",
		qib7322, IB_GUID_ARGS ( guid ) );

	/* Read serial number (debug only) */
	if ( DBG_LOG ) {
		uint8_t serial[QIB7322_EEPROM_SERIAL_SIZE + 1];

		/* Pre-terminate: only ( sizeof ( serial ) - 1 ) bytes
		 * are read from the EEPROM, so the final byte stays
		 * '\0' and the buffer is always a valid string.
		 */
		serial[ sizeof ( serial ) - 1 ] = '\0';
		if ( ( rc = i2c->read ( i2c, &qib7322->eeprom,
					QIB7322_EEPROM_SERIAL_OFFSET, serial,
					( sizeof ( serial ) - 1 ) ) ) != 0 ) {
			DBGC ( qib7322, "QIB7322 %p could not read serial: "
			       "%s\n", qib7322, strerror ( rc ) );
			return rc;
		}
		DBGC2 ( qib7322, "QIB7322 %p has serial number \"%s\"\n",
			qib7322, serial );
	}

	return 0;
}
1985
1986 /***************************************************************************
1987 *
1988 * Advanced High-performance Bus (AHB) access
1989 *
1990 ***************************************************************************
1991 */
1992
1993 /**
1994 * Wait for AHB transaction to complete
1995 *
1996 * @v qib7322 QIB7322 device
1997 * @ret rc Return status code
1998 */
1999 static int qib7322_ahb_wait ( struct qib7322 *qib7322 ) {
2000 struct QIB_7322_ahb_transaction_reg transaction;
2001 unsigned int i;
2002
2003 /* Wait for Ready bit to be asserted */
2004 for ( i = 0 ; i < QIB7322_AHB_MAX_WAIT_US ; i++ ) {
2005 qib7322_readq ( qib7322, &transaction,
2006 QIB_7322_ahb_transaction_reg_offset );
2007 if ( BIT_GET ( &transaction, ahb_rdy ) )
2008 return 0;
2009 udelay ( 1 );
2010 }
2011
2012 DBGC ( qib7322, "QIB7322 %p timed out waiting for AHB transaction\n",
2013 qib7322 );
2014 return -ETIMEDOUT;
2015 }
2016
2017 /**
2018 * Request ownership of the AHB
2019 *
2020 * @v qib7322 QIB7322 device
2021 * @v location AHB location
2022 * @ret rc Return status code
2023 */
2024 static int qib7322_ahb_request ( struct qib7322 *qib7322,
2025 unsigned int location ) {
2026 struct QIB_7322_ahb_access_ctrl access;
2027 int rc;
2028
2029 /* Request ownership */
2030 memset ( &access, 0, sizeof ( access ) );
2031 BIT_FILL_2 ( &access,
2032 sw_ahb_sel, 1,
2033 sw_sel_ahb_trgt, QIB7322_AHB_LOC_TARGET ( location ) );
2034 qib7322_writeq ( qib7322, &access, QIB_7322_ahb_access_ctrl_offset );
2035
2036 /* Wait for ownership to be granted */
2037 if ( ( rc = qib7322_ahb_wait ( qib7322 ) ) != 0 ) {
2038 DBGC ( qib7322, "QIB7322 %p could not obtain AHB ownership: "
2039 "%s\n", qib7322, strerror ( rc ) );
2040 return rc;
2041 }
2042
2043 return 0;
2044 }
2045
2046 /**
2047 * Release ownership of the AHB
2048 *
2049 * @v qib7322 QIB7322 device
2050 */
2051 static void qib7322_ahb_release ( struct qib7322 *qib7322 ) {
2052 struct QIB_7322_ahb_access_ctrl access;
2053
2054 memset ( &access, 0, sizeof ( access ) );
2055 qib7322_writeq ( qib7322, &access, QIB_7322_ahb_access_ctrl_offset );
2056 }
2057
2058 /**
2059 * Read data via AHB
2060 *
2061 * @v qib7322 QIB7322 device
2062 * @v location AHB location
2063 * @v data Data to read
2064 * @ret rc Return status code
2065 *
2066 * You must have already acquired ownership of the AHB.
2067 */
2068 static int qib7322_ahb_read ( struct qib7322 *qib7322, unsigned int location,
2069 uint32_t *data ) {
2070 struct QIB_7322_ahb_transaction_reg xact;
2071 int rc;
2072
2073 /* Avoid returning uninitialised data on error */
2074 *data = 0;
2075
2076 /* Initiate transaction */
2077 memset ( &xact, 0, sizeof ( xact ) );
2078 BIT_FILL_2 ( &xact,
2079 ahb_address, QIB7322_AHB_LOC_ADDRESS ( location ),
2080 write_not_read, 0 );
2081 qib7322_writeq ( qib7322, &xact, QIB_7322_ahb_transaction_reg_offset );
2082
2083 /* Wait for transaction to complete */
2084 if ( ( rc = qib7322_ahb_wait ( qib7322 ) ) != 0 )
2085 return rc;
2086
2087 /* Read transaction data */
2088 qib7322_readq ( qib7322, &xact, QIB_7322_ahb_transaction_reg_offset );
2089 *data = BIT_GET ( &xact, ahb_data );
2090 return 0;
2091 }
2092
2093 /**
2094 * Write data via AHB
2095 *
2096 * @v qib7322 QIB7322 device
2097 * @v location AHB location
2098 * @v data Data to write
2099 * @ret rc Return status code
2100 *
2101 * You must have already acquired ownership of the AHB.
2102 */
2103 static int qib7322_ahb_write ( struct qib7322 *qib7322, unsigned int location,
2104 uint32_t data ) {
2105 struct QIB_7322_ahb_transaction_reg xact;
2106 int rc;
2107
2108 /* Initiate transaction */
2109 memset ( &xact, 0, sizeof ( xact ) );
2110 BIT_FILL_3 ( &xact,
2111 ahb_address, QIB7322_AHB_LOC_ADDRESS ( location ),
2112 write_not_read, 1,
2113 ahb_data, data );
2114 qib7322_writeq ( qib7322, &xact, QIB_7322_ahb_transaction_reg_offset );
2115
2116 /* Wait for transaction to complete */
2117 if ( ( rc = qib7322_ahb_wait ( qib7322 ) ) != 0 )
2118 return rc;
2119
2120 return 0;
2121 }
2122
/**
 * Read/modify/write AHB register
 *
 * @v qib7322		QIB7322 device
 * @v location		AHB location
 * @v value		Value to set
 * @v mask		Mask to apply to old value
 * @ret rc		Return status code
 *
 * Acquires and releases AHB ownership internally; callers need not
 * hold the bus.
 */
static int qib7322_ahb_mod_reg ( struct qib7322 *qib7322, unsigned int location,
				 uint32_t value, uint32_t mask ) {
	uint32_t old_value;
	uint32_t new_value;
	int rc;

	/* Suppress register-level I/O debug noise for this sequence */
	DBG_DISABLE ( DBGLVL_IO );

	/* Sanity check: value must lie entirely within mask */
	assert ( ( value & mask ) == value );

	/* Acquire bus ownership */
	if ( ( rc = qib7322_ahb_request ( qib7322, location ) ) != 0 )
		goto out;

	/* Read existing value */
	if ( ( rc = qib7322_ahb_read ( qib7322, location, &old_value ) ) != 0 )
		goto out_release;

	/* Update value (on success, rc is zero and control falls
	 * through to the release path anyway)
	 */
	new_value = ( ( old_value & ~mask ) | value );
	DBGCP ( qib7322, "QIB7322 %p AHB %x %#08x => %#08x\n",
		qib7322, location, old_value, new_value );
	if ( ( rc = qib7322_ahb_write ( qib7322, location, new_value ) ) != 0 )
		goto out_release;

 out_release:
	/* Release bus */
	qib7322_ahb_release ( qib7322 );
 out:
	DBG_ENABLE ( DBGLVL_IO );
	return rc;
}
2165
2166 /**
2167 * Read/modify/write AHB register across all ports and channels
2168 *
2169 * @v qib7322 QIB7322 device
2170 * @v reg AHB register
2171 * @v value Value to set
2172 * @v mask Mask to apply to old value
2173 * @ret rc Return status code
2174 */
2175 static int qib7322_ahb_mod_reg_all ( struct qib7322 *qib7322, unsigned int reg,
2176 uint32_t value, uint32_t mask ) {
2177 unsigned int port;
2178 unsigned int channel;
2179 unsigned int location;
2180 int rc;
2181
2182 for ( port = 0 ; port < QIB7322_MAX_PORTS ; port++ ) {
2183 for ( channel = 0 ; channel < QIB7322_MAX_WIDTH ; channel++ ) {
2184 location = QIB7322_AHB_LOCATION ( port, channel, reg );
2185 if ( ( rc = qib7322_ahb_mod_reg ( qib7322, location,
2186 value, mask ) ) != 0 )
2187 return rc;
2188 }
2189 }
2190 return 0;
2191 }
2192
2193 /***************************************************************************
2194 *
2195 * Infiniband SerDes initialisation
2196 *
2197 ***************************************************************************
2198 */
2199
2200 /**
2201 * Initialise the IB SerDes
2202 *
2203 * @v qib7322 QIB7322 device
2204 * @ret rc Return status code
2205 */
2206 static int qib7322_init_ib_serdes ( struct qib7322 *qib7322 ) {
2207 struct QIB_7322_IBCCtrlA_0 ibcctrla;
2208 struct QIB_7322_IBCCtrlB_0 ibcctrlb;
2209 struct QIB_7322_IBPCSConfig_0 ibpcsconfig;
2210
2211 /* Configure sensible defaults for IBC */
2212 memset ( &ibcctrla, 0, sizeof ( ibcctrla ) );
2213 BIT_FILL_5 ( &ibcctrla, /* Tuning values taken from Linux driver */
2214 FlowCtrlPeriod, 0x03,
2215 FlowCtrlWaterMark, 0x05,
2216 MaxPktLen, ( ( QIB7322_RECV_HEADER_SIZE +
2217 QIB7322_RECV_PAYLOAD_SIZE +
2218 4 /* ICRC */ ) >> 2 ),
2219 PhyerrThreshold, 0xf,
2220 OverrunThreshold, 0xf );
2221 qib7322_writeq ( qib7322, &ibcctrla, QIB_7322_IBCCtrlA_0_offset );
2222 qib7322_writeq ( qib7322, &ibcctrla, QIB_7322_IBCCtrlA_1_offset );
2223
2224 /* Force SDR only to avoid needing all the DDR tuning,
2225 * Mellanox compatibility hacks etc. SDR is plenty for
2226 * boot-time operation.
2227 */
2228 qib7322_readq ( qib7322, &ibcctrlb, QIB_7322_IBCCtrlB_0_offset );
2229 BIT_SET ( &ibcctrlb, IB_ENHANCED_MODE, 0 );
2230 BIT_SET ( &ibcctrlb, SD_SPEED_SDR, 1 );
2231 BIT_SET ( &ibcctrlb, SD_SPEED_DDR, 0 );
2232 BIT_SET ( &ibcctrlb, SD_SPEED_QDR, 0 );
2233 BIT_SET ( &ibcctrlb, IB_NUM_CHANNELS, 1 ); /* 4X only */
2234 BIT_SET ( &ibcctrlb, IB_LANE_REV_SUPPORTED, 0 );
2235 BIT_SET ( &ibcctrlb, HRTBT_ENB, 0 );
2236 BIT_SET ( &ibcctrlb, HRTBT_AUTO, 0 );
2237 qib7322_writeq ( qib7322, &ibcctrlb, QIB_7322_IBCCtrlB_0_offset );
2238 qib7322_writeq ( qib7322, &ibcctrlb, QIB_7322_IBCCtrlB_1_offset );
2239
2240 /* Tune SerDes */
2241 qib7322_ahb_mod_reg_all ( qib7322, 2, 0, 0x00000e00UL );
2242
2243 /* Bring XGXS out of reset */
2244 memset ( &ibpcsconfig, 0, sizeof ( ibpcsconfig ) );
2245 qib7322_writeq ( qib7322, &ibpcsconfig, QIB_7322_IBPCSConfig_0_offset );
2246 qib7322_writeq ( qib7322, &ibpcsconfig, QIB_7322_IBPCSConfig_1_offset );
2247
2248 return 0;
2249 }
2250
/***************************************************************************
 *
 * PCI layer interface
 *
 ***************************************************************************
 */

/**
 * Reset QIB7322
 *
 * @v qib7322		QIB7322 device
 * @v pci		PCI device
 */
static void qib7322_reset ( struct qib7322 *qib7322, struct pci_device *pci ) {
	struct QIB_7322_Control control;
	struct pci_config_backup backup;

	/* Back up PCI configuration space (restored below, since the
	 * device reset presumably clobbers it)
	 */
	pci_backup ( pci, &backup, NULL );

	/* Assert reset */
	memset ( &control, 0, sizeof ( control ) );
	BIT_FILL_1 ( &control, SyncReset, 1 );
	qib7322_writeq ( qib7322, &control, QIB_7322_Control_offset );

	/* Wait for reset to complete */
	mdelay ( 1000 );

	/* Restore PCI configuration space */
	pci_restore ( pci, &backup, NULL );
}
2283
/**
 * Probe PCI device
 *
 * @v pci		PCI device
 * @ret rc		Return status code
 */
static int qib7322_probe ( struct pci_device *pci ) {
	struct qib7322 *qib7322;
	struct QIB_7322_Revision revision;
	struct ib_device *ibdev;
	unsigned int link_speed_supported;
	int i;
	int rc;

	/* Allocate QIB7322 device */
	qib7322 = zalloc ( sizeof ( *qib7322 ) );
	if ( ! qib7322 ) {
		rc = -ENOMEM;
		goto err_alloc_qib7322;
	}
	pci_set_drvdata ( pci, qib7322 );

	/* Fix up PCI device */
	adjust_pci_device ( pci );

	/* Get PCI BARs */
	qib7322->regs = ioremap ( pci->membase, QIB7322_BAR0_SIZE );
	DBGC2 ( qib7322, "QIB7322 %p has BAR at %08lx\n",
		qib7322, pci->membase );

	/* Reset device */
	qib7322_reset ( qib7322, pci );

	/* Print some general data */
	qib7322_readq ( qib7322, &revision, QIB_7322_Revision_offset );
	DBGC2 ( qib7322, "QIB7322 %p board %02lx v%ld.%ld.%ld.%ld\n", qib7322,
		BIT_GET ( &revision, BoardID ),
		BIT_GET ( &revision, R_SW ),
		BIT_GET ( &revision, R_Arch ),
		BIT_GET ( &revision, R_ChipRevMajor ),
		BIT_GET ( &revision, R_ChipRevMinor ) );

	/* Initialise I2C subsystem */
	if ( ( rc = qib7322_init_i2c ( qib7322 ) ) != 0 )
		goto err_init_i2c;

	/* Read EEPROM parameters */
	if ( ( rc = qib7322_read_eeprom ( qib7322 ) ) != 0 )
		goto err_read_eeprom;

	/* Initialise send datapath */
	if ( ( rc = qib7322_init_send ( qib7322 ) ) != 0 )
		goto err_init_send;

	/* Initialise receive datapath */
	if ( ( rc = qib7322_init_recv ( qib7322 ) ) != 0 )
		goto err_init_recv;

	/* Initialise the IB SerDes */
	if ( ( rc = qib7322_init_ib_serdes ( qib7322 ) ) != 0 )
		goto err_init_ib_serdes;

	/* Allocate Infiniband devices.  Ports with no supported link
	 * speed are skipped, leaving NULL entries in qib7322->ibdev[].
	 */
	for ( i = 0 ; i < QIB7322_MAX_PORTS ; i++ ) {
		link_speed_supported =
			qib7322_link_speed_supported ( qib7322, i );
		if ( ! link_speed_supported )
			continue;
		ibdev = alloc_ibdev ( 0 );
		if ( ! ibdev ) {
			rc = -ENOMEM;
			goto err_alloc_ibdev;
		}
		qib7322->ibdev[i] = ibdev;
		ibdev->dev = &pci->dev;
		ibdev->op = &qib7322_ib_operations;
		ibdev->port = ( QIB7322_PORT_BASE + i );
		ibdev->link_width_enabled = ibdev->link_width_supported =
			IB_LINK_WIDTH_4X; /* 1x does not work */
		ibdev->link_speed_enabled = ibdev->link_speed_supported =
			IB_LINK_SPEED_SDR; /* to avoid need for link tuning */
		memcpy ( &ibdev->node_guid, &qib7322->guid,
			 sizeof ( ibdev->node_guid ) );
		memcpy ( &ibdev->gid.s.guid, &qib7322->guid,
			 sizeof ( ibdev->gid.s.guid ) );
		/* Make each port's GID unique by folding the port
		 * index into the final GUID byte
		 */
		assert ( ( ibdev->gid.s.guid.bytes[7] & i ) == 0 );
		ibdev->gid.s.guid.bytes[7] |= i;
		ib_set_drvdata ( ibdev, qib7322 );
	}

	/* Register Infiniband devices */
	for ( i = 0 ; i < QIB7322_MAX_PORTS ; i++ ) {
		if ( ! qib7322->ibdev[i] )
			continue;
		if ( ( rc = register_ibdev ( qib7322->ibdev[i] ) ) != 0 ) {
			DBGC ( qib7322, "QIB7322 %p port %d could not register "
			       "IB device: %s\n", qib7322, i, strerror ( rc ) );
			goto err_register_ibdev;
		}
	}

	return 0;

	/* Error-path unwinding.  The assignments of i before each
	 * label are never executed on the success path; they allow
	 * the labels to unwind all ports if entered by falling
	 * through from above rather than by goto.
	 */
	i = QIB7322_MAX_PORTS;
 err_register_ibdev:
	/* Unregister any ports registered before the failure (the
	 * failing index itself was not registered)
	 */
	for ( i-- ; i >= 0 ; i-- ) {
		if ( qib7322->ibdev[i] )
			unregister_ibdev ( qib7322->ibdev[i] );
	}
	i = QIB7322_MAX_PORTS;
 err_alloc_ibdev:
	/* Drop references to allocated devices; entries for skipped
	 * ports are NULL (and ibdev_put() is presumably NULL-safe,
	 * matching the identical pattern in qib7322_remove())
	 */
	for ( i-- ; i >= 0 ; i-- )
		ibdev_put ( qib7322->ibdev[i] );
 err_init_ib_serdes:
	qib7322_fini_send ( qib7322 );
 err_init_send:
	qib7322_fini_recv ( qib7322 );
 err_init_recv:
 err_read_eeprom:
 err_init_i2c:
	free ( qib7322 );
 err_alloc_qib7322:
	return rc;
}
2409
2410 /**
2411 * Remove PCI device
2412 *
2413 * @v pci PCI device
2414 */
2415 static void qib7322_remove ( struct pci_device *pci ) {
2416 struct qib7322 *qib7322 = pci_get_drvdata ( pci );
2417 int i;
2418
2419 for ( i = ( QIB7322_MAX_PORTS - 1 ) ; i >= 0 ; i-- ) {
2420 if ( qib7322->ibdev[i] )
2421 unregister_ibdev ( qib7322->ibdev[i] );
2422 }
2423 for ( i = ( QIB7322_MAX_PORTS - 1 ) ; i >= 0 ; i-- )
2424 ibdev_put ( qib7322->ibdev[i] );
2425 qib7322_fini_send ( qib7322 );
2426 qib7322_fini_recv ( qib7322 );
2427 free ( qib7322 );
2428 }
2429
/** QIB7322 PCI device IDs */
static struct pci_device_id qib7322_nics[] = {
	PCI_ROM ( 0x1077, 0x7322, "iba7322", "IBA7322 QDR InfiniBand HCA", 0 ),
};
2433
/** QIB7322 PCI driver */
struct pci_driver qib7322_driver __pci_driver = {
	.ids = qib7322_nics,
	.id_count = ( sizeof ( qib7322_nics ) / sizeof ( qib7322_nics[0] ) ),
	.probe = qib7322_probe,
	.remove = qib7322_remove,
};