/*
 * Copyright (C) 2009 Michael Brown <mbrown@fensystems.co.uk>.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 * You can also choose to distribute this program under the terms of
 * the Unmodified Binary Distribution Licence (as given in the file
 * COPYING.UBDL), provided that you have satisfied its requirements.
 */

FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );

#include <stdint.h>
#include <stdlib.h>
#include <errno.h>
#include <unistd.h>
#include <assert.h>
#include <ipxe/io.h>
#include <ipxe/pci.h>
#include <ipxe/infiniband.h>
#include <ipxe/i2c.h>
#include <ipxe/bitbash.h>
#include <ipxe/malloc.h>
#include <ipxe/iobuf.h>
#include <ipxe/pcibackup.h>
#include "qib7322.h"

/**
 * @file
 *
 * QLogic QIB7322 Infiniband HCA
 *
 */

/** A QIB7322 send buffer set */
struct qib7322_send_buffers {
	/** Offset within register space of the first send buffer */
	unsigned long base;
	/** Send buffer size */
	unsigned int size;
	/** Index of first send buffer */
	unsigned int start;
	/** Number of send buffers
	 *
	 * Must be a power of two.
	 */
	unsigned int count;
	/** Send buffer availability producer counter */
	unsigned int prod;
	/** Send buffer availability consumer counter */
	unsigned int cons;
	/** Send buffer availability */
	uint16_t avail[0];
};

/** A QIB7322 send work queue */
struct qib7322_send_work_queue {
	/** Send buffer set */
	struct qib7322_send_buffers *send_bufs;
	/** Send buffer usage */
	uint16_t *used;
	/** Producer index */
	unsigned int prod;
	/** Consumer index */
	unsigned int cons;
};

/** A QIB7322 receive work queue */
struct qib7322_recv_work_queue {
	/** Receive header ring */
	void *header;
	/** Receive header producer offset (written by hardware) */
	struct QIB_7322_scalar header_prod;
	/** Receive header consumer offset */
	unsigned int header_cons;
	/** Offset within register space of the eager array */
	unsigned long eager_array;
	/** Number of entries in eager array */
	unsigned int eager_entries;
	/** Eager array producer index */
	unsigned int eager_prod;
	/** Eager array consumer index */
	unsigned int eager_cons;
};

/** A QIB7322 HCA */
struct qib7322 {
	/** Registers */
	void *regs;

	/** In-use contexts */
	uint8_t used_ctx[QIB7322_NUM_CONTEXTS];
	/** Send work queues */
	struct qib7322_send_work_queue send_wq[QIB7322_NUM_CONTEXTS];
	/** Receive work queues */
	struct qib7322_recv_work_queue recv_wq[QIB7322_NUM_CONTEXTS];

	/** Send buffer availability (reported by hardware) */
	struct QIB_7322_SendBufAvail *sendbufavail;
	/** Small send buffers */
	struct qib7322_send_buffers *send_bufs_small;
	/** VL15 port 0 send buffers */
	struct qib7322_send_buffers *send_bufs_vl15_port0;
	/** VL15 port 1 send buffers */
	struct qib7322_send_buffers *send_bufs_vl15_port1;

	/** I2C bit-bashing interface */
	struct i2c_bit_basher i2c;
	/** I2C serial EEPROM */
	struct i2c_device eeprom;

	/** Base GUID */
	union ib_guid guid;
	/** Infiniband devices */
	struct ib_device *ibdev[QIB7322_MAX_PORTS];
};

/***************************************************************************
 *
 * QIB7322 register access
 *
 ***************************************************************************
 *
 * This card requires atomic 64-bit accesses.  Strange things happen
 * if you try to use 32-bit accesses; sometimes they work, sometimes
 * they don't, sometimes you get random data.
 *
 * These accessors use the "movq" MMX instruction, and so won't work
 * on really old Pentiums (which won't have PCIe anyway, so this is
 * something of a moot point).
 */

/**
 * Read QIB7322 qword register
 *
 * @v qib7322		QIB7322 device
 * @v dwords		Register buffer to read into
 * @v offset		Register offset
 */
static void qib7322_readq ( struct qib7322 *qib7322, uint32_t *dwords,
			    unsigned long offset ) {
	void *addr = ( qib7322->regs + offset );

	__asm__ __volatile__ ( "movq (%1), %%mm0\n\t"
			       "movq %%mm0, (%0)\n\t"
			       : : "r" ( dwords ), "r" ( addr ) : "memory" );

	DBGIO ( "[%08lx] => %08x%08x\n",
		virt_to_phys ( addr ), dwords[1], dwords[0] );
}
#define qib7322_readq( _qib7322, _ptr, _offset ) \
	qib7322_readq ( (_qib7322), (_ptr)->u.dwords, (_offset) )
#define qib7322_readq_array8b( _qib7322, _ptr, _offset, _idx ) \
	qib7322_readq ( (_qib7322), (_ptr), ( (_offset) + ( (_idx) * 8 ) ) )
#define qib7322_readq_array64k( _qib7322, _ptr, _offset, _idx ) \
	qib7322_readq ( (_qib7322), (_ptr), ( (_offset) + ( (_idx) * 65536 ) ) )
#define qib7322_readq_port( _qib7322, _ptr, _offset, _port ) \
	qib7322_readq ( (_qib7322), (_ptr), ( (_offset) + ( (_port) * 4096 ) ) )

/**
 * Write QIB7322 qword register
 *
 * @v qib7322		QIB7322 device
 * @v dwords		Register buffer to write
 * @v offset		Register offset
 */
static void qib7322_writeq ( struct qib7322 *qib7322, const uint32_t *dwords,
			     unsigned long offset ) {
	void *addr = ( qib7322->regs + offset );

	DBGIO ( "[%08lx] <= %08x%08x\n",
		virt_to_phys ( addr ), dwords[1], dwords[0] );

	__asm__ __volatile__ ( "movq (%0), %%mm0\n\t"
			       "movq %%mm0, (%1)\n\t"
			       : : "r" ( dwords ), "r" ( addr ) : "memory" );
}
#define qib7322_writeq( _qib7322, _ptr, _offset ) \
	qib7322_writeq ( (_qib7322), (_ptr)->u.dwords, (_offset) )
#define qib7322_writeq_array8b( _qib7322, _ptr, _offset, _idx ) \
	qib7322_writeq ( (_qib7322), (_ptr), ( (_offset) + ( (_idx) * 8 ) ) )
#define qib7322_writeq_array64k( _qib7322, _ptr, _offset, _idx ) \
	qib7322_writeq ( (_qib7322), (_ptr), ( (_offset) + ( (_idx) * 65536 ) ))
#define qib7322_writeq_port( _qib7322, _ptr, _offset, _port ) \
	qib7322_writeq ( (_qib7322), (_ptr), ( (_offset) + ( (_port) * 4096 ) ))

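/* Note on the accessor macros above: the _array8b variants address
 * simple register arrays with an 8-byte stride, the _array64k
 * variants address the per-context 64kB register pages, and the
 * _port variants address the per-port 4kB register blocks.
 */
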
/**
 * Write QIB7322 dword register
 *
 * @v qib7322		QIB7322 device
 * @v dword		Value to write
 * @v offset		Register offset
 */
static void qib7322_writel ( struct qib7322 *qib7322, uint32_t dword,
			     unsigned long offset ) {
	writel ( dword, ( qib7322->regs + offset ) );
}

/***************************************************************************
 *
 * Link state management
 *
 ***************************************************************************
 */

/**
 * Textual representation of link state
 *
 * @v link_state	Link state
 * @ret link_text	Link state text
 */
static const char * qib7322_link_state_text ( unsigned int link_state ) {
	switch ( link_state ) {
	case QIB7322_LINK_STATE_DOWN:		return "DOWN";
	case QIB7322_LINK_STATE_INIT:		return "INIT";
	case QIB7322_LINK_STATE_ARM:		return "ARM";
	case QIB7322_LINK_STATE_ACTIVE:		return "ACTIVE";
	case QIB7322_LINK_STATE_ACT_DEFER:	return "ACT_DEFER";
	default:				return "UNKNOWN";
	}
}

/**
 * Handle link state change
 *
 * @v qib7322		QIB7322 device
 */
static void qib7322_link_state_changed ( struct ib_device *ibdev ) {
	struct qib7322 *qib7322 = ib_get_drvdata ( ibdev );
	struct QIB_7322_IBCStatusA_0 ibcstatusa;
	struct QIB_7322_EXTCtrl extctrl;
	unsigned int port = ( ibdev->port - QIB7322_PORT_BASE );
	unsigned int link_training_state;
	unsigned int link_state;
	unsigned int link_width;
	unsigned int link_speed;
	unsigned int link_speed_qdr;
	unsigned int green;
	unsigned int yellow;

	/* Read link state */
	qib7322_readq_port ( qib7322, &ibcstatusa,
			     QIB_7322_IBCStatusA_0_offset, port );
	link_training_state = BIT_GET ( &ibcstatusa, LinkTrainingState );
	link_state = BIT_GET ( &ibcstatusa, LinkState );
	link_width = BIT_GET ( &ibcstatusa, LinkWidthActive );
	link_speed = BIT_GET ( &ibcstatusa, LinkSpeedActive );
	link_speed_qdr = BIT_GET ( &ibcstatusa, LinkSpeedQDR );
	DBGC ( qib7322, "QIB7322 %p port %d training state %#x link state %s "
	       "(%s %s)\n", qib7322, port, link_training_state,
	       qib7322_link_state_text ( link_state ),
	       ( link_speed_qdr ? "QDR" : ( link_speed ? "DDR" : "SDR" ) ),
	       ( link_width ? "x4" : "x1" ) );

	/* Set LEDs according to link state */
	qib7322_readq ( qib7322, &extctrl, QIB_7322_EXTCtrl_offset );
	green = ( ( link_state >= QIB7322_LINK_STATE_INIT ) ? 1 : 0 );
	yellow = ( ( link_state >= QIB7322_LINK_STATE_ACTIVE ) ? 1 : 0 );
	if ( port == 0 ) {
		BIT_SET ( &extctrl, LEDPort0GreenOn, green );
		BIT_SET ( &extctrl, LEDPort0YellowOn, yellow );
	} else {
		BIT_SET ( &extctrl, LEDPort1GreenOn, green );
		BIT_SET ( &extctrl, LEDPort1YellowOn, yellow );
	}
	qib7322_writeq ( qib7322, &extctrl, QIB_7322_EXTCtrl_offset );

	/* Notify Infiniband core of link state change */
	ibdev->port_state = ( link_state + 1 );
	ibdev->link_width_active =
		( link_width ? IB_LINK_WIDTH_4X : IB_LINK_WIDTH_1X );
	ibdev->link_speed_active =
		( link_speed ? IB_LINK_SPEED_DDR : IB_LINK_SPEED_SDR );
	ib_link_state_changed ( ibdev );
}

/**
 * Wait for link state change to take effect
 *
 * @v ibdev		Infiniband device
 * @v new_link_state	Expected link state
 * @ret rc		Return status code
 */
static int qib7322_link_state_check ( struct ib_device *ibdev,
				      unsigned int new_link_state ) {
	struct qib7322 *qib7322 = ib_get_drvdata ( ibdev );
	struct QIB_7322_IBCStatusA_0 ibcstatusa;
	unsigned int port = ( ibdev->port - QIB7322_PORT_BASE );
	unsigned int link_state;
	unsigned int i;

	for ( i = 0 ; i < QIB7322_LINK_STATE_MAX_WAIT_US ; i++ ) {
		qib7322_readq_port ( qib7322, &ibcstatusa,
				     QIB_7322_IBCStatusA_0_offset, port );
		link_state = BIT_GET ( &ibcstatusa, LinkState );
		if ( link_state == new_link_state )
			return 0;
		udelay ( 1 );
	}

	DBGC ( qib7322, "QIB7322 %p port %d timed out waiting for link state "
	       "%s\n", qib7322, port, qib7322_link_state_text ( link_state ) );
	return -ETIMEDOUT;
}

/**
 * Set port information
 *
 * @v ibdev		Infiniband device
 * @v mad		Set port information MAD
 */
static int qib7322_set_port_info ( struct ib_device *ibdev,
				   union ib_mad *mad ) {
	struct qib7322 *qib7322 = ib_get_drvdata ( ibdev );
	struct ib_port_info *port_info = &mad->smp.smp_data.port_info;
	struct QIB_7322_IBCCtrlA_0 ibcctrla;
	unsigned int port = ( ibdev->port - QIB7322_PORT_BASE );
	unsigned int port_state;
	unsigned int link_state;

	/* Set new link state */
	port_state = ( port_info->link_speed_supported__port_state & 0xf );
	if ( port_state ) {
		link_state = ( port_state - 1 );
		DBGC ( qib7322, "QIB7322 %p set link state to %s (%x)\n",
		       qib7322, qib7322_link_state_text ( link_state ),
		       link_state );
		qib7322_readq_port ( qib7322, &ibcctrla,
				     QIB_7322_IBCCtrlA_0_offset, port );
		BIT_SET ( &ibcctrla, LinkCmd, link_state );
		qib7322_writeq_port ( qib7322, &ibcctrla,
				      QIB_7322_IBCCtrlA_0_offset, port );

		/* Wait for link state change to take effect.  Ignore
		 * errors; the current link state will be returned via
		 * the GetResponse MAD.
		 */
		qib7322_link_state_check ( ibdev, link_state );
	}

	/* Detect and report link state change */
	qib7322_link_state_changed ( ibdev );

	return 0;
}

/**
 * Set partition key table
 *
 * @v ibdev		Infiniband device
 * @v mad		Set partition key table MAD
 */
static int qib7322_set_pkey_table ( struct ib_device *ibdev __unused,
				    union ib_mad *mad __unused ) {

	/* Nothing to do */
	return 0;
}

/***************************************************************************
 *
 * Context allocation
 *
 ***************************************************************************
 */

/**
 * Allocate a context and set queue pair number
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @ret rc		Return status code
 */
static int qib7322_alloc_ctx ( struct ib_device *ibdev,
			       struct ib_queue_pair *qp ) {
	struct qib7322 *qib7322 = ib_get_drvdata ( ibdev );
	unsigned int port = ( ibdev->port - QIB7322_PORT_BASE );
	unsigned int ctx;

	for ( ctx = port ; ctx < QIB7322_NUM_CONTEXTS ; ctx += 2 ) {

		if ( ! qib7322->used_ctx[ctx] ) {
			qib7322->used_ctx[ctx] = 1;
			qp->qpn = ( ctx & ~0x01 );
			DBGC2 ( qib7322, "QIB7322 %p port %d QPN %ld is CTX "
				"%d\n", qib7322, port, qp->qpn, ctx );
			return 0;
		}
	}

	DBGC ( qib7322, "QIB7322 %p port %d out of available contexts\n",
	       qib7322, port );
	return -ENOENT;
}

/**
 * Get queue pair context number
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @ret ctx		Context index
 */
static unsigned int qib7322_ctx ( struct ib_device *ibdev,
				  struct ib_queue_pair *qp ) {
	return ( qp->qpn + ( ibdev->port - QIB7322_PORT_BASE ) );
}

/**
 * Free a context
 *
 * @v qib7322		QIB7322 device
 * @v ctx		Context index
 */
static void qib7322_free_ctx ( struct ib_device *ibdev,
			       struct ib_queue_pair *qp ) {
	struct qib7322 *qib7322 = ib_get_drvdata ( ibdev );
	unsigned int port = ( ibdev->port - QIB7322_PORT_BASE );
	unsigned int ctx = qib7322_ctx ( ibdev, qp );

	qib7322->used_ctx[ctx] = 0;
	DBGC2 ( qib7322, "QIB7322 %p port %d CTX %d freed\n",
		qib7322, port, ctx );
}

/***************************************************************************
 *
 * Send datapath
 *
 ***************************************************************************
 */

/** Send buffer toggle bit
 *
 * We encode send buffers as 15 bits of send buffer index plus a
 * single bit which should match the "check" bit in the SendBufAvail
 * array.
 */
#define QIB7322_SEND_BUF_TOGGLE 0x8000

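/* A send buffer handle is therefore ( index | toggle ): for example,
 * buffer index 0x12 with the toggle bit set is encoded as 0x8012.
 */
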
/**
 * Create send buffer set
 *
 * @v qib7322		QIB7322 device
 * @v base		Send buffer base offset
 * @v size		Send buffer size
 * @v start		Index of first send buffer
 * @v count		Number of send buffers
 * @ret send_bufs	Send buffer set
 */
static struct qib7322_send_buffers *
qib7322_create_send_bufs ( struct qib7322 *qib7322, unsigned long base,
			   unsigned int size, unsigned int start,
			   unsigned int count ) {
	struct qib7322_send_buffers *send_bufs;
	unsigned int i;

	/* Allocate send buffer set */
	send_bufs = zalloc ( sizeof ( *send_bufs ) +
			     ( count * sizeof ( send_bufs->avail[0] ) ) );
	if ( ! send_bufs )
		return NULL;

	/* Populate send buffer set */
	send_bufs->base = base;
	send_bufs->size = size;
	send_bufs->start = start;
	send_bufs->count = count;
	for ( i = 0 ; i < count ; i++ )
		send_bufs->avail[i] = ( start + i );

	DBGC2 ( qib7322, "QIB7322 %p send buffer set %p [%d,%d] at %lx\n",
		qib7322, send_bufs, start, ( start + count - 1 ),
		send_bufs->base );

	return send_bufs;
}

/**
 * Destroy send buffer set
 *
 * @v qib7322		QIB7322 device
 * @v send_bufs		Send buffer set
 */
static void
qib7322_destroy_send_bufs ( struct qib7322 *qib7322 __unused,
			    struct qib7322_send_buffers *send_bufs ) {
	free ( send_bufs );
}

/**
 * Allocate a send buffer
 *
 * @v qib7322		QIB7322 device
 * @v send_bufs		Send buffer set
 * @ret send_buf	Send buffer, or negative error
 */
static int qib7322_alloc_send_buf ( struct qib7322 *qib7322,
				    struct qib7322_send_buffers *send_bufs ) {
	unsigned int used;
	unsigned int mask;
	unsigned int send_buf;

	used = ( send_bufs->cons - send_bufs->prod );
	if ( used >= send_bufs->count ) {
		DBGC ( qib7322, "QIB7322 %p send buffer set %p out of "
		       "buffers\n", qib7322, send_bufs );
		return -ENOBUFS;
	}
	mask = ( send_bufs->count - 1 );
	send_buf = send_bufs->avail[ send_bufs->cons++ & mask ];
	send_buf ^= QIB7322_SEND_BUF_TOGGLE;
	return send_buf;
}

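/* Note that the avail[] ring is drained by qib7322_alloc_send_buf()
 * (via cons) and refilled by qib7322_free_send_buf() (via prod); both
 * counters are free-running and wrap naturally because count is a
 * power of two.
 */
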
/**
 * Free a send buffer
 *
 * @v qib7322		QIB7322 device
 * @v send_bufs		Send buffer set
 * @v send_buf		Send buffer
 */
static void qib7322_free_send_buf ( struct qib7322 *qib7322 __unused,
				    struct qib7322_send_buffers *send_bufs,
				    unsigned int send_buf ) {
	unsigned int mask;

	mask = ( send_bufs->count - 1 );
	send_bufs->avail[ send_bufs->prod++ & mask ] = send_buf;
}

/**
 * Check to see if send buffer is in use
 *
 * @v qib7322		QIB7322 device
 * @v send_buf		Send buffer
 * @ret in_use		Send buffer is in use
 */
static int qib7322_send_buf_in_use ( struct qib7322 *qib7322,
				     unsigned int send_buf ) {
	unsigned int send_idx;
	unsigned int send_check;
	unsigned int inusecheck;
	unsigned int inuse;
	unsigned int check;

	send_idx = ( send_buf & ~QIB7322_SEND_BUF_TOGGLE );
	send_check = ( !! ( send_buf & QIB7322_SEND_BUF_TOGGLE ) );
	inusecheck = BIT_GET ( qib7322->sendbufavail, InUseCheck[send_idx] );
	inuse = ( !! ( inusecheck & 0x02 ) );
	check = ( !! ( inusecheck & 0x01 ) );
	return ( inuse || ( check != send_check ) );
}

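/* Each InUseCheck entry is a two-bit field: bit 1 is the "in use"
 * flag and bit 0 is the "check" bit, which must match the toggle bit
 * of the send buffer handle for the buffer to be considered free.
 */
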
/**
 * Calculate starting offset for send buffer
 *
 * @v qib7322		QIB7322 device
 * @v send_buf		Send buffer
 * @ret offset		Starting offset
 */
static unsigned long
qib7322_send_buffer_offset ( struct qib7322 *qib7322 __unused,
			     struct qib7322_send_buffers *send_bufs,
			     unsigned int send_buf ) {
	unsigned int index;

	index = ( ( send_buf & ~QIB7322_SEND_BUF_TOGGLE ) - send_bufs->start );
	return ( send_bufs->base + ( index * send_bufs->size ) );
}

/**
 * Create send work queue
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @ret rc		Return status code
 */
static int qib7322_create_send_wq ( struct ib_device *ibdev,
				    struct ib_queue_pair *qp ) {
	struct qib7322 *qib7322 = ib_get_drvdata ( ibdev );
	struct ib_work_queue *wq = &qp->send;
	struct qib7322_send_work_queue *qib7322_wq = ib_wq_get_drvdata ( wq );
	unsigned int port = ( ibdev->port - QIB7322_PORT_BASE );

	/* Select send buffer set */
	if ( qp->type == IB_QPT_SMI ) {
		if ( port == 0 ) {
			qib7322_wq->send_bufs = qib7322->send_bufs_vl15_port0;
		} else {
			qib7322_wq->send_bufs = qib7322->send_bufs_vl15_port1;
		}
	} else {
		qib7322_wq->send_bufs = qib7322->send_bufs_small;
	}

	/* Allocate space for send buffer usage list */
	qib7322_wq->used = zalloc ( qp->send.num_wqes *
				    sizeof ( qib7322_wq->used[0] ) );
	if ( ! qib7322_wq->used )
		return -ENOMEM;

	/* Reset work queue */
	qib7322_wq->prod = 0;
	qib7322_wq->cons = 0;

	return 0;
}

/**
 * Destroy send work queue
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 */
static void qib7322_destroy_send_wq ( struct ib_device *ibdev __unused,
				      struct ib_queue_pair *qp ) {
	struct ib_work_queue *wq = &qp->send;
	struct qib7322_send_work_queue *qib7322_wq = ib_wq_get_drvdata ( wq );

	free ( qib7322_wq->used );
}

/**
 * Initialise send datapath
 *
 * @v qib7322		QIB7322 device
 * @ret rc		Return status code
 */
static int qib7322_init_send ( struct qib7322 *qib7322 ) {
	struct QIB_7322_SendBufBase sendbufbase;
	struct QIB_7322_SendBufAvailAddr sendbufavailaddr;
	struct QIB_7322_SendCtrl sendctrl;
	struct QIB_7322_SendCtrl_0 sendctrlp;
	unsigned long baseaddr_smallpio;
	unsigned long baseaddr_largepio;
	unsigned long baseaddr_vl15_port0;
	unsigned long baseaddr_vl15_port1;
	int rc;

	/* Create send buffer sets */
	qib7322_readq ( qib7322, &sendbufbase, QIB_7322_SendBufBase_offset );
	baseaddr_smallpio = BIT_GET ( &sendbufbase, BaseAddr_SmallPIO );
	baseaddr_largepio = BIT_GET ( &sendbufbase, BaseAddr_LargePIO );
	baseaddr_vl15_port0 = ( baseaddr_largepio +
				( QIB7322_LARGE_SEND_BUF_SIZE *
				  QIB7322_LARGE_SEND_BUF_COUNT ) );
	baseaddr_vl15_port1 = ( baseaddr_vl15_port0 +
				QIB7322_VL15_PORT0_SEND_BUF_SIZE );
	qib7322->send_bufs_small =
		qib7322_create_send_bufs ( qib7322, baseaddr_smallpio,
					   QIB7322_SMALL_SEND_BUF_SIZE,
					   QIB7322_SMALL_SEND_BUF_START,
					   QIB7322_SMALL_SEND_BUF_USED );
	if ( ! qib7322->send_bufs_small ) {
		rc = -ENOMEM;
		goto err_create_send_bufs_small;
	}
	qib7322->send_bufs_vl15_port0 =
		qib7322_create_send_bufs ( qib7322, baseaddr_vl15_port0,
					   QIB7322_VL15_PORT0_SEND_BUF_SIZE,
					   QIB7322_VL15_PORT0_SEND_BUF_START,
					   QIB7322_VL15_PORT0_SEND_BUF_COUNT );
	if ( ! qib7322->send_bufs_vl15_port0 ) {
		rc = -ENOMEM;
		goto err_create_send_bufs_vl15_port0;
	}
	qib7322->send_bufs_vl15_port1 =
		qib7322_create_send_bufs ( qib7322, baseaddr_vl15_port1,
					   QIB7322_VL15_PORT1_SEND_BUF_SIZE,
					   QIB7322_VL15_PORT1_SEND_BUF_START,
					   QIB7322_VL15_PORT1_SEND_BUF_COUNT );
	if ( ! qib7322->send_bufs_vl15_port1 ) {
		rc = -ENOMEM;
		goto err_create_send_bufs_vl15_port1;
	}

	/* Allocate space for the SendBufAvail array */
	qib7322->sendbufavail = malloc_dma ( sizeof ( *qib7322->sendbufavail ),
					     QIB7322_SENDBUFAVAIL_ALIGN );
	if ( ! qib7322->sendbufavail ) {
		rc = -ENOMEM;
		goto err_alloc_sendbufavail;
	}
	memset ( qib7322->sendbufavail, 0, sizeof ( *qib7322->sendbufavail ) );

	/* Program SendBufAvailAddr into the hardware */
	memset ( &sendbufavailaddr, 0, sizeof ( sendbufavailaddr ) );
	BIT_FILL_1 ( &sendbufavailaddr, SendBufAvailAddr,
		     ( virt_to_bus ( qib7322->sendbufavail ) >> 6 ) );
	qib7322_writeq ( qib7322, &sendbufavailaddr,
			 QIB_7322_SendBufAvailAddr_offset );

	/* Enable sending */
	memset ( &sendctrlp, 0, sizeof ( sendctrlp ) );
	BIT_FILL_1 ( &sendctrlp, SendEnable, 1 );
	qib7322_writeq ( qib7322, &sendctrlp, QIB_7322_SendCtrl_0_offset );
	qib7322_writeq ( qib7322, &sendctrlp, QIB_7322_SendCtrl_1_offset );

	/* Enable DMA of SendBufAvail */
	memset ( &sendctrl, 0, sizeof ( sendctrl ) );
	BIT_FILL_1 ( &sendctrl, SendBufAvailUpd, 1 );
	qib7322_writeq ( qib7322, &sendctrl, QIB_7322_SendCtrl_offset );

	return 0;

	free_dma ( qib7322->sendbufavail, sizeof ( *qib7322->sendbufavail ) );
 err_alloc_sendbufavail:
	qib7322_destroy_send_bufs ( qib7322, qib7322->send_bufs_vl15_port1 );
 err_create_send_bufs_vl15_port1:
	qib7322_destroy_send_bufs ( qib7322, qib7322->send_bufs_vl15_port0 );
 err_create_send_bufs_vl15_port0:
	qib7322_destroy_send_bufs ( qib7322, qib7322->send_bufs_small );
 err_create_send_bufs_small:
	return rc;
}

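/* Note that the error path above unwinds the send buffer sets and the
 * SendBufAvail array in reverse order of creation, mirroring the
 * teardown performed by qib7322_fini_send().
 */
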
/**
 * Shut down send datapath
 *
 * @v qib7322		QIB7322 device
 */
static void qib7322_fini_send ( struct qib7322 *qib7322 ) {
	struct QIB_7322_SendCtrl sendctrl;

	/* Disable sending and DMA of SendBufAvail */
	memset ( &sendctrl, 0, sizeof ( sendctrl ) );
	qib7322_writeq ( qib7322, &sendctrl, QIB_7322_SendCtrl_offset );

	/* Ensure hardware has seen this disable */
	qib7322_readq ( qib7322, &sendctrl, QIB_7322_SendCtrl_offset );

	free_dma ( qib7322->sendbufavail, sizeof ( *qib7322->sendbufavail ) );
	qib7322_destroy_send_bufs ( qib7322, qib7322->send_bufs_vl15_port1 );
	qib7322_destroy_send_bufs ( qib7322, qib7322->send_bufs_vl15_port0 );
	qib7322_destroy_send_bufs ( qib7322, qib7322->send_bufs_small );
}

/***************************************************************************
 *
 * Receive datapath
 *
 ***************************************************************************
 */

/**
 * Create receive work queue
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @ret rc		Return status code
 */
static int qib7322_create_recv_wq ( struct ib_device *ibdev,
				    struct ib_queue_pair *qp ) {
	struct qib7322 *qib7322 = ib_get_drvdata ( ibdev );
	struct ib_work_queue *wq = &qp->recv;
	struct qib7322_recv_work_queue *qib7322_wq = ib_wq_get_drvdata ( wq );
	struct QIB_7322_RcvHdrAddr0 rcvhdraddr;
	struct QIB_7322_RcvHdrTailAddr0 rcvhdrtailaddr;
	struct QIB_7322_RcvHdrHead0 rcvhdrhead;
	struct QIB_7322_scalar rcvegrindexhead;
	struct QIB_7322_RcvCtrl rcvctrl;
	struct QIB_7322_RcvCtrl_P rcvctrlp;
	unsigned int port = ( ibdev->port - QIB7322_PORT_BASE );
	unsigned int ctx = qib7322_ctx ( ibdev, qp );
	int rc;

	/* Reset context information */
	memset ( &qib7322_wq->header_prod, 0,
		 sizeof ( qib7322_wq->header_prod ) );
	qib7322_wq->header_cons = 0;
	qib7322_wq->eager_prod = 0;
	qib7322_wq->eager_cons = 0;

	/* Allocate receive header buffer */
	qib7322_wq->header = malloc_dma ( QIB7322_RECV_HEADERS_SIZE,
					  QIB7322_RECV_HEADERS_ALIGN );
	if ( ! qib7322_wq->header ) {
		rc = -ENOMEM;
		goto err_alloc_header;
	}

	/* Enable context in hardware */
	memset ( &rcvhdraddr, 0, sizeof ( rcvhdraddr ) );
	BIT_FILL_1 ( &rcvhdraddr, RcvHdrAddr,
		     ( virt_to_bus ( qib7322_wq->header ) >> 2 ) );
	qib7322_writeq_array8b ( qib7322, &rcvhdraddr,
				 QIB_7322_RcvHdrAddr0_offset, ctx );
	memset ( &rcvhdrtailaddr, 0, sizeof ( rcvhdrtailaddr ) );
	BIT_FILL_1 ( &rcvhdrtailaddr, RcvHdrTailAddr,
		     ( virt_to_bus ( &qib7322_wq->header_prod ) >> 2 ) );
	qib7322_writeq_array8b ( qib7322, &rcvhdrtailaddr,
				 QIB_7322_RcvHdrTailAddr0_offset, ctx );
	memset ( &rcvhdrhead, 0, sizeof ( rcvhdrhead ) );
	BIT_FILL_1 ( &rcvhdrhead, counter, 1 );
	qib7322_writeq_array64k ( qib7322, &rcvhdrhead,
				  QIB_7322_RcvHdrHead0_offset, ctx );
	memset ( &rcvegrindexhead, 0, sizeof ( rcvegrindexhead ) );
	BIT_FILL_1 ( &rcvegrindexhead, Value, 1 );
	qib7322_writeq_array64k ( qib7322, &rcvegrindexhead,
				  QIB_7322_RcvEgrIndexHead0_offset, ctx );
	qib7322_readq_port ( qib7322, &rcvctrlp,
			     QIB_7322_RcvCtrl_0_offset, port );
	BIT_SET ( &rcvctrlp, ContextEnable[ctx], 1 );
	qib7322_writeq_port ( qib7322, &rcvctrlp,
			      QIB_7322_RcvCtrl_0_offset, port );
	qib7322_readq ( qib7322, &rcvctrl, QIB_7322_RcvCtrl_offset );
	BIT_SET ( &rcvctrl, IntrAvail[ctx], 1 );
	qib7322_writeq ( qib7322, &rcvctrl, QIB_7322_RcvCtrl_offset );

	DBGC ( qib7322, "QIB7322 %p port %d QPN %ld CTX %d hdrs [%lx,%lx) prod "
	       "%lx\n", qib7322, port, qp->qpn, ctx,
	       virt_to_bus ( qib7322_wq->header ),
	       ( virt_to_bus ( qib7322_wq->header )
		 + QIB7322_RECV_HEADERS_SIZE ),
	       virt_to_bus ( &qib7322_wq->header_prod ) );
	return 0;

	free_dma ( qib7322_wq->header, QIB7322_RECV_HEADERS_SIZE );
 err_alloc_header:
	return rc;
}

/**
 * Destroy receive work queue
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 */
static void qib7322_destroy_recv_wq ( struct ib_device *ibdev,
				      struct ib_queue_pair *qp ) {
	struct qib7322 *qib7322 = ib_get_drvdata ( ibdev );
	struct ib_work_queue *wq = &qp->recv;
	struct qib7322_recv_work_queue *qib7322_wq = ib_wq_get_drvdata ( wq );
	struct QIB_7322_RcvCtrl rcvctrl;
	struct QIB_7322_RcvCtrl_P rcvctrlp;
	unsigned int port = ( ibdev->port - QIB7322_PORT_BASE );
	unsigned int ctx = qib7322_ctx ( ibdev, qp );

	/* Disable context in hardware */
	qib7322_readq_port ( qib7322, &rcvctrlp,
			     QIB_7322_RcvCtrl_0_offset, port );
	BIT_SET ( &rcvctrlp, ContextEnable[ctx], 0 );
	qib7322_writeq_port ( qib7322, &rcvctrlp,
			      QIB_7322_RcvCtrl_0_offset, port );
	qib7322_readq ( qib7322, &rcvctrl, QIB_7322_RcvCtrl_offset );
	BIT_SET ( &rcvctrl, IntrAvail[ctx], 0 );
	qib7322_writeq ( qib7322, &rcvctrl, QIB_7322_RcvCtrl_offset );

	/* Make sure the hardware has seen that the context is disabled */
	qib7322_readq ( qib7322, &rcvctrl, QIB_7322_RcvCtrl_offset );

	/* Free headers ring */
	free_dma ( qib7322_wq->header, QIB7322_RECV_HEADERS_SIZE );
}

/**
 * Initialise receive datapath
 *
 * @v qib7322		QIB7322 device
 * @ret rc		Return status code
 */
static int qib7322_init_recv ( struct qib7322 *qib7322 ) {
	struct QIB_7322_RcvCtrl rcvctrl;
	struct QIB_7322_RcvCtrl_0 rcvctrlp;
	struct QIB_7322_RcvQPMapTableA_0 rcvqpmaptablea0;
	struct QIB_7322_RcvQPMapTableB_0 rcvqpmaptableb0;
	struct QIB_7322_RcvQPMapTableA_1 rcvqpmaptablea1;
	struct QIB_7322_RcvQPMapTableB_1 rcvqpmaptableb1;
	struct QIB_7322_RcvQPMulticastContext_0 rcvqpmcastctx0;
	struct QIB_7322_RcvQPMulticastContext_1 rcvqpmcastctx1;
	struct QIB_7322_scalar rcvegrbase;
	struct QIB_7322_scalar rcvhdrentsize;
	struct QIB_7322_scalar rcvhdrcnt;
	struct QIB_7322_RcvBTHQP_0 rcvbthqp;
	struct QIB_7322_RxCreditVL0_0 rxcreditvl;
	unsigned int contextcfg;
	unsigned long egrbase;
	unsigned int eager_array_size_kernel;
	unsigned int eager_array_size_user;
	unsigned int ctx;

	/* Select configuration based on number of contexts */
	switch ( QIB7322_NUM_CONTEXTS ) {
	case 6:
		contextcfg = QIB7322_CONTEXTCFG_6CTX;
		eager_array_size_kernel = QIB7322_EAGER_ARRAY_SIZE_6CTX_KERNEL;
		eager_array_size_user = QIB7322_EAGER_ARRAY_SIZE_6CTX_USER;
		break;
	case 10:
		contextcfg = QIB7322_CONTEXTCFG_10CTX;
		eager_array_size_kernel = QIB7322_EAGER_ARRAY_SIZE_10CTX_KERNEL;
		eager_array_size_user = QIB7322_EAGER_ARRAY_SIZE_10CTX_USER;
		break;
	case 18:
		contextcfg = QIB7322_CONTEXTCFG_18CTX;
		eager_array_size_kernel = QIB7322_EAGER_ARRAY_SIZE_18CTX_KERNEL;
		eager_array_size_user = QIB7322_EAGER_ARRAY_SIZE_18CTX_USER;
		break;
	default:
		linker_assert ( 0, invalid_QIB7322_NUM_CONTEXTS );
		return -EINVAL;
	}

	/* Configure number of contexts */
	memset ( &rcvctrl, 0, sizeof ( rcvctrl ) );
	BIT_FILL_2 ( &rcvctrl,
		     TailUpd, 1,
		     ContextCfg, contextcfg );
	qib7322_writeq ( qib7322, &rcvctrl, QIB_7322_RcvCtrl_offset );

	/* Map QPNs to contexts */
	memset ( &rcvctrlp, 0, sizeof ( rcvctrlp ) );
	BIT_FILL_3 ( &rcvctrlp,
		     RcvIBPortEnable, 1,
		     RcvQPMapEnable, 1,
		     RcvPartitionKeyDisable, 1 );
	qib7322_writeq ( qib7322, &rcvctrlp, QIB_7322_RcvCtrl_0_offset );
	qib7322_writeq ( qib7322, &rcvctrlp, QIB_7322_RcvCtrl_1_offset );
	memset ( &rcvqpmaptablea0, 0, sizeof ( rcvqpmaptablea0 ) );
	BIT_FILL_6 ( &rcvqpmaptablea0,
		     RcvQPMapContext0, 0,
		     RcvQPMapContext1, 2,
		     RcvQPMapContext2, 4,
		     RcvQPMapContext3, 6,
		     RcvQPMapContext4, 8,
		     RcvQPMapContext5, 10 );
	qib7322_writeq ( qib7322, &rcvqpmaptablea0,
			 QIB_7322_RcvQPMapTableA_0_offset );
	memset ( &rcvqpmaptableb0, 0, sizeof ( rcvqpmaptableb0 ) );
	BIT_FILL_3 ( &rcvqpmaptableb0,
		     RcvQPMapContext6, 12,
		     RcvQPMapContext7, 14,
		     RcvQPMapContext8, 16 );
	qib7322_writeq ( qib7322, &rcvqpmaptableb0,
			 QIB_7322_RcvQPMapTableB_0_offset );
	memset ( &rcvqpmaptablea1, 0, sizeof ( rcvqpmaptablea1 ) );
	BIT_FILL_6 ( &rcvqpmaptablea1,
		     RcvQPMapContext0, 1,
		     RcvQPMapContext1, 3,
		     RcvQPMapContext2, 5,
		     RcvQPMapContext3, 7,
		     RcvQPMapContext4, 9,
		     RcvQPMapContext5, 11 );
	qib7322_writeq ( qib7322, &rcvqpmaptablea1,
			 QIB_7322_RcvQPMapTableA_1_offset );
	memset ( &rcvqpmaptableb1, 0, sizeof ( rcvqpmaptableb1 ) );
	BIT_FILL_3 ( &rcvqpmaptableb1,
		     RcvQPMapContext6, 13,
		     RcvQPMapContext7, 15,
		     RcvQPMapContext8, 17 );
	qib7322_writeq ( qib7322, &rcvqpmaptableb1,
			 QIB_7322_RcvQPMapTableB_1_offset );

	/* Map multicast QPNs to contexts */
	memset ( &rcvqpmcastctx0, 0, sizeof ( rcvqpmcastctx0 ) );
	BIT_FILL_1 ( &rcvqpmcastctx0, RcvQpMcContext, 0 );
	qib7322_writeq ( qib7322, &rcvqpmcastctx0,
			 QIB_7322_RcvQPMulticastContext_0_offset );
	memset ( &rcvqpmcastctx1, 0, sizeof ( rcvqpmcastctx1 ) );
	BIT_FILL_1 ( &rcvqpmcastctx1, RcvQpMcContext, 1 );
	qib7322_writeq ( qib7322, &rcvqpmcastctx1,
			 QIB_7322_RcvQPMulticastContext_1_offset );

	/* Configure receive header buffer sizes */
	memset ( &rcvhdrcnt, 0, sizeof ( rcvhdrcnt ) );
	BIT_FILL_1 ( &rcvhdrcnt, Value, QIB7322_RECV_HEADER_COUNT );
	qib7322_writeq ( qib7322, &rcvhdrcnt, QIB_7322_RcvHdrCnt_offset );
	memset ( &rcvhdrentsize, 0, sizeof ( rcvhdrentsize ) );
	BIT_FILL_1 ( &rcvhdrentsize, Value, ( QIB7322_RECV_HEADER_SIZE >> 2 ) );
	qib7322_writeq ( qib7322, &rcvhdrentsize,
			 QIB_7322_RcvHdrEntSize_offset );

	/* Calculate eager array start addresses for each context */
	qib7322_readq ( qib7322, &rcvegrbase, QIB_7322_RcvEgrBase_offset );
	egrbase = BIT_GET ( &rcvegrbase, Value );
	for ( ctx = 0 ; ctx < QIB7322_MAX_PORTS ; ctx++ ) {
		qib7322->recv_wq[ctx].eager_array = egrbase;
		qib7322->recv_wq[ctx].eager_entries = eager_array_size_kernel;
		egrbase += ( eager_array_size_kernel *
			     sizeof ( struct QIB_7322_RcvEgr ) );
	}
	for ( ; ctx < QIB7322_NUM_CONTEXTS ; ctx++ ) {
		qib7322->recv_wq[ctx].eager_array = egrbase;
		qib7322->recv_wq[ctx].eager_entries = eager_array_size_user;
		egrbase += ( eager_array_size_user *
			     sizeof ( struct QIB_7322_RcvEgr ) );
	}
	for ( ctx = 0 ; ctx < QIB7322_NUM_CONTEXTS ; ctx++ ) {
		DBGC ( qib7322, "QIB7322 %p CTX %d eager array at %lx (%d "
		       "entries)\n", qib7322, ctx,
		       qib7322->recv_wq[ctx].eager_array,
		       qib7322->recv_wq[ctx].eager_entries );
	}

	/* Set the BTH QP for Infinipath packets to an unused value */
	memset ( &rcvbthqp, 0, sizeof ( rcvbthqp ) );
	BIT_FILL_1 ( &rcvbthqp, RcvBTHQP, QIB7322_QP_IDETH );
	qib7322_writeq ( qib7322, &rcvbthqp, QIB_7322_RcvBTHQP_0_offset );
	qib7322_writeq ( qib7322, &rcvbthqp, QIB_7322_RcvBTHQP_1_offset );

	/* Assign initial credits */
	memset ( &rxcreditvl, 0, sizeof ( rxcreditvl ) );
	BIT_FILL_1 ( &rxcreditvl, RxMaxCreditVL, QIB7322_MAX_CREDITS_VL0 );
	qib7322_writeq_array8b ( qib7322, &rxcreditvl,
				 QIB_7322_RxCreditVL0_0_offset, 0 );
	qib7322_writeq_array8b ( qib7322, &rxcreditvl,
				 QIB_7322_RxCreditVL0_1_offset, 0 );
	BIT_FILL_1 ( &rxcreditvl, RxMaxCreditVL, QIB7322_MAX_CREDITS_VL15 );
	qib7322_writeq_array8b ( qib7322, &rxcreditvl,
				 QIB_7322_RxCreditVL0_0_offset, 15 );
	qib7322_writeq_array8b ( qib7322, &rxcreditvl,
				 QIB_7322_RxCreditVL0_1_offset, 15 );

	return 0;
}

/**
 * Shut down receive datapath
 *
 * @v qib7322		QIB7322 device
 */
static void qib7322_fini_recv ( struct qib7322 *qib7322 __unused ) {
	/* Nothing to do; all contexts were already disabled when the
	 * queue pairs were destroyed
	 */
}

/***************************************************************************
 *
 * Completion queue operations
 *
 ***************************************************************************
 */

/**
 * Create completion queue
 *
 * @v ibdev		Infiniband device
 * @v cq		Completion queue
 * @ret rc		Return status code
 */
static int qib7322_create_cq ( struct ib_device *ibdev,
			       struct ib_completion_queue *cq ) {
	struct qib7322 *qib7322 = ib_get_drvdata ( ibdev );
	static int cqn;

	/* The hardware has no concept of completion queues.  We
	 * simply use the association between CQs and WQs (already
	 * handled by the IB core) to decide which WQs to poll.
	 *
	 * We do set a CQN, just to avoid confusing debug messages
	 * from the IB core.
	 */
	cq->cqn = ++cqn;
	DBGC ( qib7322, "QIB7322 %p CQN %ld created\n", qib7322, cq->cqn );

	return 0;
}

/**
 * Destroy completion queue
 *
 * @v ibdev		Infiniband device
 * @v cq		Completion queue
 */
static void qib7322_destroy_cq ( struct ib_device *ibdev,
				 struct ib_completion_queue *cq ) {
	struct qib7322 *qib7322 = ib_get_drvdata ( ibdev );

	/* Nothing to do */
	DBGC ( qib7322, "QIB7322 %p CQN %ld destroyed\n", qib7322, cq->cqn );
}

/***************************************************************************
 *
 * Queue pair operations
 *
 ***************************************************************************
 */

/**
 * Create queue pair
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @ret rc		Return status code
 */
static int qib7322_create_qp ( struct ib_device *ibdev,
			       struct ib_queue_pair *qp ) {
	struct qib7322 *qib7322 = ib_get_drvdata ( ibdev );
	unsigned int ctx;
	int rc;

	/* Allocate a context and QPN */
	if ( ( rc = qib7322_alloc_ctx ( ibdev, qp ) ) != 0 )
		goto err_alloc_ctx;
	ctx = qib7322_ctx ( ibdev, qp );

	/* Set work-queue private data pointers */
	ib_wq_set_drvdata ( &qp->send, &qib7322->send_wq[ctx] );
	ib_wq_set_drvdata ( &qp->recv, &qib7322->recv_wq[ctx] );

	/* Create receive work queue */
	if ( ( rc = qib7322_create_recv_wq ( ibdev, qp ) ) != 0 )
		goto err_create_recv_wq;

	/* Create send work queue */
	if ( ( rc = qib7322_create_send_wq ( ibdev, qp ) ) != 0 )
		goto err_create_send_wq;

	return 0;

	qib7322_destroy_send_wq ( ibdev, qp );
 err_create_send_wq:
	qib7322_destroy_recv_wq ( ibdev, qp );
 err_create_recv_wq:
	qib7322_free_ctx ( ibdev, qp );
 err_alloc_ctx:
	return rc;
}

/**
 * Modify queue pair
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @ret rc		Return status code
 */
static int qib7322_modify_qp ( struct ib_device *ibdev,
			       struct ib_queue_pair *qp ) {
	struct qib7322 *qib7322 = ib_get_drvdata ( ibdev );

	/* Nothing to do; the hardware doesn't have a notion of queue
	 * keys
	 */
	DBGC2 ( qib7322, "QIB7322 %p QPN %ld modified\n", qib7322, qp->qpn );
	return 0;
}

/**
 * Destroy queue pair
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 */
static void qib7322_destroy_qp ( struct ib_device *ibdev,
				 struct ib_queue_pair *qp ) {

	qib7322_destroy_send_wq ( ibdev, qp );
	qib7322_destroy_recv_wq ( ibdev, qp );
	qib7322_free_ctx ( ibdev, qp );
}

/***************************************************************************
 *
 * Work request operations
 *
 ***************************************************************************
 */

/**
 * Post send work queue entry
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @v dest		Destination address vector
 * @v iobuf		I/O buffer
 * @ret rc		Return status code
 */
static int qib7322_post_send ( struct ib_device *ibdev,
			       struct ib_queue_pair *qp,
			       struct ib_address_vector *dest,
			       struct io_buffer *iobuf ) {
	struct qib7322 *qib7322 = ib_get_drvdata ( ibdev );
	struct ib_work_queue *wq = &qp->send;
	struct qib7322_send_work_queue *qib7322_wq = ib_wq_get_drvdata ( wq );
	struct QIB_7322_SendPbc sendpbc;
	unsigned int port = ( ibdev->port - QIB7322_PORT_BASE );
	uint8_t header_buf[IB_MAX_HEADER_SIZE];
	struct io_buffer headers;
	int send_buf;
	unsigned long start_offset;
	unsigned long offset;
	size_t len;
	ssize_t frag_len;
	const uint32_t *data;

	/* Allocate send buffer and calculate offset */
	send_buf = qib7322_alloc_send_buf ( qib7322, qib7322_wq->send_bufs );
	if ( send_buf < 0 )
		return send_buf;
	start_offset = offset =
		qib7322_send_buffer_offset ( qib7322, qib7322_wq->send_bufs,
					     send_buf );

	/* Store I/O buffer and send buffer index */
	assert ( wq->iobufs[qib7322_wq->prod] == NULL );
	wq->iobufs[qib7322_wq->prod] = iobuf;
	qib7322_wq->used[qib7322_wq->prod] = send_buf;

	/* Construct headers */
	iob_populate ( &headers, header_buf, 0, sizeof ( header_buf ) );
	iob_reserve ( &headers, sizeof ( header_buf ) );
	ib_push ( ibdev, &headers, qp, iob_len ( iobuf ), dest );

	/* Calculate packet length */
	len = ( ( sizeof ( sendpbc ) + iob_len ( &headers ) +
		  iob_len ( iobuf ) + 3 ) & ~3 );

	/* Construct send per-buffer control word */
	memset ( &sendpbc, 0, sizeof ( sendpbc ) );
	BIT_FILL_3 ( &sendpbc,
		     LengthP1_toibc, ( ( len >> 2 ) - 1 ),
		     Port, port,
		     VL15, ( ( qp->type == IB_QPT_SMI ) ? 1 : 0 ) );

	/* Write SendPbc */
	DBG_DISABLE ( DBGLVL_IO );
	qib7322_writeq ( qib7322, &sendpbc, offset );
	offset += sizeof ( sendpbc );

	/* Write headers */
	for ( data = headers.data, frag_len = iob_len ( &headers ) ;
	      frag_len > 0 ; data++, offset += 4, frag_len -= 4 ) {
		qib7322_writel ( qib7322, *data, offset );
	}

	/* Write data */
	for ( data = iobuf->data, frag_len = iob_len ( iobuf ) ;
	      frag_len > 0 ; data++, offset += 4, frag_len -= 4 ) {
		qib7322_writel ( qib7322, *data, offset );
	}
	DBG_ENABLE ( DBGLVL_IO );

	assert ( ( start_offset + len ) == offset );
	DBGC2 ( qib7322, "QIB7322 %p QPN %ld TX %04x(%04x) posted [%lx,%lx)\n",
		qib7322, qp->qpn, send_buf, qib7322_wq->prod,
		start_offset, offset );

	/* Increment producer counter */
	qib7322_wq->prod = ( ( qib7322_wq->prod + 1 ) & ( wq->num_wqes - 1 ) );

	return 0;
}

/**
 * Complete send work queue entry
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @v wqe_idx		Work queue entry index
 */
static void qib7322_complete_send ( struct ib_device *ibdev,
				    struct ib_queue_pair *qp,
				    unsigned int wqe_idx ) {
	struct qib7322 *qib7322 = ib_get_drvdata ( ibdev );
	struct ib_work_queue *wq = &qp->send;
	struct qib7322_send_work_queue *qib7322_wq = ib_wq_get_drvdata ( wq );
	struct io_buffer *iobuf;
	unsigned int send_buf;

	/* Parse completion */
	send_buf = qib7322_wq->used[wqe_idx];
	DBGC2 ( qib7322, "QIB7322 %p QPN %ld TX %04x(%04x) complete\n",
		qib7322, qp->qpn, send_buf, wqe_idx );

	/* Complete work queue entry */
	iobuf = wq->iobufs[wqe_idx];
	assert ( iobuf != NULL );
	ib_complete_send ( ibdev, qp, iobuf, 0 );
	wq->iobufs[wqe_idx] = NULL;

	/* Free send buffer */
	qib7322_free_send_buf ( qib7322, qib7322_wq->send_bufs, send_buf );
}

/**
 * Poll send work queue
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 */
static void qib7322_poll_send_wq ( struct ib_device *ibdev,
				   struct ib_queue_pair *qp ) {
	struct qib7322 *qib7322 = ib_get_drvdata ( ibdev );
	struct ib_work_queue *wq = &qp->send;
	struct qib7322_send_work_queue *qib7322_wq = ib_wq_get_drvdata ( wq );
	unsigned int send_buf;

	/* Look for completions */
	while ( wq->fill ) {

		/* Check to see if send buffer has completed */
		send_buf = qib7322_wq->used[qib7322_wq->cons];
		if ( qib7322_send_buf_in_use ( qib7322, send_buf ) )
			break;

		/* Complete this buffer */
		qib7322_complete_send ( ibdev, qp, qib7322_wq->cons );

		/* Increment consumer counter */
		qib7322_wq->cons = ( ( qib7322_wq->cons + 1 ) &
				     ( wq->num_wqes - 1 ) );
	}
}

/**
 * Post receive work queue entry
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @v iobuf		I/O buffer
 * @ret rc		Return status code
 */
static int qib7322_post_recv ( struct ib_device *ibdev,
			       struct ib_queue_pair *qp,
			       struct io_buffer *iobuf ) {
	struct qib7322 *qib7322 = ib_get_drvdata ( ibdev );
	struct ib_work_queue *wq = &qp->recv;
	struct qib7322_recv_work_queue *qib7322_wq = ib_wq_get_drvdata ( wq );
	struct QIB_7322_RcvEgr rcvegr;
	struct QIB_7322_scalar rcvegrindexhead;
	unsigned int ctx = qib7322_ctx ( ibdev, qp );
	physaddr_t addr;
	size_t len;
	unsigned int wqe_idx;
	unsigned int bufsize;

	/* Sanity checks */
	addr = virt_to_bus ( iobuf->data );
	len = iob_tailroom ( iobuf );
	if ( addr & ( QIB7322_EAGER_BUFFER_ALIGN - 1 ) ) {
		DBGC ( qib7322, "QIB7322 %p QPN %ld misaligned RX buffer "
		       "(%08lx)\n", qib7322, qp->qpn, addr );
		return -EINVAL;
	}
	if ( len != QIB7322_RECV_PAYLOAD_SIZE ) {
		DBGC ( qib7322, "QIB7322 %p QPN %ld wrong RX buffer size "
		       "(%zd)\n", qib7322, qp->qpn, len );
		return -EINVAL;
	}

	/* Calculate eager producer index and WQE index */
	wqe_idx = ( qib7322_wq->eager_prod & ( wq->num_wqes - 1 ) );
	assert ( wq->iobufs[wqe_idx] == NULL );

	/* Store I/O buffer */
	wq->iobufs[wqe_idx] = iobuf;

	/* Calculate buffer size */
	switch ( QIB7322_RECV_PAYLOAD_SIZE ) {
	case 2048:  bufsize = QIB7322_EAGER_BUFFER_2K;  break;
	case 4096:  bufsize = QIB7322_EAGER_BUFFER_4K;  break;
	case 8192:  bufsize = QIB7322_EAGER_BUFFER_8K;  break;
	case 16384: bufsize = QIB7322_EAGER_BUFFER_16K; break;
	case 32768: bufsize = QIB7322_EAGER_BUFFER_32K; break;
	case 65536: bufsize = QIB7322_EAGER_BUFFER_64K; break;
	default:    linker_assert ( 0, invalid_rx_payload_size );
		    bufsize = QIB7322_EAGER_BUFFER_NONE;
	}

	/* Post eager buffer */
	memset ( &rcvegr, 0, sizeof ( rcvegr ) );
	BIT_FILL_2 ( &rcvegr,
		     Addr, ( addr >> 11 ),
		     BufSize, bufsize );
	qib7322_writeq_array8b ( qib7322, &rcvegr, qib7322_wq->eager_array,
				 qib7322_wq->eager_prod );
	DBGC2 ( qib7322, "QIB7322 %p QPN %ld RX egr %04x(%04x) posted "
		"[%lx,%lx)\n", qib7322, qp->qpn, qib7322_wq->eager_prod,
		wqe_idx, addr, ( addr + len ) );

	/* Increment producer index */
	qib7322_wq->eager_prod = ( ( qib7322_wq->eager_prod + 1 ) &
				   ( qib7322_wq->eager_entries - 1 ) );

	/* Update head index */
	memset ( &rcvegrindexhead, 0, sizeof ( rcvegrindexhead ) );
	BIT_FILL_1 ( &rcvegrindexhead,
		     Value, ( ( qib7322_wq->eager_prod + 1 ) &
			      ( qib7322_wq->eager_entries - 1 ) ) );
	qib7322_writeq_array64k ( qib7322, &rcvegrindexhead,
				  QIB_7322_RcvEgrIndexHead0_offset, ctx );

	return 0;
}

/**
 * Complete receive work queue entry
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @v header_offs	Header offset
 */
static void qib7322_complete_recv ( struct ib_device *ibdev,
				    struct ib_queue_pair *qp,
				    unsigned int header_offs ) {
	struct qib7322 *qib7322 = ib_get_drvdata ( ibdev );
	struct ib_work_queue *wq = &qp->recv;
	struct qib7322_recv_work_queue *qib7322_wq = ib_wq_get_drvdata ( wq );
	struct QIB_7322_RcvHdrFlags *rcvhdrflags;
	struct QIB_7322_RcvEgr rcvegr;
	struct io_buffer headers;
	struct io_buffer *iobuf;
	struct ib_queue_pair *intended_qp;
	struct ib_address_vector dest;
	struct ib_address_vector source;
	unsigned int rcvtype;
	unsigned int pktlen;
	unsigned int egrindex;
	unsigned int useegrbfr;
	unsigned int iberr, mkerr, tiderr, khdrerr, mtuerr;
	unsigned int lenerr, parityerr, vcrcerr, icrcerr;
	unsigned int err;
	unsigned int hdrqoffset;
	unsigned int header_len;
	unsigned int padded_payload_len;
	unsigned int wqe_idx;
	size_t payload_len;
	int qp0;
	int rc;

	/* RcvHdrFlags are at the end of the header entry */
	rcvhdrflags = ( qib7322_wq->header + header_offs +
			QIB7322_RECV_HEADER_SIZE - sizeof ( *rcvhdrflags ) );
	rcvtype = BIT_GET ( rcvhdrflags, RcvType );
	pktlen = ( BIT_GET ( rcvhdrflags, PktLen ) << 2 );
	egrindex = BIT_GET ( rcvhdrflags, EgrIndex );
	useegrbfr = BIT_GET ( rcvhdrflags, UseEgrBfr );
	hdrqoffset = ( BIT_GET ( rcvhdrflags, HdrqOffset ) << 2 );
	iberr = BIT_GET ( rcvhdrflags, IBErr );
	mkerr = BIT_GET ( rcvhdrflags, MKErr );
	tiderr = BIT_GET ( rcvhdrflags, TIDErr );
	khdrerr = BIT_GET ( rcvhdrflags, KHdrErr );
	mtuerr = BIT_GET ( rcvhdrflags, MTUErr );
	lenerr = BIT_GET ( rcvhdrflags, LenErr );
	parityerr = BIT_GET ( rcvhdrflags, ParityErr );
	vcrcerr = BIT_GET ( rcvhdrflags, VCRCErr );
	icrcerr = BIT_GET ( rcvhdrflags, ICRCErr );
	header_len = ( QIB7322_RECV_HEADER_SIZE - hdrqoffset -
		       sizeof ( *rcvhdrflags ) );
	padded_payload_len = ( pktlen - header_len - 4 /* ICRC */ );
	err = ( iberr | mkerr | tiderr | khdrerr | mtuerr |
		lenerr | parityerr | vcrcerr | icrcerr );
	/* IB header is placed immediately before RcvHdrFlags */
	iob_populate ( &headers, ( ( ( void * ) rcvhdrflags ) - header_len ),
		       header_len, header_len );

	/* Dump diagnostic information */
	DBGC2 ( qib7322, "QIB7322 %p QPN %ld RX egr %04x%s hdr %d type %d len "
		"%d(%d+%d+4)%s%s%s%s%s%s%s%s%s%s%s\n", qib7322, qp->qpn,
		egrindex, ( useegrbfr ? "" : "(unused)" ),
		( header_offs / QIB7322_RECV_HEADER_SIZE ),
		rcvtype, pktlen, header_len, padded_payload_len,
		( err ? " [Err" : "" ), ( iberr ? " IB" : "" ),
		( mkerr ? " MK" : "" ), ( tiderr ? " TID" : "" ),
		( khdrerr ? " KHdr" : "" ), ( mtuerr ? " MTU" : "" ),
		( lenerr ? " Len" : "" ), ( parityerr ? " Parity" : ""),
		( vcrcerr ? " VCRC" : "" ), ( icrcerr ? " ICRC" : "" ),
		( err ? "]" : "" ) );
	DBGCP_HDA ( qib7322, hdrqoffset, headers.data,
		    ( header_len + sizeof ( *rcvhdrflags ) ) );

	/* Parse header to generate address vector */
	qp0 = ( qp->qpn == 0 );
	intended_qp = NULL;
	if ( ( rc = ib_pull ( ibdev, &headers, ( qp0 ? &intended_qp : NULL ),
			      &payload_len, &dest, &source ) ) != 0 ) {
		DBGC ( qib7322, "QIB7322 %p could not parse headers: %s\n",
		       qib7322, strerror ( rc ) );
		err = 1;
	}
	if ( ! intended_qp )
		intended_qp = qp;

	/* Complete this buffer and any skipped buffers.  Note that
	 * when the hardware runs out of buffers, it will repeatedly
	 * report the same buffer (the tail) as a TID error, and that
	 * it also has a habit of sometimes skipping over several
	 * buffers at a time.
	 */
	while ( 1 ) {

		/* If we have caught up to the producer counter, stop.
		 * This will happen when the hardware first runs out
		 * of buffers and starts reporting TID errors against
		 * the eager buffer it wants to use next.
		 */
		if ( qib7322_wq->eager_cons == qib7322_wq->eager_prod )
			break;

		/* If we have caught up to where we should be after
		 * completing this egrindex, stop.  We phrase the test
		 * this way to avoid completing the entire ring when
		 * we receive the same egrindex twice in a row.
		 */
		if ( ( qib7322_wq->eager_cons ==
		       ( ( egrindex + 1 ) & ( qib7322_wq->eager_entries - 1 ))))
			break;

		/* Identify work queue entry and corresponding I/O
		 * buffer.
		 */
		wqe_idx = ( qib7322_wq->eager_cons & ( wq->num_wqes - 1 ) );
		iobuf = wq->iobufs[wqe_idx];
		assert ( iobuf != NULL );
		wq->iobufs[wqe_idx] = NULL;

		/* Complete the eager buffer */
		if ( qib7322_wq->eager_cons == egrindex ) {
			/* Completing the eager buffer described in
			 * this header entry.
			 */
			iob_put ( iobuf, payload_len );
			rc = ( err ? -EIO : ( useegrbfr ? 0 : -ECANCELED ) );
			/* Redirect to target QP if necessary */
			if ( qp != intended_qp ) {
				DBGC2 ( qib7322, "QIB7322 %p redirecting QPN "
					"%ld => %ld\n",
					qib7322, qp->qpn, intended_qp->qpn );
				/* Compensate for incorrect fill levels */
				qp->recv.fill--;
				intended_qp->recv.fill++;
			}
			ib_complete_recv ( ibdev, intended_qp, &dest, &source,
					   iobuf, rc );
		} else {
			/* Completing on a skipped-over eager buffer */
			ib_complete_recv ( ibdev, qp, &dest, &source, iobuf,
					   -ECANCELED );
		}

		/* Clear eager buffer */
		memset ( &rcvegr, 0, sizeof ( rcvegr ) );
		qib7322_writeq_array8b ( qib7322, &rcvegr,
					 qib7322_wq->eager_array,
					 qib7322_wq->eager_cons );

		/* Increment consumer index */
		qib7322_wq->eager_cons = ( ( qib7322_wq->eager_cons + 1 ) &
					   ( qib7322_wq->eager_entries - 1 ) );
	}
}

/**
 * Poll receive work queue
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 */
static void qib7322_poll_recv_wq ( struct ib_device *ibdev,
				   struct ib_queue_pair *qp ) {
	struct qib7322 *qib7322 = ib_get_drvdata ( ibdev );
	struct ib_work_queue *wq = &qp->recv;
	struct qib7322_recv_work_queue *qib7322_wq = ib_wq_get_drvdata ( wq );
	struct QIB_7322_RcvHdrHead0 rcvhdrhead;
	unsigned int ctx = qib7322_ctx ( ibdev, qp );
	unsigned int header_prod;

	/* Check for received packets */
	header_prod = ( BIT_GET ( &qib7322_wq->header_prod, Value ) << 2 );
	if ( header_prod == qib7322_wq->header_cons )
		return;

	/* Process all received packets */
	while ( qib7322_wq->header_cons != header_prod ) {

		/* Complete the receive */
		qib7322_complete_recv ( ibdev, qp, qib7322_wq->header_cons );

		/* Increment the consumer offset */
		qib7322_wq->header_cons += QIB7322_RECV_HEADER_SIZE;
		qib7322_wq->header_cons %= QIB7322_RECV_HEADERS_SIZE;

		/* QIB7322 has only one send buffer per port for VL15,
		 * which almost always leads to send buffer exhaustion
		 * and dropped MADs.  Mitigate this by refusing to
		 * process more than one VL15 MAD per poll, which will
		 * enforce interleaved TX/RX polls.
		 */
		if ( qp->type == IB_QPT_SMI )
			break;
	}

	/* Update consumer offset */
	memset ( &rcvhdrhead, 0, sizeof ( rcvhdrhead ) );
	BIT_FILL_2 ( &rcvhdrhead,
		     RcvHeadPointer, ( qib7322_wq->header_cons >> 2 ),
		     counter, 1 );
	qib7322_writeq_array64k ( qib7322, &rcvhdrhead,
				  QIB_7322_RcvHdrHead0_offset, ctx );
}

/**
 * Poll completion queue
 *
 * @v ibdev		Infiniband device
 * @v cq		Completion queue
 */
static void qib7322_poll_cq ( struct ib_device *ibdev,
			      struct ib_completion_queue *cq ) {
	struct ib_work_queue *wq;

	/* Poll associated send and receive queues */
	list_for_each_entry ( wq, &cq->work_queues, list ) {
		if ( wq->is_send ) {
			qib7322_poll_send_wq ( ibdev, wq->qp );
		} else {
			qib7322_poll_recv_wq ( ibdev, wq->qp );
		}
	}
}

1632 ***************************************************************************
1638 * @v ibdev Infiniband device
1640 static void qib7322_poll_eq ( struct ib_device
*ibdev
) {
1641 struct qib7322
*qib7322
= ib_get_drvdata ( ibdev
);
1642 struct QIB_7322_ErrStatus_0 errstatus
;
1643 unsigned int port
= ( ibdev
->port
- QIB7322_PORT_BASE
);
1645 /* Check for and clear status bits */
1646 DBG_DISABLE ( DBGLVL_IO
);
1647 qib7322_readq_port ( qib7322
, &errstatus
,
1648 QIB_7322_ErrStatus_0_offset
, port
);
1649 if ( errstatus
.u
.qwords
[0] ) {
1650 DBGC ( qib7322
, "QIB7322 %p port %d status %08x%08x\n", qib7322
,
1651 port
, errstatus
.u
.dwords
[1], errstatus
.u
.dwords
[0] );
1652 qib7322_writeq_port ( qib7322
, &errstatus
,
1653 QIB_7322_ErrClear_0_offset
, port
);
1655 DBG_ENABLE ( DBGLVL_IO
);
1657 /* Check for link status changes */
1658 if ( BIT_GET ( &errstatus
, IBStatusChanged
) )
1659 qib7322_link_state_changed ( ibdev
);
/***************************************************************************
 *
 * Infiniband link-layer operations
 *
 ***************************************************************************
 */

/**
 * Determine supported link speeds
 *
 * @v qib7322		QIB7322 device
 * @ret supported	Supported link speeds
 */
static unsigned int qib7322_link_speed_supported ( struct qib7322 *qib7322,
						   unsigned int port ) {
	struct QIB_7322_feature_mask features;
	struct QIB_7322_Revision revision;
	unsigned int supported;
	unsigned int boardid;

	/* Read the active feature mask */
	qib7322_readq ( qib7322, &features,
			QIB_7322_active_feature_mask_offset );
	switch ( port ) {
	case 0 :
		supported = BIT_GET ( &features, Port0_Link_Speed_Supported );
		break;
	case 1 :
		supported = BIT_GET ( &features, Port1_Link_Speed_Supported );
		break;
	default:
		DBGC ( qib7322, "QIB7322 %p port %d is invalid\n",
		       qib7322, port );
		supported = 0;
		break;
	}

	/* Apply hacks for specific board IDs */
	qib7322_readq ( qib7322, &revision, QIB_7322_Revision_offset );
	boardid = BIT_GET ( &revision, BoardID );
	switch ( boardid ) {
	case QIB7322_BOARD_QMH7342 :
		DBGC2 ( qib7322, "QIB7322 %p is a QMH7342; forcing QDR-only\n",
			qib7322 );
		supported = IB_LINK_SPEED_QDR;
		break;
	default:
		/* Do nothing */
		break;
	}

	DBGC2 ( qib7322, "QIB7322 %p port %d %s%s%s%s\n", qib7322, port,
		( supported ? "supports" : "disabled" ),
		( ( supported & IB_LINK_SPEED_SDR ) ? " SDR" : "" ),
		( ( supported & IB_LINK_SPEED_DDR ) ? " DDR" : "" ),
		( ( supported & IB_LINK_SPEED_QDR ) ? " QDR" : "" ) );
	return supported;
}

/**
 * Initialise Infiniband link
 *
 * @v ibdev		Infiniband device
 * @ret rc		Return status code
 */
static int qib7322_open ( struct ib_device *ibdev ) {
	struct qib7322 *qib7322 = ib_get_drvdata ( ibdev );
	struct QIB_7322_IBCCtrlA_0 ibcctrla;
	unsigned int port = ( ibdev->port - QIB7322_PORT_BASE );

	/* Enable link */
	qib7322_readq_port ( qib7322, &ibcctrla,
			     QIB_7322_IBCCtrlA_0_offset, port );
	BIT_SET ( &ibcctrla, IBLinkEn, 1 );
	qib7322_writeq_port ( qib7322, &ibcctrla,
			      QIB_7322_IBCCtrlA_0_offset, port );

	return 0;
}

/**
 * Close Infiniband link
 *
 * @v ibdev		Infiniband device
 */
static void qib7322_close ( struct ib_device *ibdev ) {
	struct qib7322 *qib7322 = ib_get_drvdata ( ibdev );
	struct QIB_7322_IBCCtrlA_0 ibcctrla;
	unsigned int port = ( ibdev->port - QIB7322_PORT_BASE );

	/* Disable link */
	qib7322_readq_port ( qib7322, &ibcctrla,
			     QIB_7322_IBCCtrlA_0_offset, port );
	BIT_SET ( &ibcctrla, IBLinkEn, 0 );
	qib7322_writeq_port ( qib7322, &ibcctrla,
			      QIB_7322_IBCCtrlA_0_offset, port );
}

/***************************************************************************
 *
 * Multicast group operations
 *
 ***************************************************************************
 */

/**
 * Attach to multicast group
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @v gid		Multicast GID
 * @ret rc		Return status code
 */
static int qib7322_mcast_attach ( struct ib_device *ibdev,
				  struct ib_queue_pair *qp,
				  union ib_gid *gid ) {
	struct qib7322 *qib7322 = ib_get_drvdata ( ibdev );

	( void ) qib7322;
	( void ) qp;
	( void ) gid;
	return 0;
}

/**
 * Detach from multicast group
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @v gid		Multicast GID
 */
static void qib7322_mcast_detach ( struct ib_device *ibdev,
				   struct ib_queue_pair *qp,
				   union ib_gid *gid ) {
	struct qib7322 *qib7322 = ib_get_drvdata ( ibdev );

	( void ) qib7322;
	( void ) qp;
	( void ) gid;
}

/** QIB7322 Infiniband operations */
static struct ib_device_operations qib7322_ib_operations = {
	.create_cq	= qib7322_create_cq,
	.destroy_cq	= qib7322_destroy_cq,
	.create_qp	= qib7322_create_qp,
	.modify_qp	= qib7322_modify_qp,
	.destroy_qp	= qib7322_destroy_qp,
	.post_send	= qib7322_post_send,
	.post_recv	= qib7322_post_recv,
	.poll_cq	= qib7322_poll_cq,
	.poll_eq	= qib7322_poll_eq,
	.open		= qib7322_open,
	.close		= qib7322_close,
	.mcast_attach	= qib7322_mcast_attach,
	.mcast_detach	= qib7322_mcast_detach,
	.set_port_info	= qib7322_set_port_info,
	.set_pkey_table	= qib7322_set_pkey_table,
};

/***************************************************************************
 *
 * I2C bus operations
 *
 ***************************************************************************
 */

/** QIB7322 I2C bit to GPIO mappings */
static unsigned int qib7322_i2c_bits[] = {
	[I2C_BIT_SCL] = ( 1 << QIB7322_GPIO_SCL ),
	[I2C_BIT_SDA] = ( 1 << QIB7322_GPIO_SDA ),
};

/**
 * Read QIB7322 I2C line status
 *
 * @v basher		Bit-bashing interface
 * @v bit_id		Bit number
 * @ret zero		Input is a logic 0
 * @ret non-zero	Input is a logic 1
 */
static int qib7322_i2c_read_bit ( struct bit_basher *basher,
				  unsigned int bit_id ) {
	struct qib7322 *qib7322 =
		container_of ( basher, struct qib7322, i2c.basher );
	struct QIB_7322_EXTStatus extstatus;
	unsigned int status;

	DBG_DISABLE ( DBGLVL_IO );

	qib7322_readq ( qib7322, &extstatus, QIB_7322_EXTStatus_offset );
	status = ( BIT_GET ( &extstatus, GPIOIn ) & qib7322_i2c_bits[bit_id] );

	DBG_ENABLE ( DBGLVL_IO );

	return status;
}

/**
 * Write QIB7322 I2C line status
 *
 * @v basher		Bit-bashing interface
 * @v bit_id		Bit number
 * @v data		Value to write
 */
static void qib7322_i2c_write_bit ( struct bit_basher *basher,
				    unsigned int bit_id, unsigned long data ) {
	struct qib7322 *qib7322 =
		container_of ( basher, struct qib7322, i2c.basher );
	struct QIB_7322_EXTCtrl extctrl;
	struct QIB_7322_GPIO gpioout;
	unsigned int bit = qib7322_i2c_bits[bit_id];
	unsigned int outputs = 0;
	unsigned int output_enables = 0;

	DBG_DISABLE ( DBGLVL_IO );

	/* Read current GPIO mask and outputs */
	qib7322_readq ( qib7322, &extctrl, QIB_7322_EXTCtrl_offset );
	qib7322_readq ( qib7322, &gpioout, QIB_7322_GPIOOut_offset );

	/* Update outputs and output enables.  I2C lines are tied
	 * high, so we always set the output to 0 and use the output
	 * enable to control the line.
	 */
	output_enables = BIT_GET ( &extctrl, GPIOOe );
	output_enables = ( ( output_enables & ~bit ) | ( ~data & bit ) );
	outputs = BIT_GET ( &gpioout, GPIO );
	outputs = ( outputs & ~bit );
	BIT_SET ( &extctrl, GPIOOe, output_enables );
	BIT_SET ( &gpioout, GPIO, outputs );

	/* Write the output enable first; that way we avoid logic
	 * hazards.
	 */
	qib7322_writeq ( qib7322, &extctrl, QIB_7322_EXTCtrl_offset );
	qib7322_writeq ( qib7322, &gpioout, QIB_7322_GPIOOut_offset );

	DBG_ENABLE ( DBGLVL_IO );
}

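/* The net effect is an open-drain style of drive: to output a 1 the
 * output enable is cleared and the external pull-up raises the line,
 * while to output a 0 the output enable is set and the pin drives the
 * pre-loaded 0.
 */
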
/** QIB7322 I2C bit-bashing interface operations */
static struct bit_basher_operations qib7322_i2c_basher_ops = {
	.read = qib7322_i2c_read_bit,
	.write = qib7322_i2c_write_bit,
};

/**
 * Initialise QIB7322 I2C subsystem
 *
 * @v qib7322		QIB7322 device
 * @ret rc		Return status code
 */
static int qib7322_init_i2c ( struct qib7322 *qib7322 ) {
	static int try_eeprom_address[] = { 0x51, 0x50 };
	unsigned int i;
	int rc;

	/* Initialise bus */
	if ( ( rc = init_i2c_bit_basher ( &qib7322->i2c,
					  &qib7322_i2c_basher_ops ) ) != 0 ) {
		DBGC ( qib7322, "QIB7322 %p could not initialise I2C bus: %s\n",
		       qib7322, strerror ( rc ) );
		return rc;
	}

	/* Probe for devices */
	for ( i = 0 ; i < ( sizeof ( try_eeprom_address ) /
			    sizeof ( try_eeprom_address[0] ) ) ; i++ ) {
		init_i2c_eeprom ( &qib7322->eeprom, try_eeprom_address[i] );
		if ( ( rc = i2c_check_presence ( &qib7322->i2c.i2c,
						 &qib7322->eeprom ) ) == 0 ) {
			DBGC2 ( qib7322, "QIB7322 %p found EEPROM at %02x\n",
				qib7322, try_eeprom_address[i] );
			return 0;
		}
	}

	DBGC ( qib7322, "QIB7322 %p could not find EEPROM\n", qib7322 );
	return -ENODEV;
}

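/* The candidate addresses in try_eeprom_address[] are probed in order
 * and the first device that responds is used; presumably different
 * board variants strap the EEPROM at different addresses.
 */
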
/**
 * Read EEPROM parameters
 *
 * @v qib7322		QIB7322 device
 * @ret rc		Return status code
 */
static int qib7322_read_eeprom ( struct qib7322 *qib7322 ) {
	struct i2c_interface *i2c = &qib7322->i2c.i2c;
	union ib_guid *guid = &qib7322->guid;
	int rc;

	/* Read GUID */
	if ( ( rc = i2c->read ( i2c, &qib7322->eeprom,
				QIB7322_EEPROM_GUID_OFFSET, guid->bytes,
				sizeof ( *guid ) ) ) != 0 ) {
		DBGC ( qib7322, "QIB7322 %p could not read GUID: %s\n",
		       qib7322, strerror ( rc ) );
		return rc;
	}
	DBGC2 ( qib7322, "QIB7322 %p has GUID " IB_GUID_FMT "\n",
		qib7322, IB_GUID_ARGS ( guid ) );

	/* Read serial number (debug only) */
	if ( DBG_LOG ) {
		uint8_t serial[ QIB7322_EEPROM_SERIAL_SIZE + 1 ];

		serial[ sizeof ( serial ) - 1 ] = '\0';
		if ( ( rc = i2c->read ( i2c, &qib7322->eeprom,
					QIB7322_EEPROM_SERIAL_OFFSET, serial,
					( sizeof ( serial ) - 1 ) ) ) != 0 ) {
			DBGC ( qib7322, "QIB7322 %p could not read serial: "
			       "%s\n", qib7322, strerror ( rc ) );
			return rc;
		}
		DBGC2 ( qib7322, "QIB7322 %p has serial number \"%s\"\n",
			qib7322, serial );
	}

	return 0;
}

/***************************************************************************
 *
 * Advanced High-performance Bus (AHB) access
 *
 ***************************************************************************
 */

/**
 * Wait for AHB transaction to complete
 *
 * @v qib7322		QIB7322 device
 * @ret rc		Return status code
 */
static int qib7322_ahb_wait ( struct qib7322 *qib7322 ) {
	struct QIB_7322_ahb_transaction_reg transaction;
	unsigned int i;

	/* Wait for Ready bit to be asserted */
	for ( i = 0 ; i < QIB7322_AHB_MAX_WAIT_US ; i++ ) {
		qib7322_readq ( qib7322, &transaction,
				QIB_7322_ahb_transaction_reg_offset );
		if ( BIT_GET ( &transaction, ahb_rdy ) )
			return 0;
		udelay ( 1 );
	}

	DBGC ( qib7322, "QIB7322 %p timed out waiting for AHB transaction\n",
	       qib7322 );
	return -ETIMEDOUT;
}

/**
 * Request ownership of the AHB
 *
 * @v qib7322		QIB7322 device
 * @v location		AHB location
 * @ret rc		Return status code
 */
static int qib7322_ahb_request ( struct qib7322 *qib7322,
				 unsigned int location ) {
	struct QIB_7322_ahb_access_ctrl access;
	int rc;

	/* Request ownership */
	memset ( &access, 0, sizeof ( access ) );
	BIT_FILL_2 ( &access,
		     sw_ahb_sel, 1,
		     sw_sel_ahb_trgt, QIB7322_AHB_LOC_TARGET ( location ) );
	qib7322_writeq ( qib7322, &access, QIB_7322_ahb_access_ctrl_offset );

	/* Wait for ownership to be granted */
	if ( ( rc = qib7322_ahb_wait ( qib7322 ) ) != 0 ) {
		DBGC ( qib7322, "QIB7322 %p could not obtain AHB ownership: "
		       "%s\n", qib7322, strerror ( rc ) );
		return rc;
	}

	return 0;
}

/**
 * Release ownership of the AHB
 *
 * @v qib7322		QIB7322 device
 */
static void qib7322_ahb_release ( struct qib7322 *qib7322 ) {
	struct QIB_7322_ahb_access_ctrl access;

	memset ( &access, 0, sizeof ( access ) );
	qib7322_writeq ( qib7322, &access, QIB_7322_ahb_access_ctrl_offset );
}

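/* qib7322_ahb_read() and qib7322_ahb_write() below assume that the
 * caller already owns the bus; qib7322_ahb_mod_reg() wraps a complete
 * request/read/write/release sequence around them.
 */
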
/**
 * Read data via AHB
 *
 * @v qib7322		QIB7322 device
 * @v location		AHB location
 * @v data		Data to read
 * @ret rc		Return status code
 *
 * You must have already acquired ownership of the AHB.
 */
static int qib7322_ahb_read ( struct qib7322 *qib7322, unsigned int location,
			      uint32_t *data ) {
	struct QIB_7322_ahb_transaction_reg xact;
	int rc;

	/* Avoid returning uninitialised data on error */
	*data = 0;

	/* Initiate transaction */
	memset ( &xact, 0, sizeof ( xact ) );
	BIT_FILL_2 ( &xact,
		     ahb_address, QIB7322_AHB_LOC_ADDRESS ( location ),
		     write_not_read, 0 );
	qib7322_writeq ( qib7322, &xact, QIB_7322_ahb_transaction_reg_offset );

	/* Wait for transaction to complete */
	if ( ( rc = qib7322_ahb_wait ( qib7322 ) ) != 0 )
		return rc;

	/* Read transaction data */
	qib7322_readq ( qib7322, &xact, QIB_7322_ahb_transaction_reg_offset );
	*data = BIT_GET ( &xact, ahb_data );

	return 0;
}

/**
 * Write data via AHB
 *
 * @v qib7322		QIB7322 device
 * @v location		AHB location
 * @v data		Data to write
 * @ret rc		Return status code
 *
 * You must have already acquired ownership of the AHB.
 */
static int qib7322_ahb_write ( struct qib7322 *qib7322, unsigned int location,
			       uint32_t data ) {
	struct QIB_7322_ahb_transaction_reg xact;
	int rc;

	/* Initiate transaction */
	memset ( &xact, 0, sizeof ( xact ) );
	BIT_FILL_3 ( &xact,
		     ahb_address, QIB7322_AHB_LOC_ADDRESS ( location ),
		     write_not_read, 1,
		     ahb_data, data );
	qib7322_writeq ( qib7322, &xact, QIB_7322_ahb_transaction_reg_offset );

	/* Wait for transaction to complete */
	if ( ( rc = qib7322_ahb_wait ( qib7322 ) ) != 0 )
		return rc;

	return 0;
}

/**
 * Read/modify/write AHB register
 *
 * @v qib7322		QIB7322 device
 * @v location		AHB location
 * @v value		Value to set
 * @v mask		Mask to apply to old value
 * @ret rc		Return status code
 */
static int qib7322_ahb_mod_reg ( struct qib7322 *qib7322, unsigned int location,
				 uint32_t value, uint32_t mask ) {
	uint32_t old_value;
	uint32_t new_value;
	int rc;

	DBG_DISABLE ( DBGLVL_IO );

	/* Sanity check */
	assert ( ( value & mask ) == value );

	/* Acquire bus ownership */
	if ( ( rc = qib7322_ahb_request ( qib7322, location ) ) != 0 )
		goto out;

	/* Read existing value */
	if ( ( rc = qib7322_ahb_read ( qib7322, location, &old_value ) ) != 0 )
		goto out_release;

	/* Update value */
	new_value = ( ( old_value & ~mask ) | value );
	DBGCP ( qib7322, "QIB7322 %p AHB %x %#08x => %#08x\n",
		qib7322, location, old_value, new_value );
	if ( ( rc = qib7322_ahb_write ( qib7322, location, new_value ) ) != 0 )
		goto out_release;

 out_release:
	/* Release bus */
	qib7322_ahb_release ( qib7322 );
 out:
	DBG_ENABLE ( DBGLVL_IO );
	return rc;
}

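/* For example, the SerDes tuning call in qib7322_init_ib_serdes()
 * passes value 0 with mask 0x00000e00, which clears bits 9-11 of the
 * addressed register and leaves all other bits unchanged, since the
 * new value is ( ( old_value & ~mask ) | value ).
 */
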
/**
 * Read/modify/write AHB register across all ports and channels
 *
 * @v qib7322		QIB7322 device
 * @v reg		AHB register
 * @v value		Value to set
 * @v mask		Mask to apply to old value
 * @ret rc		Return status code
 */
static int qib7322_ahb_mod_reg_all ( struct qib7322 *qib7322, unsigned int reg,
				     uint32_t value, uint32_t mask ) {
	unsigned int port;
	unsigned int channel;
	unsigned int location;
	int rc;

	for ( port = 0 ; port < QIB7322_MAX_PORTS ; port++ ) {
		for ( channel = 0 ; channel < QIB7322_MAX_WIDTH ; channel++ ) {
			location = QIB7322_AHB_LOCATION ( port, channel, reg );
			if ( ( rc = qib7322_ahb_mod_reg ( qib7322, location,
							  value, mask ) ) != 0 )
				return rc;
		}
	}

	return 0;
}

/***************************************************************************
 *
 * Infiniband SerDes initialisation
 *
 ***************************************************************************
 */

/**
 * Initialise the IB SerDes
 *
 * @v qib7322		QIB7322 device
 * @ret rc		Return status code
 */
static int qib7322_init_ib_serdes ( struct qib7322 *qib7322 ) {
	struct QIB_7322_IBCCtrlA_0 ibcctrla;
	struct QIB_7322_IBCCtrlB_0 ibcctrlb;
	struct QIB_7322_IBPCSConfig_0 ibpcsconfig;

	/* Configure sensible defaults for IBC */
	memset ( &ibcctrla, 0, sizeof ( ibcctrla ) );
	BIT_FILL_5 ( &ibcctrla, /* Tuning values taken from Linux driver */
		     FlowCtrlPeriod, 0x03,
		     FlowCtrlWaterMark, 0x05,
		     MaxPktLen, ( ( QIB7322_RECV_HEADER_SIZE +
				    QIB7322_RECV_PAYLOAD_SIZE +
				    4 /* ICRC */ ) >> 2 ),
		     PhyerrThreshold, 0xf,
		     OverrunThreshold, 0xf );
	qib7322_writeq ( qib7322, &ibcctrla, QIB_7322_IBCCtrlA_0_offset );
	qib7322_writeq ( qib7322, &ibcctrla, QIB_7322_IBCCtrlA_1_offset );

	/* Force SDR only to avoid needing all the DDR tuning,
	 * Mellanox compatibility hacks etc.  SDR is plenty for
	 * boot-time operation.
	 */
	qib7322_readq ( qib7322, &ibcctrlb, QIB_7322_IBCCtrlB_0_offset );
	BIT_SET ( &ibcctrlb, IB_ENHANCED_MODE, 0 );
	BIT_SET ( &ibcctrlb, SD_SPEED_SDR, 1 );
	BIT_SET ( &ibcctrlb, SD_SPEED_DDR, 0 );
	BIT_SET ( &ibcctrlb, SD_SPEED_QDR, 0 );
	BIT_SET ( &ibcctrlb, IB_NUM_CHANNELS, 1 ); /* 4X only */
	BIT_SET ( &ibcctrlb, IB_LANE_REV_SUPPORTED, 0 );
	BIT_SET ( &ibcctrlb, HRTBT_ENB, 0 );
	BIT_SET ( &ibcctrlb, HRTBT_AUTO, 0 );
	qib7322_writeq ( qib7322, &ibcctrlb, QIB_7322_IBCCtrlB_0_offset );
	qib7322_writeq ( qib7322, &ibcctrlb, QIB_7322_IBCCtrlB_1_offset );

	/* Tune SerDes */
	qib7322_ahb_mod_reg_all ( qib7322, 2, 0, 0x00000e00UL );

	/* Bring XGXS out of reset */
	memset ( &ibpcsconfig, 0, sizeof ( ibpcsconfig ) );
	qib7322_writeq ( qib7322, &ibpcsconfig, QIB_7322_IBPCSConfig_0_offset );
	qib7322_writeq ( qib7322, &ibpcsconfig, QIB_7322_IBPCSConfig_1_offset );

	return 0;
}

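/* MaxPktLen appears to be expressed in 32-bit words, hence the >> 2
 * applied to the byte count ( header + payload + 4-byte ICRC ).
 * Restricting the link to SDR and 4X keeps the SerDes at settings
 * that work without per-board tuning, as noted above.
 */
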
/***************************************************************************
 *
 * PCI layer interface
 *
 ***************************************************************************
 */

/**
 * Reset QIB7322
 *
 * @v qib7322		QIB7322 device
 * @v pci		PCI device
 */
static void qib7322_reset ( struct qib7322 *qib7322, struct pci_device *pci ) {
	struct QIB_7322_Control control;
	struct pci_config_backup backup;

	/* Back up PCI configuration space */
	pci_backup ( pci, &backup, NULL );

	/* Assert reset */
	memset ( &control, 0, sizeof ( control ) );
	BIT_FILL_1 ( &control, SyncReset, 1 );
	qib7322_writeq ( qib7322, &control, QIB_7322_Control_offset );

	/* Wait for reset to complete */
	mdelay ( 1000 );

	/* Restore PCI configuration space */
	pci_restore ( pci, &backup, NULL );
}

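/* The PCI configuration space backup/restore brackets the reset
 * because SyncReset presumably disturbs the configuration registers;
 * restoring them keeps the BARs and command register usable.  The
 * fixed delay is assumed to be long enough for the internal reset to
 * complete.
 */
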
/**
 * Probe PCI device
 *
 * @v pci		PCI device
 * @ret rc		Return status code
 */
static int qib7322_probe ( struct pci_device *pci ) {
	struct qib7322 *qib7322;
	struct QIB_7322_Revision revision;
	struct ib_device *ibdev;
	unsigned int link_speed_supported;
	int i;
	int rc;

	/* Allocate QIB7322 device */
	qib7322 = zalloc ( sizeof ( *qib7322 ) );
	if ( ! qib7322 ) {
		rc = -ENOMEM;
		goto err_alloc_qib7322;
	}
	pci_set_drvdata ( pci, qib7322 );

	/* Fix up PCI device */
	adjust_pci_device ( pci );

	/* Map PCI BAR */
	qib7322->regs = ioremap ( pci->membase, QIB7322_BAR0_SIZE );
	DBGC2 ( qib7322, "QIB7322 %p has BAR at %08lx\n",
		qib7322, pci->membase );

	/* Reset device */
	qib7322_reset ( qib7322, pci );

	/* Print some general data */
	qib7322_readq ( qib7322, &revision, QIB_7322_Revision_offset );
	DBGC2 ( qib7322, "QIB7322 %p board %02lx v%ld.%ld.%ld.%ld\n", qib7322,
		BIT_GET ( &revision, BoardID ),
		BIT_GET ( &revision, R_SW ),
		BIT_GET ( &revision, R_Arch ),
		BIT_GET ( &revision, R_ChipRevMajor ),
		BIT_GET ( &revision, R_ChipRevMinor ) );

	/* Initialise I2C subsystem */
	if ( ( rc = qib7322_init_i2c ( qib7322 ) ) != 0 )
		goto err_init_i2c;

	/* Read EEPROM parameters */
	if ( ( rc = qib7322_read_eeprom ( qib7322 ) ) != 0 )
		goto err_read_eeprom;

	/* Initialise send datapath */
	if ( ( rc = qib7322_init_send ( qib7322 ) ) != 0 )
		goto err_init_send;

	/* Initialise receive datapath */
	if ( ( rc = qib7322_init_recv ( qib7322 ) ) != 0 )
		goto err_init_recv;

	/* Initialise the IB SerDes */
	if ( ( rc = qib7322_init_ib_serdes ( qib7322 ) ) != 0 )
		goto err_init_ib_serdes;

	/* Allocate Infiniband devices */
	for ( i = 0 ; i < QIB7322_MAX_PORTS ; i++ ) {
		link_speed_supported =
			qib7322_link_speed_supported ( qib7322, i );
		if ( ! link_speed_supported )
			continue;
		ibdev = alloc_ibdev ( 0 );
		if ( ! ibdev ) {
			rc = -ENOMEM;
			goto err_alloc_ibdev;
		}
		qib7322->ibdev[i] = ibdev;
		ibdev->dev = &pci->dev;
		ibdev->op = &qib7322_ib_operations;
		ibdev->port = ( QIB7322_PORT_BASE + i );
		ibdev->link_width_enabled = ibdev->link_width_supported =
			IB_LINK_WIDTH_4X; /* 1x does not work */
		ibdev->link_speed_enabled = ibdev->link_speed_supported =
			IB_LINK_SPEED_SDR; /* to avoid need for link tuning */
		memcpy ( &ibdev->node_guid, &qib7322->guid,
			 sizeof ( ibdev->node_guid ) );
		memcpy ( &ibdev->gid.s.guid, &qib7322->guid,
			 sizeof ( ibdev->gid.s.guid ) );
		assert ( ( ibdev->gid.s.guid.bytes[7] & i ) == 0 );
		ibdev->gid.s.guid.bytes[7] |= i;
		ib_set_drvdata ( ibdev, qib7322 );
	}

	/* Register Infiniband devices */
	for ( i = 0 ; i < QIB7322_MAX_PORTS ; i++ ) {
		if ( ! qib7322->ibdev[i] )
			continue;
		if ( ( rc = register_ibdev ( qib7322->ibdev[i] ) ) != 0 ) {
			DBGC ( qib7322, "QIB7322 %p port %d could not register "
			       "IB device: %s\n", qib7322, i, strerror ( rc ) );
			goto err_register_ibdev;
		}
	}

	return 0;

	i = QIB7322_MAX_PORTS;
 err_register_ibdev:
	for ( i-- ; i >= 0 ; i-- ) {
		if ( qib7322->ibdev[i] )
			unregister_ibdev ( qib7322->ibdev[i] );
	}
	i = QIB7322_MAX_PORTS;
 err_alloc_ibdev:
	for ( i-- ; i >= 0 ; i-- )
		ibdev_put ( qib7322->ibdev[i] );
 err_init_ib_serdes:
	qib7322_fini_recv ( qib7322 );
 err_init_recv:
	qib7322_fini_send ( qib7322 );
 err_init_send:
 err_read_eeprom:
 err_init_i2c:
	free ( qib7322 );
 err_alloc_qib7322:
	return rc;
}

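/* Each port's GID is derived from the node GUID by OR-ing the
 * zero-based port index into the GUID's final byte; the assertion
 * above checks that the EEPROM-supplied GUID leaves those bits clear
 * so that the resulting GIDs are unique per port.
 */
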
/**
 * Remove PCI device
 *
 * @v pci		PCI device
 */
static void qib7322_remove ( struct pci_device *pci ) {
	struct qib7322 *qib7322 = pci_get_drvdata ( pci );
	int i;

	for ( i = ( QIB7322_MAX_PORTS - 1 ) ; i >= 0 ; i-- ) {
		if ( qib7322->ibdev[i] )
			unregister_ibdev ( qib7322->ibdev[i] );
	}
	for ( i = ( QIB7322_MAX_PORTS - 1 ) ; i >= 0 ; i-- )
		ibdev_put ( qib7322->ibdev[i] );
	qib7322_fini_send ( qib7322 );
	qib7322_fini_recv ( qib7322 );
	free ( qib7322 );
}

static struct pci_device_id qib7322_nics[] = {
	PCI_ROM ( 0x1077, 0x7322, "iba7322", "IBA7322 QDR InfiniBand HCA", 0 ),
};

struct pci_driver qib7322_driver __pci_driver = {
	.ids = qib7322_nics,
	.id_count = ( sizeof ( qib7322_nics ) / sizeof ( qib7322_nics[0] ) ),
	.probe = qib7322_probe,
	.remove = qib7322_remove,
};