2 * Copyright (C) 2008 Michael Brown <mbrown@fensystems.co.uk>.
3 * Copyright (C) 2008 Mellanox Technologies Ltd.
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation; either version 2 of the
8 * License, or any later version.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21 FILE_LICENCE ( GPL2_OR_LATER
);
33 #include <ipxe/pcibackup.h>
34 #include <ipxe/malloc.h>
35 #include <ipxe/umalloc.h>
36 #include <ipxe/iobuf.h>
37 #include <ipxe/netdevice.h>
38 #include <ipxe/infiniband.h>
39 #include <ipxe/ib_smc.h>
40 #include <ipxe/if_ether.h>
41 #include <ipxe/ethernet.h>
42 #include <ipxe/fcoe.h>
43 #include <ipxe/vlan.h>
44 #include <ipxe/bofm.h>
45 #include <ipxe/nvsvpd.h>
52 * Mellanox Hermon Infiniband HCA
56 /***************************************************************************
58 * Queue number allocation
60 ***************************************************************************
64 * Allocate offsets within usage bitmask
66 * @v bits Usage bitmask
67 * @v bits_len Length of usage bitmask
68 * @v num_bits Number of contiguous bits to allocate within bitmask
69 * @ret bit First free bit within bitmask, or negative error
71 static int hermon_bitmask_alloc ( hermon_bitmask_t
*bits
,
72 unsigned int bits_len
,
73 unsigned int num_bits
) {
75 hermon_bitmask_t mask
= 1;
76 unsigned int found
= 0;
78 /* Search bits for num_bits contiguous free bits */
79 while ( bit
< bits_len
) {
80 if ( ( mask
& *bits
) == 0 ) {
81 if ( ++found
== num_bits
)
87 mask
= ( mask
<< 1 ) | ( mask
>> ( 8 * sizeof ( mask
) - 1 ) );
94 /* Mark bits as in-use */
99 mask
= ( mask
>> 1 ) | ( mask
<< ( 8 * sizeof ( mask
) - 1 ) );
102 return ( bit
- num_bits
+ 1 );
106 * Free offsets within usage bitmask
108 * @v bits Usage bitmask
109 * @v bit Starting bit within bitmask
110 * @v num_bits Number of contiguous bits to free within bitmask
112 static void hermon_bitmask_free ( hermon_bitmask_t
*bits
,
113 int bit
, unsigned int num_bits
) {
114 hermon_bitmask_t mask
;
116 for ( ; num_bits
; bit
++, num_bits
-- ) {
117 mask
= ( 1 << ( bit
% ( 8 * sizeof ( mask
) ) ) );
118 bits
[ ( bit
/ ( 8 * sizeof ( mask
) ) ) ] &= ~mask
;
122 /***************************************************************************
126 ***************************************************************************
130 * Wait for Hermon command completion
132 * @v hermon Hermon device
133 * @v hcr HCA command registers
134 * @ret rc Return status code
136 static int hermon_cmd_wait ( struct hermon
*hermon
,
137 struct hermonprm_hca_command_register
*hcr
) {
140 for ( wait
= HERMON_HCR_MAX_WAIT_MS
; wait
; wait
-- ) {
142 readl ( hermon
->config
+ HERMON_HCR_REG ( 6 ) );
143 if ( ( MLX_GET ( hcr
, go
) == 0 ) &&
144 ( MLX_GET ( hcr
, t
) == hermon
->toggle
) )
154 * @v hermon Hermon device
155 * @v command Command opcode, flags and input/output lengths
156 * @v op_mod Opcode modifier (0 if no modifier applicable)
157 * @v in Input parameters
158 * @v in_mod Input modifier (0 if no modifier applicable)
159 * @v out Output parameters
160 * @ret rc Return status code
162 static int hermon_cmd ( struct hermon
*hermon
, unsigned long command
,
163 unsigned int op_mod
, const void *in
,
164 unsigned int in_mod
, void *out
) {
165 struct hermonprm_hca_command_register hcr
;
166 unsigned int opcode
= HERMON_HCR_OPCODE ( command
);
167 size_t in_len
= HERMON_HCR_IN_LEN ( command
);
168 size_t out_len
= HERMON_HCR_OUT_LEN ( command
);
175 assert ( in_len
<= HERMON_MBOX_SIZE
);
176 assert ( out_len
<= HERMON_MBOX_SIZE
);
178 DBGC2 ( hermon
, "Hermon %p command %02x in %zx%s out %zx%s\n",
179 hermon
, opcode
, in_len
,
180 ( ( command
& HERMON_HCR_IN_MBOX
) ?
"(mbox)" : "" ), out_len
,
181 ( ( command
& HERMON_HCR_OUT_MBOX
) ?
"(mbox)" : "" ) );
183 /* Check that HCR is free */
184 if ( ( rc
= hermon_cmd_wait ( hermon
, &hcr
) ) != 0 ) {
185 DBGC ( hermon
, "Hermon %p command interface locked\n",
190 /* Flip HCR toggle */
191 hermon
->toggle
= ( 1 - hermon
->toggle
);
194 memset ( &hcr
, 0, sizeof ( hcr
) );
195 in_buffer
= &hcr
.u
.dwords
[0];
196 if ( in_len
&& ( command
& HERMON_HCR_IN_MBOX
) ) {
197 memset ( hermon
->mailbox_in
, 0, HERMON_MBOX_SIZE
);
198 in_buffer
= hermon
->mailbox_in
;
199 MLX_FILL_H ( &hcr
, 0, in_param_h
, virt_to_bus ( in_buffer
) );
200 MLX_FILL_1 ( &hcr
, 1, in_param_l
, virt_to_bus ( in_buffer
) );
202 memcpy ( in_buffer
, in
, in_len
);
203 MLX_FILL_1 ( &hcr
, 2, input_modifier
, in_mod
);
204 out_buffer
= &hcr
.u
.dwords
[3];
205 if ( out_len
&& ( command
& HERMON_HCR_OUT_MBOX
) ) {
206 out_buffer
= hermon
->mailbox_out
;
207 MLX_FILL_H ( &hcr
, 3, out_param_h
,
208 virt_to_bus ( out_buffer
) );
209 MLX_FILL_1 ( &hcr
, 4, out_param_l
,
210 virt_to_bus ( out_buffer
) );
212 MLX_FILL_4 ( &hcr
, 6,
214 opcode_modifier
, op_mod
,
217 DBGC ( hermon
, "Hermon %p issuing command %04x\n",
219 DBGC2_HDA ( hermon
, virt_to_phys ( hermon
->config
+ HERMON_HCR_BASE
),
220 &hcr
, sizeof ( hcr
) );
221 if ( in_len
&& ( command
& HERMON_HCR_IN_MBOX
) ) {
222 DBGC2 ( hermon
, "Input mailbox:\n" );
223 DBGC2_HDA ( hermon
, virt_to_phys ( in_buffer
), in_buffer
,
224 ( ( in_len
< 512 ) ? in_len
: 512 ) );
228 for ( i
= 0 ; i
< ( sizeof ( hcr
) / sizeof ( hcr
.u
.dwords
[0] ) ) ;
230 writel ( hcr
.u
.dwords
[i
],
231 hermon
->config
+ HERMON_HCR_REG ( i
) );
235 /* Wait for command completion */
236 if ( ( rc
= hermon_cmd_wait ( hermon
, &hcr
) ) != 0 ) {
237 DBGC ( hermon
, "Hermon %p timed out waiting for command:\n",
240 virt_to_phys ( hermon
->config
+ HERMON_HCR_BASE
),
241 &hcr
, sizeof ( hcr
) );
245 /* Check command status */
246 status
= MLX_GET ( &hcr
, status
);
248 DBGC ( hermon
, "Hermon %p command failed with status %02x:\n",
251 virt_to_phys ( hermon
->config
+ HERMON_HCR_BASE
),
252 &hcr
, sizeof ( hcr
) );
256 /* Read output parameters, if any */
257 hcr
.u
.dwords
[3] = readl ( hermon
->config
+ HERMON_HCR_REG ( 3 ) );
258 hcr
.u
.dwords
[4] = readl ( hermon
->config
+ HERMON_HCR_REG ( 4 ) );
259 memcpy ( out
, out_buffer
, out_len
);
261 DBGC2 ( hermon
, "Output%s:\n",
262 ( command
& HERMON_HCR_OUT_MBOX
) ?
" mailbox" : "" );
263 DBGC2_HDA ( hermon
, virt_to_phys ( out_buffer
), out_buffer
,
264 ( ( out_len
< 512 ) ? out_len
: 512 ) );
271 hermon_cmd_query_dev_cap ( struct hermon
*hermon
,
272 struct hermonprm_query_dev_cap
*dev_cap
) {
273 return hermon_cmd ( hermon
,
274 HERMON_HCR_OUT_CMD ( HERMON_HCR_QUERY_DEV_CAP
,
275 1, sizeof ( *dev_cap
) ),
276 0, NULL
, 0, dev_cap
);
280 hermon_cmd_query_fw ( struct hermon
*hermon
, struct hermonprm_query_fw
*fw
) {
281 return hermon_cmd ( hermon
,
282 HERMON_HCR_OUT_CMD ( HERMON_HCR_QUERY_FW
,
288 hermon_cmd_init_hca ( struct hermon
*hermon
,
289 const struct hermonprm_init_hca
*init_hca
) {
290 return hermon_cmd ( hermon
,
291 HERMON_HCR_IN_CMD ( HERMON_HCR_INIT_HCA
,
292 1, sizeof ( *init_hca
) ),
293 0, init_hca
, 0, NULL
);
297 hermon_cmd_close_hca ( struct hermon
*hermon
) {
298 return hermon_cmd ( hermon
,
299 HERMON_HCR_VOID_CMD ( HERMON_HCR_CLOSE_HCA
),
304 hermon_cmd_init_port ( struct hermon
*hermon
, unsigned int port
) {
305 return hermon_cmd ( hermon
,
306 HERMON_HCR_VOID_CMD ( HERMON_HCR_INIT_PORT
),
307 0, NULL
, port
, NULL
);
311 hermon_cmd_close_port ( struct hermon
*hermon
, unsigned int port
) {
312 return hermon_cmd ( hermon
,
313 HERMON_HCR_VOID_CMD ( HERMON_HCR_CLOSE_PORT
),
314 0, NULL
, port
, NULL
);
318 hermon_cmd_set_port ( struct hermon
*hermon
, int is_ethernet
,
319 unsigned int port_selector
,
320 const union hermonprm_set_port
*set_port
) {
321 return hermon_cmd ( hermon
,
322 HERMON_HCR_IN_CMD ( HERMON_HCR_SET_PORT
,
323 1, sizeof ( *set_port
) ),
324 is_ethernet
, set_port
, port_selector
, NULL
);
328 hermon_cmd_sw2hw_mpt ( struct hermon
*hermon
, unsigned int index
,
329 const struct hermonprm_mpt
*mpt
) {
330 return hermon_cmd ( hermon
,
331 HERMON_HCR_IN_CMD ( HERMON_HCR_SW2HW_MPT
,
332 1, sizeof ( *mpt
) ),
333 0, mpt
, index
, NULL
);
337 hermon_cmd_write_mtt ( struct hermon
*hermon
,
338 const struct hermonprm_write_mtt
*write_mtt
) {
339 return hermon_cmd ( hermon
,
340 HERMON_HCR_IN_CMD ( HERMON_HCR_WRITE_MTT
,
341 1, sizeof ( *write_mtt
) ),
342 0, write_mtt
, 1, NULL
);
346 hermon_cmd_map_eq ( struct hermon
*hermon
, unsigned long index_map
,
347 const struct hermonprm_event_mask
*mask
) {
348 return hermon_cmd ( hermon
,
349 HERMON_HCR_IN_CMD ( HERMON_HCR_MAP_EQ
,
350 0, sizeof ( *mask
) ),
351 0, mask
, index_map
, NULL
);
355 hermon_cmd_sw2hw_eq ( struct hermon
*hermon
, unsigned int index
,
356 const struct hermonprm_eqc
*eqctx
) {
357 return hermon_cmd ( hermon
,
358 HERMON_HCR_IN_CMD ( HERMON_HCR_SW2HW_EQ
,
359 1, sizeof ( *eqctx
) ),
360 0, eqctx
, index
, NULL
);
364 hermon_cmd_hw2sw_eq ( struct hermon
*hermon
, unsigned int index
,
365 struct hermonprm_eqc
*eqctx
) {
366 return hermon_cmd ( hermon
,
367 HERMON_HCR_OUT_CMD ( HERMON_HCR_HW2SW_EQ
,
368 1, sizeof ( *eqctx
) ),
369 1, NULL
, index
, eqctx
);
373 hermon_cmd_query_eq ( struct hermon
*hermon
, unsigned int index
,
374 struct hermonprm_eqc
*eqctx
) {
375 return hermon_cmd ( hermon
,
376 HERMON_HCR_OUT_CMD ( HERMON_HCR_QUERY_EQ
,
377 1, sizeof ( *eqctx
) ),
378 0, NULL
, index
, eqctx
);
382 hermon_cmd_sw2hw_cq ( struct hermon
*hermon
, unsigned long cqn
,
383 const struct hermonprm_completion_queue_context
*cqctx
){
384 return hermon_cmd ( hermon
,
385 HERMON_HCR_IN_CMD ( HERMON_HCR_SW2HW_CQ
,
386 1, sizeof ( *cqctx
) ),
387 0, cqctx
, cqn
, NULL
);
391 hermon_cmd_hw2sw_cq ( struct hermon
*hermon
, unsigned long cqn
,
392 struct hermonprm_completion_queue_context
*cqctx
) {
393 return hermon_cmd ( hermon
,
394 HERMON_HCR_OUT_CMD ( HERMON_HCR_HW2SW_CQ
,
395 1, sizeof ( *cqctx
) ),
396 0, NULL
, cqn
, cqctx
);
400 hermon_cmd_query_cq ( struct hermon
*hermon
, unsigned long cqn
,
401 struct hermonprm_completion_queue_context
*cqctx
) {
402 return hermon_cmd ( hermon
,
403 HERMON_HCR_OUT_CMD ( HERMON_HCR_QUERY_CQ
,
404 1, sizeof ( *cqctx
) ),
405 0, NULL
, cqn
, cqctx
);
409 hermon_cmd_rst2init_qp ( struct hermon
*hermon
, unsigned long qpn
,
410 const struct hermonprm_qp_ee_state_transitions
*ctx
){
411 return hermon_cmd ( hermon
,
412 HERMON_HCR_IN_CMD ( HERMON_HCR_RST2INIT_QP
,
413 1, sizeof ( *ctx
) ),
418 hermon_cmd_init2rtr_qp ( struct hermon
*hermon
, unsigned long qpn
,
419 const struct hermonprm_qp_ee_state_transitions
*ctx
){
420 return hermon_cmd ( hermon
,
421 HERMON_HCR_IN_CMD ( HERMON_HCR_INIT2RTR_QP
,
422 1, sizeof ( *ctx
) ),
427 hermon_cmd_rtr2rts_qp ( struct hermon
*hermon
, unsigned long qpn
,
428 const struct hermonprm_qp_ee_state_transitions
*ctx
) {
429 return hermon_cmd ( hermon
,
430 HERMON_HCR_IN_CMD ( HERMON_HCR_RTR2RTS_QP
,
431 1, sizeof ( *ctx
) ),
436 hermon_cmd_rts2rts_qp ( struct hermon
*hermon
, unsigned long qpn
,
437 const struct hermonprm_qp_ee_state_transitions
*ctx
) {
438 return hermon_cmd ( hermon
,
439 HERMON_HCR_IN_CMD ( HERMON_HCR_RTS2RTS_QP
,
440 1, sizeof ( *ctx
) ),
445 hermon_cmd_2rst_qp ( struct hermon
*hermon
, unsigned long qpn
) {
446 return hermon_cmd ( hermon
,
447 HERMON_HCR_VOID_CMD ( HERMON_HCR_2RST_QP
),
448 0x03, NULL
, qpn
, NULL
);
452 hermon_cmd_query_qp ( struct hermon
*hermon
, unsigned long qpn
,
453 struct hermonprm_qp_ee_state_transitions
*ctx
) {
454 return hermon_cmd ( hermon
,
455 HERMON_HCR_OUT_CMD ( HERMON_HCR_QUERY_QP
,
456 1, sizeof ( *ctx
) ),
461 hermon_cmd_conf_special_qp ( struct hermon
*hermon
, unsigned int internal_qps
,
462 unsigned long base_qpn
) {
463 return hermon_cmd ( hermon
,
464 HERMON_HCR_VOID_CMD ( HERMON_HCR_CONF_SPECIAL_QP
),
465 internal_qps
, NULL
, base_qpn
, NULL
);
469 hermon_cmd_mad_ifc ( struct hermon
*hermon
, unsigned int port
,
470 union hermonprm_mad
*mad
) {
471 return hermon_cmd ( hermon
,
472 HERMON_HCR_INOUT_CMD ( HERMON_HCR_MAD_IFC
,
474 1, sizeof ( *mad
) ),
475 0x03, mad
, port
, mad
);
479 hermon_cmd_read_mcg ( struct hermon
*hermon
, unsigned int index
,
480 struct hermonprm_mcg_entry
*mcg
) {
481 return hermon_cmd ( hermon
,
482 HERMON_HCR_OUT_CMD ( HERMON_HCR_READ_MCG
,
483 1, sizeof ( *mcg
) ),
484 0, NULL
, index
, mcg
);
488 hermon_cmd_write_mcg ( struct hermon
*hermon
, unsigned int index
,
489 const struct hermonprm_mcg_entry
*mcg
) {
490 return hermon_cmd ( hermon
,
491 HERMON_HCR_IN_CMD ( HERMON_HCR_WRITE_MCG
,
492 1, sizeof ( *mcg
) ),
493 0, mcg
, index
, NULL
);
497 hermon_cmd_mgid_hash ( struct hermon
*hermon
, const union ib_gid
*gid
,
498 struct hermonprm_mgm_hash
*hash
) {
499 return hermon_cmd ( hermon
,
500 HERMON_HCR_INOUT_CMD ( HERMON_HCR_MGID_HASH
,
502 0, sizeof ( *hash
) ),
507 hermon_cmd_mod_stat_cfg ( struct hermon
*hermon
, unsigned int mode
,
508 unsigned int input_mod
,
509 struct hermonprm_scalar_parameter
*portion
) {
510 return hermon_cmd ( hermon
,
511 HERMON_HCR_INOUT_CMD ( HERMON_HCR_MOD_STAT_CFG
,
512 0, sizeof ( *portion
),
513 0, sizeof ( *portion
) ),
514 mode
, portion
, input_mod
, portion
);
518 hermon_cmd_query_port ( struct hermon
*hermon
, unsigned int port
,
519 struct hermonprm_query_port_cap
*query_port
) {
520 return hermon_cmd ( hermon
,
521 HERMON_HCR_OUT_CMD ( HERMON_HCR_QUERY_PORT
,
522 1, sizeof ( *query_port
) ),
523 0, NULL
, port
, query_port
);
527 hermon_cmd_sense_port ( struct hermon
*hermon
, unsigned int port
,
528 struct hermonprm_sense_port
*port_type
) {
529 return hermon_cmd ( hermon
,
530 HERMON_HCR_OUT_CMD ( HERMON_HCR_SENSE_PORT
,
531 0, sizeof ( *port_type
) ),
532 0, NULL
, port
, port_type
);
536 hermon_cmd_run_fw ( struct hermon
*hermon
) {
537 return hermon_cmd ( hermon
,
538 HERMON_HCR_VOID_CMD ( HERMON_HCR_RUN_FW
),
543 hermon_cmd_unmap_icm ( struct hermon
*hermon
, unsigned int page_count
,
544 const struct hermonprm_scalar_parameter
*offset
) {
545 return hermon_cmd ( hermon
,
546 HERMON_HCR_IN_CMD ( HERMON_HCR_UNMAP_ICM
,
547 0, sizeof ( *offset
) ),
548 0, offset
, page_count
, NULL
);
552 hermon_cmd_map_icm ( struct hermon
*hermon
,
553 const struct hermonprm_virtual_physical_mapping
*map
) {
554 return hermon_cmd ( hermon
,
555 HERMON_HCR_IN_CMD ( HERMON_HCR_MAP_ICM
,
556 1, sizeof ( *map
) ),
561 hermon_cmd_unmap_icm_aux ( struct hermon
*hermon
) {
562 return hermon_cmd ( hermon
,
563 HERMON_HCR_VOID_CMD ( HERMON_HCR_UNMAP_ICM_AUX
),
568 hermon_cmd_map_icm_aux ( struct hermon
*hermon
,
569 const struct hermonprm_virtual_physical_mapping
*map
) {
570 return hermon_cmd ( hermon
,
571 HERMON_HCR_IN_CMD ( HERMON_HCR_MAP_ICM_AUX
,
572 1, sizeof ( *map
) ),
577 hermon_cmd_set_icm_size ( struct hermon
*hermon
,
578 const struct hermonprm_scalar_parameter
*icm_size
,
579 struct hermonprm_scalar_parameter
*icm_aux_size
) {
580 return hermon_cmd ( hermon
,
581 HERMON_HCR_INOUT_CMD ( HERMON_HCR_SET_ICM_SIZE
,
582 0, sizeof ( *icm_size
),
583 0, sizeof (*icm_aux_size
) ),
584 0, icm_size
, 0, icm_aux_size
);
588 hermon_cmd_unmap_fa ( struct hermon
*hermon
) {
589 return hermon_cmd ( hermon
,
590 HERMON_HCR_VOID_CMD ( HERMON_HCR_UNMAP_FA
),
595 hermon_cmd_map_fa ( struct hermon
*hermon
,
596 const struct hermonprm_virtual_physical_mapping
*map
) {
597 return hermon_cmd ( hermon
,
598 HERMON_HCR_IN_CMD ( HERMON_HCR_MAP_FA
,
599 1, sizeof ( *map
) ),
603 /***************************************************************************
605 * Memory translation table operations
607 ***************************************************************************
611 * Allocate MTT entries
613 * @v hermon Hermon device
614 * @v memory Memory to map into MTT
615 * @v len Length of memory to map
616 * @v mtt MTT descriptor to fill in
617 * @ret rc Return status code
619 static int hermon_alloc_mtt ( struct hermon
*hermon
,
620 const void *memory
, size_t len
,
621 struct hermon_mtt
*mtt
) {
622 struct hermonprm_write_mtt write_mtt
;
625 unsigned int page_offset
;
626 unsigned int num_pages
;
628 unsigned int mtt_base_addr
;
632 /* Find available MTT entries */
633 start
= virt_to_phys ( memory
);
634 page_offset
= ( start
& ( HERMON_PAGE_SIZE
- 1 ) );
635 start
-= page_offset
;
637 num_pages
= ( ( len
+ HERMON_PAGE_SIZE
- 1 ) / HERMON_PAGE_SIZE
);
638 mtt_offset
= hermon_bitmask_alloc ( hermon
->mtt_inuse
, HERMON_MAX_MTTS
,
640 if ( mtt_offset
< 0 ) {
641 DBGC ( hermon
, "Hermon %p could not allocate %d MTT entries\n",
646 mtt_base_addr
= ( ( hermon
->cap
.reserved_mtts
+ mtt_offset
) *
647 hermon
->cap
.mtt_entry_size
);
650 /* Fill in MTT structure */
651 mtt
->mtt_offset
= mtt_offset
;
652 mtt
->num_pages
= num_pages
;
653 mtt
->mtt_base_addr
= mtt_base_addr
;
654 mtt
->page_offset
= page_offset
;
656 /* Construct and issue WRITE_MTT commands */
657 for ( i
= 0 ; i
< num_pages
; i
++ ) {
658 memset ( &write_mtt
, 0, sizeof ( write_mtt
) );
659 MLX_FILL_1 ( &write_mtt
.mtt_base_addr
, 1,
660 value
, mtt_base_addr
);
661 MLX_FILL_H ( &write_mtt
.mtt
, 0, ptag_h
, addr
);
662 MLX_FILL_2 ( &write_mtt
.mtt
, 1,
664 ptag_l
, ( addr
>> 3 ) );
665 if ( ( rc
= hermon_cmd_write_mtt ( hermon
,
666 &write_mtt
) ) != 0 ) {
667 DBGC ( hermon
, "Hermon %p could not write MTT at %x\n",
668 hermon
, mtt_base_addr
);
671 addr
+= HERMON_PAGE_SIZE
;
672 mtt_base_addr
+= hermon
->cap
.mtt_entry_size
;
675 DBGC ( hermon
, "Hermon %p MTT entries [%#x,%#x] for "
676 "[%08lx,%08lx,%08lx,%08lx)\n", hermon
, mtt
->mtt_offset
,
677 ( mtt
->mtt_offset
+ mtt
->num_pages
- 1 ), start
,
678 ( start
+ page_offset
), ( start
+ len
), addr
);
683 hermon_bitmask_free ( hermon
->mtt_inuse
, mtt_offset
, num_pages
);
691 * @v hermon Hermon device
692 * @v mtt MTT descriptor
694 static void hermon_free_mtt ( struct hermon
*hermon
,
695 struct hermon_mtt
*mtt
) {
697 DBGC ( hermon
, "Hermon %p MTT entries [%#x,%#x] freed\n",
698 hermon
, mtt
->mtt_offset
,
699 ( mtt
->mtt_offset
+ mtt
->num_pages
- 1 ) );
700 hermon_bitmask_free ( hermon
->mtt_inuse
, mtt
->mtt_offset
,
704 /***************************************************************************
706 * Static configuration operations
708 ***************************************************************************
712 * Calculate offset within static configuration
717 #define HERMON_MOD_STAT_CFG_OFFSET( field ) \
718 ( ( MLX_BIT_OFFSET ( struct hermonprm_mod_stat_cfg_st, field ) / 8 ) \
719 & ~( sizeof ( struct hermonprm_scalar_parameter ) - 1 ) )
722 * Query or modify static configuration
724 * @v hermon Hermon device
726 * @v mode Command mode
727 * @v offset Offset within static configuration
728 * @v stat_cfg Static configuration
729 * @ret rc Return status code
731 static int hermon_mod_stat_cfg ( struct hermon
*hermon
, unsigned int port
,
732 unsigned int mode
, unsigned int offset
,
733 struct hermonprm_mod_stat_cfg
*stat_cfg
) {
734 struct hermonprm_scalar_parameter
*portion
=
735 ( ( void * ) &stat_cfg
->u
.bytes
[offset
] );
736 struct hermonprm_mod_stat_cfg_input_mod mod
;
740 assert ( ( offset
% sizeof ( *portion
) ) == 0 );
742 /* Construct input modifier */
743 memset ( &mod
, 0, sizeof ( mod
) );
744 MLX_FILL_2 ( &mod
, 0,
749 if ( ( rc
= hermon_cmd_mod_stat_cfg ( hermon
, mode
,
750 be32_to_cpu ( mod
.u
.dwords
[0] ),
757 /***************************************************************************
761 ***************************************************************************
765 * Issue management datagram
767 * @v ibdev Infiniband device
768 * @v mad Management datagram
769 * @ret rc Return status code
771 static int hermon_mad ( struct ib_device
*ibdev
, union ib_mad
*mad
) {
772 struct hermon
*hermon
= ib_get_drvdata ( ibdev
);
773 union hermonprm_mad mad_ifc
;
776 linker_assert ( sizeof ( *mad
) == sizeof ( mad_ifc
.mad
),
779 /* Copy in request packet */
780 memcpy ( &mad_ifc
.mad
, mad
, sizeof ( mad_ifc
.mad
) );
783 if ( ( rc
= hermon_cmd_mad_ifc ( hermon
, ibdev
->port
,
784 &mad_ifc
) ) != 0 ) {
785 DBGC ( hermon
, "Hermon %p port %d could not issue MAD IFC: "
786 "%s\n", hermon
, ibdev
->port
, strerror ( rc
) );
790 /* Copy out reply packet */
791 memcpy ( mad
, &mad_ifc
.mad
, sizeof ( *mad
) );
793 if ( mad
->hdr
.status
!= 0 ) {
794 DBGC ( hermon
, "Hermon %p port %d MAD IFC status %04x\n",
795 hermon
, ibdev
->port
, ntohs ( mad
->hdr
.status
) );
801 /***************************************************************************
803 * Completion queue operations
805 ***************************************************************************
809 * Dump completion queue context (for debugging only)
811 * @v hermon Hermon device
812 * @v cq Completion queue
813 * @ret rc Return status code
815 static __attribute__ (( unused
)) int
816 hermon_dump_cqctx ( struct hermon
*hermon
, struct ib_completion_queue
*cq
) {
817 struct hermonprm_completion_queue_context cqctx
;
820 memset ( &cqctx
, 0, sizeof ( cqctx
) );
821 if ( ( rc
= hermon_cmd_query_cq ( hermon
, cq
->cqn
, &cqctx
) ) != 0 ) {
822 DBGC ( hermon
, "Hermon %p CQN %#lx QUERY_CQ failed: %s\n",
823 hermon
, cq
->cqn
, strerror ( rc
) );
826 DBGC ( hermon
, "Hermon %p CQN %#lx context:\n", hermon
, cq
->cqn
);
827 DBGC_HDA ( hermon
, 0, &cqctx
, sizeof ( cqctx
) );
833 * Create completion queue
835 * @v ibdev Infiniband device
836 * @v cq Completion queue
837 * @ret rc Return status code
839 static int hermon_create_cq ( struct ib_device
*ibdev
,
840 struct ib_completion_queue
*cq
) {
841 struct hermon
*hermon
= ib_get_drvdata ( ibdev
);
842 struct hermon_completion_queue
*hermon_cq
;
843 struct hermonprm_completion_queue_context cqctx
;
848 /* Find a free completion queue number */
849 cqn_offset
= hermon_bitmask_alloc ( hermon
->cq_inuse
,
851 if ( cqn_offset
< 0 ) {
852 DBGC ( hermon
, "Hermon %p out of completion queues\n",
857 cq
->cqn
= ( hermon
->cap
.reserved_cqs
+ cqn_offset
);
859 /* Allocate control structures */
860 hermon_cq
= zalloc ( sizeof ( *hermon_cq
) );
866 /* Allocate doorbell */
867 hermon_cq
->doorbell
= malloc_dma ( sizeof ( hermon_cq
->doorbell
[0] ),
868 sizeof ( hermon_cq
->doorbell
[0] ) );
869 if ( ! hermon_cq
->doorbell
) {
873 memset ( hermon_cq
->doorbell
, 0, sizeof ( hermon_cq
->doorbell
[0] ) );
875 /* Allocate completion queue itself */
876 hermon_cq
->cqe_size
= ( cq
->num_cqes
* sizeof ( hermon_cq
->cqe
[0] ) );
877 hermon_cq
->cqe
= malloc_dma ( hermon_cq
->cqe_size
,
878 sizeof ( hermon_cq
->cqe
[0] ) );
879 if ( ! hermon_cq
->cqe
) {
883 memset ( hermon_cq
->cqe
, 0, hermon_cq
->cqe_size
);
884 for ( i
= 0 ; i
< cq
->num_cqes
; i
++ ) {
885 MLX_FILL_1 ( &hermon_cq
->cqe
[i
].normal
, 7, owner
, 1 );
889 /* Allocate MTT entries */
890 if ( ( rc
= hermon_alloc_mtt ( hermon
, hermon_cq
->cqe
,
892 &hermon_cq
->mtt
) ) != 0 )
895 /* Hand queue over to hardware */
896 memset ( &cqctx
, 0, sizeof ( cqctx
) );
897 MLX_FILL_1 ( &cqctx
, 0, st
, 0xa /* "Event fired" */ );
898 MLX_FILL_1 ( &cqctx
, 2,
899 page_offset
, ( hermon_cq
->mtt
.page_offset
>> 5 ) );
900 MLX_FILL_2 ( &cqctx
, 3,
901 usr_page
, HERMON_UAR_NON_EQ_PAGE
,
902 log_cq_size
, fls ( cq
->num_cqes
- 1 ) );
903 MLX_FILL_1 ( &cqctx
, 5, c_eqn
, hermon
->eq
.eqn
);
904 MLX_FILL_H ( &cqctx
, 6, mtt_base_addr_h
,
905 hermon_cq
->mtt
.mtt_base_addr
);
906 MLX_FILL_1 ( &cqctx
, 7, mtt_base_addr_l
,
907 ( hermon_cq
->mtt
.mtt_base_addr
>> 3 ) );
908 MLX_FILL_H ( &cqctx
, 14, db_record_addr_h
,
909 virt_to_phys ( hermon_cq
->doorbell
) );
910 MLX_FILL_1 ( &cqctx
, 15, db_record_addr_l
,
911 ( virt_to_phys ( hermon_cq
->doorbell
) >> 3 ) );
912 if ( ( rc
= hermon_cmd_sw2hw_cq ( hermon
, cq
->cqn
, &cqctx
) ) != 0 ) {
913 DBGC ( hermon
, "Hermon %p CQN %#lx SW2HW_CQ failed: %s\n",
914 hermon
, cq
->cqn
, strerror ( rc
) );
918 DBGC ( hermon
, "Hermon %p CQN %#lx ring [%08lx,%08lx), doorbell "
919 "%08lx\n", hermon
, cq
->cqn
, virt_to_phys ( hermon_cq
->cqe
),
920 ( virt_to_phys ( hermon_cq
->cqe
) + hermon_cq
->cqe_size
),
921 virt_to_phys ( hermon_cq
->doorbell
) );
922 ib_cq_set_drvdata ( cq
, hermon_cq
);
926 hermon_free_mtt ( hermon
, &hermon_cq
->mtt
);
928 free_dma ( hermon_cq
->cqe
, hermon_cq
->cqe_size
);
930 free_dma ( hermon_cq
->doorbell
, sizeof ( hermon_cq
->doorbell
[0] ) );
934 hermon_bitmask_free ( hermon
->cq_inuse
, cqn_offset
, 1 );
940 * Destroy completion queue
942 * @v ibdev Infiniband device
943 * @v cq Completion queue
945 static void hermon_destroy_cq ( struct ib_device
*ibdev
,
946 struct ib_completion_queue
*cq
) {
947 struct hermon
*hermon
= ib_get_drvdata ( ibdev
);
948 struct hermon_completion_queue
*hermon_cq
= ib_cq_get_drvdata ( cq
);
949 struct hermonprm_completion_queue_context cqctx
;
953 /* Take ownership back from hardware */
954 if ( ( rc
= hermon_cmd_hw2sw_cq ( hermon
, cq
->cqn
, &cqctx
) ) != 0 ) {
955 DBGC ( hermon
, "Hermon %p CQN %#lx FATAL HW2SW_CQ failed: "
956 "%s\n", hermon
, cq
->cqn
, strerror ( rc
) );
957 /* Leak memory and return; at least we avoid corruption */
961 /* Free MTT entries */
962 hermon_free_mtt ( hermon
, &hermon_cq
->mtt
);
965 free_dma ( hermon_cq
->cqe
, hermon_cq
->cqe_size
);
966 free_dma ( hermon_cq
->doorbell
, sizeof ( hermon_cq
->doorbell
[0] ) );
969 /* Mark queue number as free */
970 cqn_offset
= ( cq
->cqn
- hermon
->cap
.reserved_cqs
);
971 hermon_bitmask_free ( hermon
->cq_inuse
, cqn_offset
, 1 );
973 ib_cq_set_drvdata ( cq
, NULL
);
976 /***************************************************************************
978 * Queue pair operations
980 ***************************************************************************
984 * Assign queue pair number
986 * @v ibdev Infiniband device
988 * @ret rc Return status code
990 static int hermon_alloc_qpn ( struct ib_device
*ibdev
,
991 struct ib_queue_pair
*qp
) {
992 struct hermon
*hermon
= ib_get_drvdata ( ibdev
);
993 unsigned int port_offset
;
996 /* Calculate queue pair number */
997 port_offset
= ( ibdev
->port
- HERMON_PORT_BASE
);
999 switch ( qp
->type
) {
1001 qp
->qpn
= ( hermon
->special_qpn_base
+ port_offset
);
1004 qp
->qpn
= ( hermon
->special_qpn_base
+ 2 + port_offset
);
1009 /* Find a free queue pair number */
1010 qpn_offset
= hermon_bitmask_alloc ( hermon
->qp_inuse
,
1011 HERMON_MAX_QPS
, 1 );
1012 if ( qpn_offset
< 0 ) {
1013 DBGC ( hermon
, "Hermon %p out of queue pairs\n",
1017 qp
->qpn
= ( ( random() & HERMON_QPN_RANDOM_MASK
) |
1018 ( hermon
->qpn_base
+ qpn_offset
) );
1021 DBGC ( hermon
, "Hermon %p unsupported QP type %d\n",
1028 * Free queue pair number
1030 * @v ibdev Infiniband device
1033 static void hermon_free_qpn ( struct ib_device
*ibdev
,
1034 struct ib_queue_pair
*qp
) {
1035 struct hermon
*hermon
= ib_get_drvdata ( ibdev
);
1038 qpn_offset
= ( ( qp
->qpn
& ~HERMON_QPN_RANDOM_MASK
)
1039 - hermon
->qpn_base
);
1040 if ( qpn_offset
>= 0 )
1041 hermon_bitmask_free ( hermon
->qp_inuse
, qpn_offset
, 1 );
1045 * Calculate transmission rate
1047 * @v av Address vector
1048 * @ret hermon_rate Hermon rate
1050 static unsigned int hermon_rate ( struct ib_address_vector
*av
) {
1051 return ( ( ( av
->rate
>= IB_RATE_2_5
) && ( av
->rate
<= IB_RATE_120
) )
1052 ?
( av
->rate
+ 5 ) : 0 );
1056 * Calculate schedule queue
1058 * @v ibdev Infiniband device
1060 * @ret sched_queue Schedule queue
1062 static unsigned int hermon_sched_queue ( struct ib_device
*ibdev
,
1063 struct ib_queue_pair
*qp
) {
1064 return ( ( ( qp
->type
== IB_QPT_SMI
) ?
1065 HERMON_SCHED_QP0
: HERMON_SCHED_DEFAULT
) |
1066 ( ( ibdev
->port
- 1 ) << 6 ) );
1069 /** Queue pair transport service type map */
1070 static uint8_t hermon_qp_st
[] = {
1071 [IB_QPT_SMI
] = HERMON_ST_MLX
,
1072 [IB_QPT_GSI
] = HERMON_ST_MLX
,
1073 [IB_QPT_UD
] = HERMON_ST_UD
,
1074 [IB_QPT_RC
] = HERMON_ST_RC
,
1075 [IB_QPT_ETH
] = HERMON_ST_MLX
,
1079 * Dump queue pair context (for debugging only)
1081 * @v hermon Hermon device
1083 * @ret rc Return status code
1085 static __attribute__ (( unused
)) int
1086 hermon_dump_qpctx ( struct hermon
*hermon
, struct ib_queue_pair
*qp
) {
1087 struct hermonprm_qp_ee_state_transitions qpctx
;
1090 memset ( &qpctx
, 0, sizeof ( qpctx
) );
1091 if ( ( rc
= hermon_cmd_query_qp ( hermon
, qp
->qpn
, &qpctx
) ) != 0 ) {
1092 DBGC ( hermon
, "Hermon %p QPN %#lx QUERY_QP failed: %s\n",
1093 hermon
, qp
->qpn
, strerror ( rc
) );
1096 DBGC ( hermon
, "Hermon %p QPN %#lx context:\n", hermon
, qp
->qpn
);
1097 DBGC_HDA ( hermon
, 0, &qpctx
.u
.dwords
[2], ( sizeof ( qpctx
) - 8 ) );
1105 * @v ibdev Infiniband device
1107 * @ret rc Return status code
1109 static int hermon_create_qp ( struct ib_device
*ibdev
,
1110 struct ib_queue_pair
*qp
) {
1111 struct hermon
*hermon
= ib_get_drvdata ( ibdev
);
1112 struct hermon_queue_pair
*hermon_qp
;
1113 struct hermonprm_qp_ee_state_transitions qpctx
;
1116 /* Calculate queue pair number */
1117 if ( ( rc
= hermon_alloc_qpn ( ibdev
, qp
) ) != 0 )
1120 /* Allocate control structures */
1121 hermon_qp
= zalloc ( sizeof ( *hermon_qp
) );
1122 if ( ! hermon_qp
) {
1127 /* Allocate doorbells */
1128 hermon_qp
->recv
.doorbell
=
1129 malloc_dma ( sizeof ( hermon_qp
->recv
.doorbell
[0] ),
1130 sizeof ( hermon_qp
->recv
.doorbell
[0] ) );
1131 if ( ! hermon_qp
->recv
.doorbell
) {
1133 goto err_recv_doorbell
;
1135 memset ( hermon_qp
->recv
.doorbell
, 0,
1136 sizeof ( hermon_qp
->recv
.doorbell
[0] ) );
1137 hermon_qp
->send
.doorbell
=
1138 ( hermon
->uar
+ HERMON_UAR_NON_EQ_PAGE
* HERMON_PAGE_SIZE
+
1139 HERMON_DB_POST_SND_OFFSET
);
1141 /* Allocate work queue buffer */
1142 hermon_qp
->send
.num_wqes
= ( qp
->send
.num_wqes
/* headroom */ + 1 +
1143 ( 2048 / sizeof ( hermon_qp
->send
.wqe
[0] ) ) );
1144 hermon_qp
->send
.num_wqes
=
1145 ( 1 << fls ( hermon_qp
->send
.num_wqes
- 1 ) ); /* round up */
1146 hermon_qp
->send
.wqe_size
= ( hermon_qp
->send
.num_wqes
*
1147 sizeof ( hermon_qp
->send
.wqe
[0] ) );
1148 hermon_qp
->recv
.wqe_size
= ( qp
->recv
.num_wqes
*
1149 sizeof ( hermon_qp
->recv
.wqe
[0] ) );
1150 hermon_qp
->wqe_size
= ( hermon_qp
->send
.wqe_size
+
1151 hermon_qp
->recv
.wqe_size
);
1152 hermon_qp
->wqe
= malloc_dma ( hermon_qp
->wqe_size
,
1153 sizeof ( hermon_qp
->send
.wqe
[0] ) );
1154 if ( ! hermon_qp
->wqe
) {
1158 hermon_qp
->send
.wqe
= hermon_qp
->wqe
;
1159 memset ( hermon_qp
->send
.wqe
, 0xff, hermon_qp
->send
.wqe_size
);
1160 hermon_qp
->recv
.wqe
= ( hermon_qp
->wqe
+ hermon_qp
->send
.wqe_size
);
1161 memset ( hermon_qp
->recv
.wqe
, 0, hermon_qp
->recv
.wqe_size
);
1163 /* Allocate MTT entries */
1164 if ( ( rc
= hermon_alloc_mtt ( hermon
, hermon_qp
->wqe
,
1165 hermon_qp
->wqe_size
,
1166 &hermon_qp
->mtt
) ) != 0 ) {
1170 /* Transition queue to INIT state */
1171 memset ( &qpctx
, 0, sizeof ( qpctx
) );
1172 MLX_FILL_2 ( &qpctx
, 2,
1173 qpc_eec_data
.pm_state
, HERMON_PM_STATE_MIGRATED
,
1174 qpc_eec_data
.st
, hermon_qp_st
[qp
->type
] );
1175 MLX_FILL_1 ( &qpctx
, 3, qpc_eec_data
.pd
, HERMON_GLOBAL_PD
);
1176 MLX_FILL_4 ( &qpctx
, 4,
1177 qpc_eec_data
.log_rq_size
, fls ( qp
->recv
.num_wqes
- 1 ),
1178 qpc_eec_data
.log_rq_stride
,
1179 ( fls ( sizeof ( hermon_qp
->recv
.wqe
[0] ) - 1 ) - 4 ),
1180 qpc_eec_data
.log_sq_size
,
1181 fls ( hermon_qp
->send
.num_wqes
- 1 ),
1182 qpc_eec_data
.log_sq_stride
,
1183 ( fls ( sizeof ( hermon_qp
->send
.wqe
[0] ) - 1 ) - 4 ) );
1184 MLX_FILL_1 ( &qpctx
, 5,
1185 qpc_eec_data
.usr_page
, HERMON_UAR_NON_EQ_PAGE
);
1186 MLX_FILL_1 ( &qpctx
, 33, qpc_eec_data
.cqn_snd
, qp
->send
.cq
->cqn
);
1187 MLX_FILL_4 ( &qpctx
, 38,
1188 qpc_eec_data
.rre
, 1,
1189 qpc_eec_data
.rwe
, 1,
1190 qpc_eec_data
.rae
, 1,
1191 qpc_eec_data
.page_offset
,
1192 ( hermon_qp
->mtt
.page_offset
>> 6 ) );
1193 MLX_FILL_1 ( &qpctx
, 41, qpc_eec_data
.cqn_rcv
, qp
->recv
.cq
->cqn
);
1194 MLX_FILL_H ( &qpctx
, 42, qpc_eec_data
.db_record_addr_h
,
1195 virt_to_phys ( hermon_qp
->recv
.doorbell
) );
1196 MLX_FILL_1 ( &qpctx
, 43, qpc_eec_data
.db_record_addr_l
,
1197 ( virt_to_phys ( hermon_qp
->recv
.doorbell
) >> 2 ) );
1198 MLX_FILL_H ( &qpctx
, 52, qpc_eec_data
.mtt_base_addr_h
,
1199 hermon_qp
->mtt
.mtt_base_addr
);
1200 MLX_FILL_1 ( &qpctx
, 53, qpc_eec_data
.mtt_base_addr_l
,
1201 ( hermon_qp
->mtt
.mtt_base_addr
>> 3 ) );
1202 if ( ( rc
= hermon_cmd_rst2init_qp ( hermon
, qp
->qpn
,
1204 DBGC ( hermon
, "Hermon %p QPN %#lx RST2INIT_QP failed: %s\n",
1205 hermon
, qp
->qpn
, strerror ( rc
) );
1206 goto err_rst2init_qp
;
1208 hermon_qp
->state
= HERMON_QP_ST_INIT
;
1210 DBGC ( hermon
, "Hermon %p QPN %#lx send ring [%08lx,%08lx), doorbell "
1211 "%08lx\n", hermon
, qp
->qpn
,
1212 virt_to_phys ( hermon_qp
->send
.wqe
),
1213 ( virt_to_phys ( hermon_qp
->send
.wqe
) +
1214 hermon_qp
->send
.wqe_size
),
1215 virt_to_phys ( hermon_qp
->send
.doorbell
) );
1216 DBGC ( hermon
, "Hermon %p QPN %#lx receive ring [%08lx,%08lx), "
1217 "doorbell %08lx\n", hermon
, qp
->qpn
,
1218 virt_to_phys ( hermon_qp
->recv
.wqe
),
1219 ( virt_to_phys ( hermon_qp
->recv
.wqe
) +
1220 hermon_qp
->recv
.wqe_size
),
1221 virt_to_phys ( hermon_qp
->recv
.doorbell
) );
1222 DBGC ( hermon
, "Hermon %p QPN %#lx send CQN %#lx receive CQN %#lx\n",
1223 hermon
, qp
->qpn
, qp
->send
.cq
->cqn
, qp
->recv
.cq
->cqn
);
1224 ib_qp_set_drvdata ( qp
, hermon_qp
);
1227 hermon_cmd_2rst_qp ( hermon
, qp
->qpn
);
1229 hermon_free_mtt ( hermon
, &hermon_qp
->mtt
);
1231 free_dma ( hermon_qp
->wqe
, hermon_qp
->wqe_size
);
1233 free_dma ( hermon_qp
->recv
.doorbell
,
1234 sizeof ( hermon_qp
->recv
.doorbell
[0] ) );
1238 hermon_free_qpn ( ibdev
, qp
);
1246 * @v ibdev Infiniband device
1248 * @ret rc Return status code
1250 static int hermon_modify_qp ( struct ib_device
*ibdev
,
1251 struct ib_queue_pair
*qp
) {
1252 struct hermon
*hermon
= ib_get_drvdata ( ibdev
);
1253 struct hermon_queue_pair
*hermon_qp
= ib_qp_get_drvdata ( qp
);
1254 struct hermonprm_qp_ee_state_transitions qpctx
;
1257 /* Transition queue to RTR state, if applicable */
1258 if ( hermon_qp
->state
< HERMON_QP_ST_RTR
) {
1259 memset ( &qpctx
, 0, sizeof ( qpctx
) );
1260 MLX_FILL_2 ( &qpctx
, 4,
1262 ( ( qp
->type
== IB_QPT_ETH
) ?
1263 HERMON_MTU_ETH
: HERMON_MTU_2048
),
1264 qpc_eec_data
.msg_max
, 31 );
1265 MLX_FILL_1 ( &qpctx
, 7,
1266 qpc_eec_data
.remote_qpn_een
, qp
->av
.qpn
);
1267 MLX_FILL_1 ( &qpctx
, 9,
1268 qpc_eec_data
.primary_address_path
.rlid
,
1270 MLX_FILL_1 ( &qpctx
, 10,
1271 qpc_eec_data
.primary_address_path
.max_stat_rate
,
1272 hermon_rate ( &qp
->av
) );
1273 memcpy ( &qpctx
.u
.dwords
[12], &qp
->av
.gid
,
1274 sizeof ( qp
->av
.gid
) );
1275 MLX_FILL_1 ( &qpctx
, 16,
1276 qpc_eec_data
.primary_address_path
.sched_queue
,
1277 hermon_sched_queue ( ibdev
, qp
) );
1278 MLX_FILL_1 ( &qpctx
, 39,
1279 qpc_eec_data
.next_rcv_psn
, qp
->recv
.psn
);
1280 if ( ( rc
= hermon_cmd_init2rtr_qp ( hermon
, qp
->qpn
,
1282 DBGC ( hermon
, "Hermon %p QPN %#lx INIT2RTR_QP failed:"
1283 " %s\n", hermon
, qp
->qpn
, strerror ( rc
) );
1286 hermon_qp
->state
= HERMON_QP_ST_RTR
;
1289 /* Transition queue to RTS state */
1290 if ( hermon_qp
->state
< HERMON_QP_ST_RTS
) {
1291 memset ( &qpctx
, 0, sizeof ( qpctx
) );
1292 MLX_FILL_1 ( &qpctx
, 10,
1293 qpc_eec_data
.primary_address_path
.ack_timeout
,
1294 14 /* 4.096us * 2^(14) = 67ms */ );
1295 MLX_FILL_2 ( &qpctx
, 30,
1296 qpc_eec_data
.retry_count
, HERMON_RETRY_MAX
,
1297 qpc_eec_data
.rnr_retry
, HERMON_RETRY_MAX
);
1298 MLX_FILL_1 ( &qpctx
, 32,
1299 qpc_eec_data
.next_send_psn
, qp
->send
.psn
);
1300 if ( ( rc
= hermon_cmd_rtr2rts_qp ( hermon
, qp
->qpn
,
1302 DBGC ( hermon
, "Hermon %p QPN %#lx RTR2RTS_QP failed: "
1303 "%s\n", hermon
, qp
->qpn
, strerror ( rc
) );
1306 hermon_qp
->state
= HERMON_QP_ST_RTS
;
1309 /* Update parameters in RTS state */
1310 memset ( &qpctx
, 0, sizeof ( qpctx
) );
1311 MLX_FILL_1 ( &qpctx
, 0, opt_param_mask
, HERMON_QP_OPT_PARAM_QKEY
);
1312 MLX_FILL_1 ( &qpctx
, 44, qpc_eec_data
.q_key
, qp
->qkey
);
1313 if ( ( rc
= hermon_cmd_rts2rts_qp ( hermon
, qp
->qpn
, &qpctx
) ) != 0 ){
1314 DBGC ( hermon
, "Hermon %p QPN %#lx RTS2RTS_QP failed: %s\n",
1315 hermon
, qp
->qpn
, strerror ( rc
) );
1323 * Destroy queue pair
1325 * @v ibdev Infiniband device
1328 static void hermon_destroy_qp ( struct ib_device
*ibdev
,
1329 struct ib_queue_pair
*qp
) {
1330 struct hermon
*hermon
= ib_get_drvdata ( ibdev
);
1331 struct hermon_queue_pair
*hermon_qp
= ib_qp_get_drvdata ( qp
);
1334 /* Take ownership back from hardware */
1335 if ( ( rc
= hermon_cmd_2rst_qp ( hermon
, qp
->qpn
) ) != 0 ) {
1336 DBGC ( hermon
, "Hermon %p QPN %#lx FATAL 2RST_QP failed: %s\n",
1337 hermon
, qp
->qpn
, strerror ( rc
) );
1338 /* Leak memory and return; at least we avoid corruption */
1342 /* Free MTT entries */
1343 hermon_free_mtt ( hermon
, &hermon_qp
->mtt
);
1346 free_dma ( hermon_qp
->wqe
, hermon_qp
->wqe_size
);
1347 free_dma ( hermon_qp
->recv
.doorbell
,
1348 sizeof ( hermon_qp
->recv
.doorbell
[0] ) );
1351 /* Mark queue number as free */
1352 hermon_free_qpn ( ibdev
, qp
);
1354 ib_qp_set_drvdata ( qp
, NULL
);
1357 /***************************************************************************
1359 * Work request operations
1361 ***************************************************************************
1365 * Construct UD send work queue entry
1367 * @v ibdev Infiniband device
1369 * @v dest Destination address vector
1370 * @v iobuf I/O buffer
1371 * @v wqe Send work queue entry
1372 * @ret opcode Control opcode
1374 static __attribute__ (( unused
)) unsigned int
1375 hermon_fill_nop_send_wqe ( struct ib_device
*ibdev __unused
,
1376 struct ib_queue_pair
*qp __unused
,
1377 struct ib_address_vector
*dest __unused
,
1378 struct io_buffer
*iobuf __unused
,
1379 union hermon_send_wqe
*wqe
) {
1381 MLX_FILL_1 ( &wqe
->ctrl
, 1, ds
, ( sizeof ( wqe
->ctrl
) / 16 ) );
1382 MLX_FILL_1 ( &wqe
->ctrl
, 2, c
, 0x03 /* generate completion */ );
1383 return HERMON_OPCODE_NOP
;
1387 * Construct UD send work queue entry
1389 * @v ibdev Infiniband device
1391 * @v dest Destination address vector
1392 * @v iobuf I/O buffer
1393 * @v wqe Send work queue entry
1394 * @ret opcode Control opcode
1397 hermon_fill_ud_send_wqe ( struct ib_device
*ibdev
,
1398 struct ib_queue_pair
*qp __unused
,
1399 struct ib_address_vector
*dest
,
1400 struct io_buffer
*iobuf
,
1401 union hermon_send_wqe
*wqe
) {
1402 struct hermon
*hermon
= ib_get_drvdata ( ibdev
);
1404 MLX_FILL_1 ( &wqe
->ud
.ctrl
, 1, ds
,
1405 ( ( offsetof ( typeof ( wqe
->ud
), data
[1] ) / 16 ) ) );
1406 MLX_FILL_1 ( &wqe
->ud
.ctrl
, 2, c
, 0x03 /* generate completion */ );
1407 MLX_FILL_2 ( &wqe
->ud
.ud
, 0,
1408 ud_address_vector
.pd
, HERMON_GLOBAL_PD
,
1409 ud_address_vector
.port_number
, ibdev
->port
);
1410 MLX_FILL_2 ( &wqe
->ud
.ud
, 1,
1411 ud_address_vector
.rlid
, dest
->lid
,
1412 ud_address_vector
.g
, dest
->gid_present
);
1413 MLX_FILL_1 ( &wqe
->ud
.ud
, 2,
1414 ud_address_vector
.max_stat_rate
, hermon_rate ( dest
) );
1415 MLX_FILL_1 ( &wqe
->ud
.ud
, 3, ud_address_vector
.sl
, dest
->sl
);
1416 memcpy ( &wqe
->ud
.ud
.u
.dwords
[4], &dest
->gid
, sizeof ( dest
->gid
) );
1417 MLX_FILL_1 ( &wqe
->ud
.ud
, 8, destination_qp
, dest
->qpn
);
1418 MLX_FILL_1 ( &wqe
->ud
.ud
, 9, q_key
, dest
->qkey
);
1419 MLX_FILL_1 ( &wqe
->ud
.data
[0], 0, byte_count
, iob_len ( iobuf
) );
1420 MLX_FILL_1 ( &wqe
->ud
.data
[0], 1, l_key
, hermon
->lkey
);
1421 MLX_FILL_H ( &wqe
->ud
.data
[0], 2,
1422 local_address_h
, virt_to_bus ( iobuf
->data
) );
1423 MLX_FILL_1 ( &wqe
->ud
.data
[0], 3,
1424 local_address_l
, virt_to_bus ( iobuf
->data
) );
1425 return HERMON_OPCODE_SEND
;
1429 * Construct MLX send work queue entry
1431 * @v ibdev Infiniband device
1433 * @v dest Destination address vector
1434 * @v iobuf I/O buffer
1435 * @v wqe Send work queue entry
1436 * @ret opcode Control opcode
1439 hermon_fill_mlx_send_wqe ( struct ib_device
*ibdev
,
1440 struct ib_queue_pair
*qp
,
1441 struct ib_address_vector
*dest
,
1442 struct io_buffer
*iobuf
,
1443 union hermon_send_wqe
*wqe
) {
1444 struct hermon
*hermon
= ib_get_drvdata ( ibdev
);
1445 struct io_buffer headers
;
1447 /* Construct IB headers */
1448 iob_populate ( &headers
, &wqe
->mlx
.headers
, 0,
1449 sizeof ( wqe
->mlx
.headers
) );
1450 iob_reserve ( &headers
, sizeof ( wqe
->mlx
.headers
) );
1451 ib_push ( ibdev
, &headers
, qp
, iob_len ( iobuf
), dest
);
1453 /* Fill work queue entry */
1454 MLX_FILL_1 ( &wqe
->mlx
.ctrl
, 1, ds
,
1455 ( ( offsetof ( typeof ( wqe
->mlx
), data
[2] ) / 16 ) ) );
1456 MLX_FILL_5 ( &wqe
->mlx
.ctrl
, 2,
1457 c
, 0x03 /* generate completion */,
1458 icrc
, 0 /* generate ICRC */,
1459 max_statrate
, hermon_rate ( dest
),
1461 v15
, ( ( qp
->ext_qpn
== IB_QPN_SMI
) ?
1 : 0 ) );
1462 MLX_FILL_1 ( &wqe
->mlx
.ctrl
, 3, rlid
, dest
->lid
);
1463 MLX_FILL_1 ( &wqe
->mlx
.data
[0], 0,
1464 byte_count
, iob_len ( &headers
) );
1465 MLX_FILL_1 ( &wqe
->mlx
.data
[0], 1, l_key
, hermon
->lkey
);
1466 MLX_FILL_H ( &wqe
->mlx
.data
[0], 2,
1467 local_address_h
, virt_to_bus ( headers
.data
) );
1468 MLX_FILL_1 ( &wqe
->mlx
.data
[0], 3,
1469 local_address_l
, virt_to_bus ( headers
.data
) );
1470 MLX_FILL_1 ( &wqe
->mlx
.data
[1], 0,
1471 byte_count
, ( iob_len ( iobuf
) + 4 /* ICRC */ ) );
1472 MLX_FILL_1 ( &wqe
->mlx
.data
[1], 1, l_key
, hermon
->lkey
);
1473 MLX_FILL_H ( &wqe
->mlx
.data
[1], 2,
1474 local_address_h
, virt_to_bus ( iobuf
->data
) );
1475 MLX_FILL_1 ( &wqe
->mlx
.data
[1], 3,
1476 local_address_l
, virt_to_bus ( iobuf
->data
) );
1477 return HERMON_OPCODE_SEND
;
1481 * Construct RC send work queue entry
1483 * @v ibdev Infiniband device
1485 * @v dest Destination address vector
1486 * @v iobuf I/O buffer
1487 * @v wqe Send work queue entry
1488 * @ret opcode Control opcode
1491 hermon_fill_rc_send_wqe ( struct ib_device
*ibdev
,
1492 struct ib_queue_pair
*qp __unused
,
1493 struct ib_address_vector
*dest __unused
,
1494 struct io_buffer
*iobuf
,
1495 union hermon_send_wqe
*wqe
) {
1496 struct hermon
*hermon
= ib_get_drvdata ( ibdev
);
1498 MLX_FILL_1 ( &wqe
->rc
.ctrl
, 1, ds
,
1499 ( ( offsetof ( typeof ( wqe
->rc
), data
[1] ) / 16 ) ) );
1500 MLX_FILL_1 ( &wqe
->rc
.ctrl
, 2, c
, 0x03 /* generate completion */ );
1501 MLX_FILL_1 ( &wqe
->rc
.data
[0], 0, byte_count
, iob_len ( iobuf
) );
1502 MLX_FILL_1 ( &wqe
->rc
.data
[0], 1, l_key
, hermon
->lkey
);
1503 MLX_FILL_H ( &wqe
->rc
.data
[0], 2,
1504 local_address_h
, virt_to_bus ( iobuf
->data
) );
1505 MLX_FILL_1 ( &wqe
->rc
.data
[0], 3,
1506 local_address_l
, virt_to_bus ( iobuf
->data
) );
1507 return HERMON_OPCODE_SEND
;
1511 * Construct Ethernet send work queue entry
1513 * @v ibdev Infiniband device
1515 * @v dest Destination address vector
1516 * @v iobuf I/O buffer
1517 * @v wqe Send work queue entry
1518 * @ret opcode Control opcode
1521 hermon_fill_eth_send_wqe ( struct ib_device
*ibdev
,
1522 struct ib_queue_pair
*qp __unused
,
1523 struct ib_address_vector
*dest __unused
,
1524 struct io_buffer
*iobuf
,
1525 union hermon_send_wqe
*wqe
) {
1526 struct hermon
*hermon
= ib_get_drvdata ( ibdev
);
1528 /* Fill work queue entry */
1529 MLX_FILL_1 ( &wqe
->eth
.ctrl
, 1, ds
,
1530 ( ( offsetof ( typeof ( wqe
->mlx
), data
[1] ) / 16 ) ) );
1531 MLX_FILL_2 ( &wqe
->eth
.ctrl
, 2,
1532 c
, 0x03 /* generate completion */,
1533 s
, 1 /* inhibit ICRC */ );
1534 MLX_FILL_1 ( &wqe
->eth
.data
[0], 0,
1535 byte_count
, iob_len ( iobuf
) );
1536 MLX_FILL_1 ( &wqe
->eth
.data
[0], 1, l_key
, hermon
->lkey
);
1537 MLX_FILL_H ( &wqe
->eth
.data
[0], 2,
1538 local_address_h
, virt_to_bus ( iobuf
->data
) );
1539 MLX_FILL_1 ( &wqe
->eth
.data
[0], 3,
1540 local_address_l
, virt_to_bus ( iobuf
->data
) );
1541 return HERMON_OPCODE_SEND
;
1544 /** Work queue entry constructors */
1546 ( * hermon_fill_send_wqe
[] ) ( struct ib_device
*ibdev
,
1547 struct ib_queue_pair
*qp
,
1548 struct ib_address_vector
*dest
,
1549 struct io_buffer
*iobuf
,
1550 union hermon_send_wqe
*wqe
) = {
1551 [IB_QPT_SMI
] = hermon_fill_mlx_send_wqe
,
1552 [IB_QPT_GSI
] = hermon_fill_mlx_send_wqe
,
1553 [IB_QPT_UD
] = hermon_fill_ud_send_wqe
,
1554 [IB_QPT_RC
] = hermon_fill_rc_send_wqe
,
1555 [IB_QPT_ETH
] = hermon_fill_eth_send_wqe
,
1559 * Post send work queue entry
1561 * @v ibdev Infiniband device
1563 * @v dest Destination address vector
1564 * @v iobuf I/O buffer
1565 * @ret rc Return status code
1567 static int hermon_post_send ( struct ib_device
*ibdev
,
1568 struct ib_queue_pair
*qp
,
1569 struct ib_address_vector
*dest
,
1570 struct io_buffer
*iobuf
) {
1571 struct hermon
*hermon
= ib_get_drvdata ( ibdev
);
1572 struct hermon_queue_pair
*hermon_qp
= ib_qp_get_drvdata ( qp
);
1573 struct ib_work_queue
*wq
= &qp
->send
;
1574 struct hermon_send_work_queue
*hermon_send_wq
= &hermon_qp
->send
;
1575 union hermon_send_wqe
*wqe
;
1576 union hermonprm_doorbell_register db_reg
;
1577 unsigned long wqe_idx_mask
;
1578 unsigned long wqe_idx
;
1580 unsigned int opcode
;
1582 /* Allocate work queue entry */
1583 wqe_idx
= ( wq
->next_idx
& ( hermon_send_wq
->num_wqes
- 1 ) );
1584 owner
= ( ( wq
->next_idx
& hermon_send_wq
->num_wqes
) ?
1 : 0 );
1585 wqe_idx_mask
= ( wq
->num_wqes
- 1 );
1586 if ( wq
->iobufs
[ wqe_idx
& wqe_idx_mask
] ) {
1587 DBGC ( hermon
, "Hermon %p QPN %#lx send queue full",
1591 wq
->iobufs
[ wqe_idx
& wqe_idx_mask
] = iobuf
;
1592 wqe
= &hermon_send_wq
->wqe
[wqe_idx
];
1594 /* Construct work queue entry */
1595 memset ( ( ( ( void * ) wqe
) + 4 /* avoid ctrl.owner */ ), 0,
1596 ( sizeof ( *wqe
) - 4 ) );
1597 assert ( qp
->type
< ( sizeof ( hermon_fill_send_wqe
) /
1598 sizeof ( hermon_fill_send_wqe
[0] ) ) );
1599 assert ( hermon_fill_send_wqe
[qp
->type
] != NULL
);
1600 opcode
= hermon_fill_send_wqe
[qp
->type
] ( ibdev
, qp
, dest
, iobuf
, wqe
);
1602 MLX_FILL_2 ( &wqe
->ctrl
, 0,
1605 DBGCP ( hermon
, "Hermon %p QPN %#lx posting send WQE %#lx:\n",
1606 hermon
, qp
->qpn
, wqe_idx
);
1607 DBGCP_HDA ( hermon
, virt_to_phys ( wqe
), wqe
, sizeof ( *wqe
) );
1609 /* Ring doorbell register */
1610 MLX_FILL_1 ( &db_reg
.send
, 0, qn
, qp
->qpn
);
1612 writel ( db_reg
.dword
[0], hermon_send_wq
->doorbell
);
1614 /* Update work queue's index */
1621 * Post receive work queue entry
1623 * @v ibdev Infiniband device
1625 * @v iobuf I/O buffer
1626 * @ret rc Return status code
1628 static int hermon_post_recv ( struct ib_device
*ibdev
,
1629 struct ib_queue_pair
*qp
,
1630 struct io_buffer
*iobuf
) {
1631 struct hermon
*hermon
= ib_get_drvdata ( ibdev
);
1632 struct hermon_queue_pair
*hermon_qp
= ib_qp_get_drvdata ( qp
);
1633 struct ib_work_queue
*wq
= &qp
->recv
;
1634 struct hermon_recv_work_queue
*hermon_recv_wq
= &hermon_qp
->recv
;
1635 struct hermonprm_recv_wqe
*wqe
;
1636 unsigned int wqe_idx_mask
;
1638 /* Allocate work queue entry */
1639 wqe_idx_mask
= ( wq
->num_wqes
- 1 );
1640 if ( wq
->iobufs
[wq
->next_idx
& wqe_idx_mask
] ) {
1641 DBGC ( hermon
, "Hermon %p QPN %#lx receive queue full",
1645 wq
->iobufs
[wq
->next_idx
& wqe_idx_mask
] = iobuf
;
1646 wqe
= &hermon_recv_wq
->wqe
[wq
->next_idx
& wqe_idx_mask
].recv
;
1648 /* Construct work queue entry */
1649 MLX_FILL_1 ( &wqe
->data
[0], 0, byte_count
, iob_tailroom ( iobuf
) );
1650 MLX_FILL_1 ( &wqe
->data
[0], 1, l_key
, hermon
->lkey
);
1651 MLX_FILL_H ( &wqe
->data
[0], 2,
1652 local_address_h
, virt_to_bus ( iobuf
->data
) );
1653 MLX_FILL_1 ( &wqe
->data
[0], 3,
1654 local_address_l
, virt_to_bus ( iobuf
->data
) );
1656 /* Update work queue's index */
1659 /* Update doorbell record */
1661 MLX_FILL_1 ( hermon_recv_wq
->doorbell
, 0, receive_wqe_counter
,
1662 ( wq
->next_idx
& 0xffff ) );
1670 * @v ibdev Infiniband device
1671 * @v cq Completion queue
1672 * @v cqe Hardware completion queue entry
1673 * @ret rc Return status code
1675 static int hermon_complete ( struct ib_device
*ibdev
,
1676 struct ib_completion_queue
*cq
,
1677 union hermonprm_completion_entry
*cqe
) {
1678 struct hermon
*hermon
= ib_get_drvdata ( ibdev
);
1679 struct ib_work_queue
*wq
;
1680 struct ib_queue_pair
*qp
;
1681 struct io_buffer
*iobuf
;
1682 struct ib_address_vector recv_dest
;
1683 struct ib_address_vector recv_source
;
1684 struct ib_global_route_header
*grh
;
1685 struct ib_address_vector
*source
;
1686 unsigned int opcode
;
1689 unsigned long wqe_idx
;
1690 unsigned long wqe_idx_mask
;
1694 /* Parse completion */
1695 qpn
= MLX_GET ( &cqe
->normal
, qpn
);
1696 is_send
= MLX_GET ( &cqe
->normal
, s_r
);
1697 opcode
= MLX_GET ( &cqe
->normal
, opcode
);
1698 if ( opcode
>= HERMON_OPCODE_RECV_ERROR
) {
1699 /* "s" field is not valid for error opcodes */
1700 is_send
= ( opcode
== HERMON_OPCODE_SEND_ERROR
);
1701 DBGC ( hermon
, "Hermon %p CQN %#lx syndrome %x vendor %x\n",
1702 hermon
, cq
->cqn
, MLX_GET ( &cqe
->error
, syndrome
),
1703 MLX_GET ( &cqe
->error
, vendor_error_syndrome
) );
1705 /* Don't return immediately; propagate error to completer */
1708 /* Identify work queue */
1709 wq
= ib_find_wq ( cq
, qpn
, is_send
);
1711 DBGC ( hermon
, "Hermon %p CQN %#lx unknown %s QPN %#lx\n",
1712 hermon
, cq
->cqn
, ( is_send ?
"send" : "recv" ), qpn
);
1717 /* Identify work queue entry */
1718 wqe_idx
= MLX_GET ( &cqe
->normal
, wqe_counter
);
1719 wqe_idx_mask
= ( wq
->num_wqes
- 1 );
1720 DBGCP ( hermon
, "Hermon %p CQN %#lx QPN %#lx %s WQE %#lx completed:\n",
1721 hermon
, cq
->cqn
, qp
->qpn
, ( is_send ?
"send" : "recv" ),
1723 DBGCP_HDA ( hermon
, virt_to_phys ( cqe
), cqe
, sizeof ( *cqe
) );
1725 /* Identify I/O buffer */
1726 iobuf
= wq
->iobufs
[ wqe_idx
& wqe_idx_mask
];
1728 DBGC ( hermon
, "Hermon %p CQN %#lx QPN %#lx empty %s WQE "
1729 "%#lx\n", hermon
, cq
->cqn
, qp
->qpn
,
1730 ( is_send ?
"send" : "recv" ), wqe_idx
);
1733 wq
->iobufs
[ wqe_idx
& wqe_idx_mask
] = NULL
;
1736 /* Hand off to completion handler */
1737 ib_complete_send ( ibdev
, qp
, iobuf
, rc
);
1739 /* Set received length */
1740 len
= MLX_GET ( &cqe
->normal
, byte_cnt
);
1741 assert ( len
<= iob_tailroom ( iobuf
) );
1742 iob_put ( iobuf
, len
);
1743 memset ( &recv_dest
, 0, sizeof ( recv_dest
) );
1744 recv_dest
.qpn
= qpn
;
1745 memset ( &recv_source
, 0, sizeof ( recv_source
) );
1746 switch ( qp
->type
) {
1750 assert ( iob_len ( iobuf
) >= sizeof ( *grh
) );
1752 iob_pull ( iobuf
, sizeof ( *grh
) );
1753 /* Construct address vector */
1754 source
= &recv_source
;
1755 source
->qpn
= MLX_GET ( &cqe
->normal
, srq_rqpn
);
1756 source
->lid
= MLX_GET ( &cqe
->normal
, slid_smac47_32
);
1757 source
->sl
= MLX_GET ( &cqe
->normal
, sl
);
1758 recv_dest
.gid_present
= source
->gid_present
=
1759 MLX_GET ( &cqe
->normal
, g
);
1760 memcpy ( &recv_dest
.gid
, &grh
->dgid
,
1761 sizeof ( recv_dest
.gid
) );
1762 memcpy ( &source
->gid
, &grh
->sgid
,
1763 sizeof ( source
->gid
) );
1769 /* Construct address vector */
1770 source
= &recv_source
;
1771 source
->vlan_present
= MLX_GET ( &cqe
->normal
, vlan
);
1772 source
->vlan
= MLX_GET ( &cqe
->normal
, vid
);
1778 /* Hand off to completion handler */
1779 ib_complete_recv ( ibdev
, qp
, &recv_dest
, source
, iobuf
, rc
);
1786 * Poll completion queue
1788 * @v ibdev Infiniband device
1789 * @v cq Completion queue
1791 static void hermon_poll_cq ( struct ib_device
*ibdev
,
1792 struct ib_completion_queue
*cq
) {
1793 struct hermon
*hermon
= ib_get_drvdata ( ibdev
);
1794 struct hermon_completion_queue
*hermon_cq
= ib_cq_get_drvdata ( cq
);
1795 union hermonprm_completion_entry
*cqe
;
1796 unsigned int cqe_idx_mask
;
1800 /* Look for completion entry */
1801 cqe_idx_mask
= ( cq
->num_cqes
- 1 );
1802 cqe
= &hermon_cq
->cqe
[cq
->next_idx
& cqe_idx_mask
];
1803 if ( MLX_GET ( &cqe
->normal
, owner
) ^
1804 ( ( cq
->next_idx
& cq
->num_cqes
) ?
1 : 0 ) ) {
1805 /* Entry still owned by hardware; end of poll */
1809 /* Handle completion */
1810 if ( ( rc
= hermon_complete ( ibdev
, cq
, cqe
) ) != 0 ) {
1811 DBGC ( hermon
, "Hermon %p CQN %#lx failed to complete:"
1812 " %s\n", hermon
, cq
->cqn
, strerror ( rc
) );
1813 DBGC_HDA ( hermon
, virt_to_phys ( cqe
),
1814 cqe
, sizeof ( *cqe
) );
1817 /* Update completion queue's index */
1820 /* Update doorbell record */
1821 MLX_FILL_1 ( hermon_cq
->doorbell
, 0, update_ci
,
1822 ( cq
->next_idx
& 0x00ffffffUL
) );
1826 /***************************************************************************
1830 ***************************************************************************
1834 * Create event queue
1836 * @v hermon Hermon device
1837 * @ret rc Return status code
1839 static int hermon_create_eq ( struct hermon
*hermon
) {
1840 struct hermon_event_queue
*hermon_eq
= &hermon
->eq
;
1841 struct hermonprm_eqc eqctx
;
1842 struct hermonprm_event_mask mask
;
1846 /* Select event queue number */
1847 hermon_eq
->eqn
= ( 4 * hermon
->cap
.reserved_uars
);
1848 if ( hermon_eq
->eqn
< hermon
->cap
.reserved_eqs
)
1849 hermon_eq
->eqn
= hermon
->cap
.reserved_eqs
;
1851 /* Calculate doorbell address */
1852 hermon_eq
->doorbell
=
1853 ( hermon
->uar
+ HERMON_DB_EQ_OFFSET ( hermon_eq
->eqn
) );
1855 /* Allocate event queue itself */
1856 hermon_eq
->eqe_size
=
1857 ( HERMON_NUM_EQES
* sizeof ( hermon_eq
->eqe
[0] ) );
1858 hermon_eq
->eqe
= malloc_dma ( hermon_eq
->eqe_size
,
1859 sizeof ( hermon_eq
->eqe
[0] ) );
1860 if ( ! hermon_eq
->eqe
) {
1864 memset ( hermon_eq
->eqe
, 0, hermon_eq
->eqe_size
);
1865 for ( i
= 0 ; i
< HERMON_NUM_EQES
; i
++ ) {
1866 MLX_FILL_1 ( &hermon_eq
->eqe
[i
].generic
, 7, owner
, 1 );
1870 /* Allocate MTT entries */
1871 if ( ( rc
= hermon_alloc_mtt ( hermon
, hermon_eq
->eqe
,
1872 hermon_eq
->eqe_size
,
1873 &hermon_eq
->mtt
) ) != 0 )
1876 /* Hand queue over to hardware */
1877 memset ( &eqctx
, 0, sizeof ( eqctx
) );
1878 MLX_FILL_2 ( &eqctx
, 0,
1879 st
, 0xa /* "Fired" */,
1881 MLX_FILL_1 ( &eqctx
, 2,
1882 page_offset
, ( hermon_eq
->mtt
.page_offset
>> 5 ) );
1883 MLX_FILL_1 ( &eqctx
, 3, log_eq_size
, fls ( HERMON_NUM_EQES
- 1 ) );
1884 MLX_FILL_H ( &eqctx
, 6, mtt_base_addr_h
,
1885 hermon_eq
->mtt
.mtt_base_addr
);
1886 MLX_FILL_1 ( &eqctx
, 7, mtt_base_addr_l
,
1887 ( hermon_eq
->mtt
.mtt_base_addr
>> 3 ) );
1888 if ( ( rc
= hermon_cmd_sw2hw_eq ( hermon
, hermon_eq
->eqn
,
1890 DBGC ( hermon
, "Hermon %p EQN %#lx SW2HW_EQ failed: %s\n",
1891 hermon
, hermon_eq
->eqn
, strerror ( rc
) );
1895 /* Map all events to this event queue */
1896 memset ( &mask
, 0xff, sizeof ( mask
) );
1897 if ( ( rc
= hermon_cmd_map_eq ( hermon
,
1898 ( HERMON_MAP_EQ
| hermon_eq
->eqn
),
1900 DBGC ( hermon
, "Hermon %p EQN %#lx MAP_EQ failed: %s\n",
1901 hermon
, hermon_eq
->eqn
, strerror ( rc
) );
1905 DBGC ( hermon
, "Hermon %p EQN %#lx ring [%08lx,%08lx), doorbell "
1906 "%08lx\n", hermon
, hermon_eq
->eqn
,
1907 virt_to_phys ( hermon_eq
->eqe
),
1908 ( virt_to_phys ( hermon_eq
->eqe
) + hermon_eq
->eqe_size
),
1909 virt_to_phys ( hermon_eq
->doorbell
) );
1913 hermon_cmd_hw2sw_eq ( hermon
, hermon_eq
->eqn
, &eqctx
);
1915 hermon_free_mtt ( hermon
, &hermon_eq
->mtt
);
1917 free_dma ( hermon_eq
->eqe
, hermon_eq
->eqe_size
);
1919 memset ( hermon_eq
, 0, sizeof ( *hermon_eq
) );
1924 * Destroy event queue
1926 * @v hermon Hermon device
1928 static void hermon_destroy_eq ( struct hermon
*hermon
) {
1929 struct hermon_event_queue
*hermon_eq
= &hermon
->eq
;
1930 struct hermonprm_eqc eqctx
;
1931 struct hermonprm_event_mask mask
;
1934 /* Unmap events from event queue */
1935 memset ( &mask
, 0xff, sizeof ( mask
) );
1936 if ( ( rc
= hermon_cmd_map_eq ( hermon
,
1937 ( HERMON_UNMAP_EQ
| hermon_eq
->eqn
),
1939 DBGC ( hermon
, "Hermon %p EQN %#lx FATAL MAP_EQ failed to "
1940 "unmap: %s\n", hermon
, hermon_eq
->eqn
, strerror ( rc
) );
1941 /* Continue; HCA may die but system should survive */
1944 /* Take ownership back from hardware */
1945 if ( ( rc
= hermon_cmd_hw2sw_eq ( hermon
, hermon_eq
->eqn
,
1947 DBGC ( hermon
, "Hermon %p EQN %#lx FATAL HW2SW_EQ failed: %s\n",
1948 hermon
, hermon_eq
->eqn
, strerror ( rc
) );
1949 /* Leak memory and return; at least we avoid corruption */
1953 /* Free MTT entries */
1954 hermon_free_mtt ( hermon
, &hermon_eq
->mtt
);
1957 free_dma ( hermon_eq
->eqe
, hermon_eq
->eqe_size
);
1958 memset ( hermon_eq
, 0, sizeof ( *hermon_eq
) );

/**
 * Handle port state event
 *
 * @v hermon		Hermon device
 * @v eqe		Port state change event queue entry
 */
static void hermon_event_port_state_change ( struct hermon *hermon,
					     union hermonprm_event_entry *eqe){
	unsigned int port;
	int link_up;

	/* Get port and link status */
	port = ( MLX_GET ( &eqe->port_state_change, data.p ) - 1 );
	link_up = ( MLX_GET ( &eqe->generic, event_sub_type ) & 0x04 );
	DBGC ( hermon, "Hermon %p port %d link %s\n", hermon, ( port + 1 ),
	       ( link_up ? "up" : "down" ) );

	/* Sanity check */
	if ( port >= hermon->cap.num_ports ) {
		DBGC ( hermon, "Hermon %p port %d does not exist!\n",
		       hermon, ( port + 1 ) );
		return;
	}

	/* Notify device of port state change */
	hermon->port[port].type->state_change ( hermon, &hermon->port[port],
						link_up );
}

/**
 * Poll event queue
 *
 * @v ibdev		Infiniband device
 */
static void hermon_poll_eq ( struct ib_device *ibdev ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );
	struct hermon_event_queue *hermon_eq = &hermon->eq;
	union hermonprm_event_entry *eqe;
	union hermonprm_doorbell_register db_reg;
	unsigned int eqe_idx_mask;
	unsigned int event_type;

	/* No event is generated upon reaching INIT, so we must poll
	 * separately for link state changes while we remain DOWN.
	 */
	if ( ib_is_open ( ibdev ) &&
	     ( ibdev->port_state == IB_PORT_STATE_DOWN ) ) {
		ib_smc_update ( ibdev, hermon_mad );
	}

	/* Poll event queue */
	while ( 1 ) {

		/* Look for event entry */
		eqe_idx_mask = ( HERMON_NUM_EQES - 1 );
		eqe = &hermon_eq->eqe[hermon_eq->next_idx & eqe_idx_mask];
		if ( MLX_GET ( &eqe->generic, owner ) ^
		     ( ( hermon_eq->next_idx & HERMON_NUM_EQES ) ? 1 : 0 ) ) {
			/* Entry still owned by hardware; end of poll */
			break;
		}
		DBGCP ( hermon, "Hermon %p EQN %#lx event:\n",
			hermon, hermon_eq->eqn );
		DBGCP_HDA ( hermon, virt_to_phys ( eqe ),
			    eqe, sizeof ( *eqe ) );

		/* Handle event */
		event_type = MLX_GET ( &eqe->generic, event_type );
		switch ( event_type ) {
		case HERMON_EV_PORT_STATE_CHANGE:
			hermon_event_port_state_change ( hermon, eqe );
			break;
		default:
			DBGC ( hermon, "Hermon %p EQN %#lx unrecognised event "
			       "type %#x:\n",
			       hermon, hermon_eq->eqn, event_type );
			DBGC_HDA ( hermon, virt_to_phys ( eqe ),
				   eqe, sizeof ( *eqe ) );
			break;
		}

		/* Update event queue's index */
		hermon_eq->next_idx++;

		/* Ring doorbell */
		MLX_FILL_1 ( &db_reg.event, 0,
			     ci, ( hermon_eq->next_idx & 0x00ffffffUL ) );
		writel ( db_reg.dword[0], hermon_eq->doorbell );
	}
}
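
/* The ownership test above XORs the entry's owner bit with the
 * "phase" bit of next_idx ( next_idx & HERMON_NUM_EQES ), so a
 * software-owned entry reads as owner=0 on even passes around the
 * ring and owner=1 on odd passes.  Writing the low 24 bits of
 * next_idx to the event doorbell tells the hardware how far software
 * has consumed (presumably also re-arming event delivery; the exact
 * doorbell semantics are hardware-defined).
 */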

/***************************************************************************
 *
 * Firmware control
 *
 ***************************************************************************
 */

/**
 * Map virtual to physical address for firmware usage
 *
 * @v hermon		Hermon device
 * @v map		Mapping function
 * @v va		Virtual address
 * @v pa		Physical address
 * @v len		Length of region
 * @ret rc		Return status code
 */
static int hermon_map_vpm ( struct hermon *hermon,
			    int ( *map ) ( struct hermon *hermon,
			    const struct hermonprm_virtual_physical_mapping* ),
			    uint64_t va, physaddr_t pa, size_t len ) {
	struct hermonprm_virtual_physical_mapping mapping;
	physaddr_t start;
	physaddr_t low;
	physaddr_t high;
	physaddr_t end;
	size_t size;
	int rc;

	/* Sanity checks */
	assert ( ( va & ( HERMON_PAGE_SIZE - 1 ) ) == 0 );
	assert ( ( pa & ( HERMON_PAGE_SIZE - 1 ) ) == 0 );
	assert ( ( len & ( HERMON_PAGE_SIZE - 1 ) ) == 0 );

	/* Calculate starting points */
	start = pa;
	end = ( start + len );
	size = ( 1UL << ( fls ( start ^ end ) - 1 ) );
	low = high = ( end & ~( size - 1 ) );
	assert ( start < low );
	assert ( high <= end );

	/* These mappings tend to generate huge volumes of
	 * uninteresting debug data, which basically makes it
	 * impossible to use debugging otherwise.
	 */
	DBG_DISABLE ( DBGLVL_LOG | DBGLVL_EXTRA );

	/* Map blocks in descending order of size */
	while ( size >= HERMON_PAGE_SIZE ) {

		/* Find the next candidate block */
		if ( ( low - size ) >= start ) {
			low -= size;
			pa = low;
		} else if ( ( high + size ) <= end ) {
			pa = high;
			high += size;
		} else {
			size >>= 1;
			continue;
		}
		assert ( ( va & ( size - 1 ) ) == 0 );
		assert ( ( pa & ( size - 1 ) ) == 0 );

		/* Map this block */
		memset ( &mapping, 0, sizeof ( mapping ) );
		MLX_FILL_1 ( &mapping, 0, va_h, ( va >> 32 ) );
		MLX_FILL_1 ( &mapping, 1, va_l, ( va >> 12 ) );
		MLX_FILL_H ( &mapping, 2, pa_h, pa );
		MLX_FILL_2 ( &mapping, 3,
			     log2size, ( ( fls ( size ) - 1 ) - 12 ),
			     pa_l, ( pa >> 12 ) );
		if ( ( rc = map ( hermon, &mapping ) ) != 0 ) {
			DBG_ENABLE ( DBGLVL_LOG | DBGLVL_EXTRA );
			DBGC ( hermon, "Hermon %p could not map %08llx+%zx to "
			       "%08lx: %s\n",
			       hermon, va, size, pa, strerror ( rc ) );
			return rc;
		}
		va += size;
	}
	assert ( low == start );
	assert ( high == end );

	DBG_ENABLE ( DBGLVL_LOG | DBGLVL_EXTRA );
	return 0;
}
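
/* The block-splitting loop above keeps every mapping command
 * naturally aligned: blocks are taken in descending size order,
 * working outwards from the largest aligned block inside [start,end).
 * As an illustration (not from the original source): a 12-page
 * region starting at page 1 is issued as five commands covering
 * 4, 4, 2, 1 and 1 pages respectively.
 */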

/**
 * Start firmware running
 *
 * @v hermon		Hermon device
 * @ret rc		Return status code
 */
static int hermon_start_firmware ( struct hermon *hermon ) {
	struct hermonprm_query_fw fw;
	unsigned int fw_pages;
	size_t fw_len;
	physaddr_t fw_base;
	int rc;

	/* Get firmware parameters */
	if ( ( rc = hermon_cmd_query_fw ( hermon, &fw ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p could not query firmware: %s\n",
		       hermon, strerror ( rc ) );
		goto err_query_fw;
	}
	DBGC ( hermon, "Hermon %p firmware version %d.%d.%d\n", hermon,
	       MLX_GET ( &fw, fw_rev_major ), MLX_GET ( &fw, fw_rev_minor ),
	       MLX_GET ( &fw, fw_rev_subminor ) );
	fw_pages = MLX_GET ( &fw, fw_pages );
	DBGC ( hermon, "Hermon %p requires %d pages (%d kB) for firmware\n",
	       hermon, fw_pages, ( fw_pages * 4 ) );

	/* Allocate firmware pages and map firmware area */
	fw_len = ( fw_pages * HERMON_PAGE_SIZE );
	if ( ! hermon->firmware_area ) {
		hermon->firmware_len = fw_len;
		hermon->firmware_area = umalloc ( hermon->firmware_len );
		if ( ! hermon->firmware_area ) {
			rc = -ENOMEM;
			goto err_alloc_fa;
		}
	} else {
		assert ( hermon->firmware_len == fw_len );
	}
	fw_base = user_to_phys ( hermon->firmware_area, 0 );
	DBGC ( hermon, "Hermon %p firmware area at physical [%08lx,%08lx)\n",
	       hermon, fw_base, ( fw_base + fw_len ) );
	if ( ( rc = hermon_map_vpm ( hermon, hermon_cmd_map_fa,
				     0, fw_base, fw_len ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p could not map firmware: %s\n",
		       hermon, strerror ( rc ) );
		goto err_map_fa;
	}

	/* Start firmware */
	if ( ( rc = hermon_cmd_run_fw ( hermon ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p could not run firmware: %s\n",
		       hermon, strerror ( rc ) );
		goto err_run_fw;
	}

	DBGC ( hermon, "Hermon %p firmware started\n", hermon );
	return 0;

 err_run_fw:
 err_map_fa:
	hermon_cmd_unmap_fa ( hermon );
 err_alloc_fa:
 err_query_fw:
	return rc;
}
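
/* The firmware area is allocated once and then reused: if the
 * firmware is later stopped and restarted, the assert() above checks
 * that the firmware still requests the same number of pages, so that
 * the previously allocated area can simply be remapped at the same
 * physical location.  (The restart scenario is inferred from the
 * code structure; the reuse path itself is what the code guarantees.)
 */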

/**
 * Stop firmware running
 *
 * @v hermon		Hermon device
 */
static void hermon_stop_firmware ( struct hermon *hermon ) {
	int rc;

	if ( ( rc = hermon_cmd_unmap_fa ( hermon ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p FATAL could not stop firmware: %s\n",
		       hermon, strerror ( rc ) );
		/* Leak memory and return; at least we avoid corruption */
		hermon->firmware_area = UNULL;
	}
}

/***************************************************************************
 *
 * Infinihost Context Memory management
 *
 ***************************************************************************
 */

/**
 * Get device limits
 *
 * @v hermon		Hermon device
 * @ret rc		Return status code
 */
static int hermon_get_cap ( struct hermon *hermon ) {
	struct hermonprm_query_dev_cap dev_cap;
	int rc;

	if ( ( rc = hermon_cmd_query_dev_cap ( hermon, &dev_cap ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p could not get device limits: %s\n",
		       hermon, strerror ( rc ) );
		return rc;
	}

	hermon->cap.cmpt_entry_size = MLX_GET ( &dev_cap, c_mpt_entry_sz );
	hermon->cap.reserved_qps =
		( 1 << MLX_GET ( &dev_cap, log2_rsvd_qps ) );
	hermon->cap.qpc_entry_size = MLX_GET ( &dev_cap, qpc_entry_sz );
	hermon->cap.altc_entry_size = MLX_GET ( &dev_cap, altc_entry_sz );
	hermon->cap.auxc_entry_size = MLX_GET ( &dev_cap, aux_entry_sz );
	hermon->cap.reserved_srqs =
		( 1 << MLX_GET ( &dev_cap, log2_rsvd_srqs ) );
	hermon->cap.srqc_entry_size = MLX_GET ( &dev_cap, srq_entry_sz );
	hermon->cap.reserved_cqs =
		( 1 << MLX_GET ( &dev_cap, log2_rsvd_cqs ) );
	hermon->cap.cqc_entry_size = MLX_GET ( &dev_cap, cqc_entry_sz );
	hermon->cap.reserved_eqs = MLX_GET ( &dev_cap, num_rsvd_eqs );
	if ( hermon->cap.reserved_eqs == 0 ) {
		/* Backward compatibility */
		hermon->cap.reserved_eqs =
			( 1 << MLX_GET ( &dev_cap, log2_rsvd_eqs ) );
	}
	hermon->cap.eqc_entry_size = MLX_GET ( &dev_cap, eqc_entry_sz );
	hermon->cap.reserved_mtts =
		( 1 << MLX_GET ( &dev_cap, log2_rsvd_mtts ) );
	hermon->cap.mtt_entry_size = MLX_GET ( &dev_cap, mtt_entry_sz );
	hermon->cap.reserved_mrws =
		( 1 << MLX_GET ( &dev_cap, log2_rsvd_mrws ) );
	hermon->cap.dmpt_entry_size = MLX_GET ( &dev_cap, d_mpt_entry_sz );
	hermon->cap.reserved_uars = MLX_GET ( &dev_cap, num_rsvd_uars );
	hermon->cap.num_ports = MLX_GET ( &dev_cap, num_ports );
	hermon->cap.dpdp = MLX_GET ( &dev_cap, dpdp );

	/* Sanity check */
	if ( hermon->cap.num_ports > HERMON_MAX_PORTS ) {
		DBGC ( hermon, "Hermon %p has %d ports (only %d supported)\n",
		       hermon, hermon->cap.num_ports, HERMON_MAX_PORTS );
		hermon->cap.num_ports = HERMON_MAX_PORTS;
	}

	return 0;
}

/**
 * Align ICM table
 *
 * @v icm_offset	Current ICM offset
 * @v len		ICM table length
 * @ret icm_offset	ICM offset
 */
static uint64_t icm_align ( uint64_t icm_offset, size_t len ) {

	/* Round up to a multiple of the table size */
	assert ( len == ( 1UL << ( fls ( len ) - 1 ) ) );
	return ( ( icm_offset + len - 1 ) & ~( ( ( uint64_t ) len ) - 1 ) );
}
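
/* Worked example: for a table of length 0x4000, an icm_offset of
 * 0x5000 rounds up to 0x8000.  The assert() enforces that len is an
 * exact power of two, since the mask arithmetic above is correct
 * only in that case.
 */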

/**
 * Map ICM (allocating if necessary)
 *
 * @v hermon		Hermon device
 * @v init_hca		INIT_HCA structure to fill in
 * @ret rc		Return status code
 */
static int hermon_map_icm ( struct hermon *hermon,
			    struct hermonprm_init_hca *init_hca ) {
	struct hermonprm_scalar_parameter icm_size;
	struct hermonprm_scalar_parameter icm_aux_size;
	uint64_t icm_offset = 0;
	unsigned int log_num_qps, log_num_srqs, log_num_cqs, log_num_eqs;
	unsigned int log_num_mtts, log_num_mpts, log_num_mcs;
	size_t cmpt_max_len;
	size_t icm_len, icm_aux_len;
	size_t len;
	physaddr_t icm_phys;
	int i;
	int rc;

	/*
	 * Start by carving up the ICM virtual address space
	 *
	 */

	/* Calculate number of each object type within ICM */
	log_num_qps = fls ( hermon->cap.reserved_qps +
			    HERMON_RSVD_SPECIAL_QPS + HERMON_MAX_QPS - 1 );
	log_num_srqs = fls ( hermon->cap.reserved_srqs - 1 );
	log_num_cqs = fls ( hermon->cap.reserved_cqs + HERMON_MAX_CQS - 1 );
	log_num_eqs = fls ( hermon->cap.reserved_eqs + HERMON_MAX_EQS - 1 );
	log_num_mtts = fls ( hermon->cap.reserved_mtts + HERMON_MAX_MTTS - 1 );
	log_num_mpts = fls ( hermon->cap.reserved_mrws + 1 - 1 );
	log_num_mcs = HERMON_LOG_MULTICAST_HASH_SIZE;

	/* ICM starts with the cMPT tables, which are sparse */
	cmpt_max_len = ( HERMON_CMPT_MAX_ENTRIES *
			 ( ( uint64_t ) hermon->cap.cmpt_entry_size ) );
	len = ( ( ( ( 1 << log_num_qps ) * hermon->cap.cmpt_entry_size ) +
		  HERMON_PAGE_SIZE - 1 ) & ~( HERMON_PAGE_SIZE - 1 ) );
	hermon->icm_map[HERMON_ICM_QP_CMPT].offset = icm_offset;
	hermon->icm_map[HERMON_ICM_QP_CMPT].len = len;
	icm_offset += cmpt_max_len;
	len = ( ( ( ( 1 << log_num_srqs ) * hermon->cap.cmpt_entry_size ) +
		  HERMON_PAGE_SIZE - 1 ) & ~( HERMON_PAGE_SIZE - 1 ) );
	hermon->icm_map[HERMON_ICM_SRQ_CMPT].offset = icm_offset;
	hermon->icm_map[HERMON_ICM_SRQ_CMPT].len = len;
	icm_offset += cmpt_max_len;
	len = ( ( ( ( 1 << log_num_cqs ) * hermon->cap.cmpt_entry_size ) +
		  HERMON_PAGE_SIZE - 1 ) & ~( HERMON_PAGE_SIZE - 1 ) );
	hermon->icm_map[HERMON_ICM_CQ_CMPT].offset = icm_offset;
	hermon->icm_map[HERMON_ICM_CQ_CMPT].len = len;
	icm_offset += cmpt_max_len;
	len = ( ( ( ( 1 << log_num_eqs ) * hermon->cap.cmpt_entry_size ) +
		  HERMON_PAGE_SIZE - 1 ) & ~( HERMON_PAGE_SIZE - 1 ) );
	hermon->icm_map[HERMON_ICM_EQ_CMPT].offset = icm_offset;
	hermon->icm_map[HERMON_ICM_EQ_CMPT].len = len;
	icm_offset += cmpt_max_len;

	hermon->icm_map[HERMON_ICM_OTHER].offset = icm_offset;
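
	/* Each cMPT class is given cmpt_max_len of ICM virtual address
	 * space, but only the first len bytes (rounded up to a page)
	 * are recorded for physical backing; the remainder of each
	 * class's window stays sparse.  Hence icm_offset advances by
	 * cmpt_max_len while icm_map[].len records only the backed
	 * length.
	 */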

	/* Queue pair contexts */
	len = ( ( 1 << log_num_qps ) * hermon->cap.qpc_entry_size );
	icm_offset = icm_align ( icm_offset, len );
	MLX_FILL_1 ( init_hca, 12,
		     qpc_eec_cqc_eqc_rdb_parameters.qpc_base_addr_h,
		     ( icm_offset >> 32 ) );
	MLX_FILL_2 ( init_hca, 13,
		     qpc_eec_cqc_eqc_rdb_parameters.qpc_base_addr_l,
		     ( icm_offset >> 5 ),
		     qpc_eec_cqc_eqc_rdb_parameters.log_num_of_qp,
		     log_num_qps );
	DBGC ( hermon, "Hermon %p ICM QPC is %d x %#zx at [%08llx,%08llx)\n",
	       hermon, ( 1 << log_num_qps ), hermon->cap.qpc_entry_size,
	       icm_offset, ( icm_offset + len ) );
	icm_offset += len;

	/* Extended alternate path contexts */
	len = ( ( 1 << log_num_qps ) * hermon->cap.altc_entry_size );
	icm_offset = icm_align ( icm_offset, len );
	MLX_FILL_1 ( init_hca, 24,
		     qpc_eec_cqc_eqc_rdb_parameters.altc_base_addr_h,
		     ( icm_offset >> 32 ) );
	MLX_FILL_1 ( init_hca, 25,
		     qpc_eec_cqc_eqc_rdb_parameters.altc_base_addr_l,
		     icm_offset );
	DBGC ( hermon, "Hermon %p ICM ALTC is %d x %#zx at [%08llx,%08llx)\n",
	       hermon, ( 1 << log_num_qps ), hermon->cap.altc_entry_size,
	       icm_offset, ( icm_offset + len ) );
	icm_offset += len;

	/* Extended auxiliary contexts */
	len = ( ( 1 << log_num_qps ) * hermon->cap.auxc_entry_size );
	icm_offset = icm_align ( icm_offset, len );
	MLX_FILL_1 ( init_hca, 28,
		     qpc_eec_cqc_eqc_rdb_parameters.auxc_base_addr_h,
		     ( icm_offset >> 32 ) );
	MLX_FILL_1 ( init_hca, 29,
		     qpc_eec_cqc_eqc_rdb_parameters.auxc_base_addr_l,
		     icm_offset );
	DBGC ( hermon, "Hermon %p ICM AUXC is %d x %#zx at [%08llx,%08llx)\n",
	       hermon, ( 1 << log_num_qps ), hermon->cap.auxc_entry_size,
	       icm_offset, ( icm_offset + len ) );
	icm_offset += len;

	/* Shared receive queue contexts */
	len = ( ( 1 << log_num_srqs ) * hermon->cap.srqc_entry_size );
	icm_offset = icm_align ( icm_offset, len );
	MLX_FILL_1 ( init_hca, 18,
		     qpc_eec_cqc_eqc_rdb_parameters.srqc_base_addr_h,
		     ( icm_offset >> 32 ) );
	MLX_FILL_2 ( init_hca, 19,
		     qpc_eec_cqc_eqc_rdb_parameters.srqc_base_addr_l,
		     ( icm_offset >> 5 ),
		     qpc_eec_cqc_eqc_rdb_parameters.log_num_of_srq,
		     log_num_srqs );
	DBGC ( hermon, "Hermon %p ICM SRQC is %d x %#zx at [%08llx,%08llx)\n",
	       hermon, ( 1 << log_num_srqs ), hermon->cap.srqc_entry_size,
	       icm_offset, ( icm_offset + len ) );
	icm_offset += len;

	/* Completion queue contexts */
	len = ( ( 1 << log_num_cqs ) * hermon->cap.cqc_entry_size );
	icm_offset = icm_align ( icm_offset, len );
	MLX_FILL_1 ( init_hca, 20,
		     qpc_eec_cqc_eqc_rdb_parameters.cqc_base_addr_h,
		     ( icm_offset >> 32 ) );
	MLX_FILL_2 ( init_hca, 21,
		     qpc_eec_cqc_eqc_rdb_parameters.cqc_base_addr_l,
		     ( icm_offset >> 5 ),
		     qpc_eec_cqc_eqc_rdb_parameters.log_num_of_cq,
		     log_num_cqs );
	DBGC ( hermon, "Hermon %p ICM CQC is %d x %#zx at [%08llx,%08llx)\n",
	       hermon, ( 1 << log_num_cqs ), hermon->cap.cqc_entry_size,
	       icm_offset, ( icm_offset + len ) );
	icm_offset += len;

	/* Event queue contexts */
	len = ( ( 1 << log_num_eqs ) * hermon->cap.eqc_entry_size );
	icm_offset = icm_align ( icm_offset, len );
	MLX_FILL_1 ( init_hca, 32,
		     qpc_eec_cqc_eqc_rdb_parameters.eqc_base_addr_h,
		     ( icm_offset >> 32 ) );
	MLX_FILL_2 ( init_hca, 33,
		     qpc_eec_cqc_eqc_rdb_parameters.eqc_base_addr_l,
		     ( icm_offset >> 5 ),
		     qpc_eec_cqc_eqc_rdb_parameters.log_num_of_eq,
		     log_num_eqs );
	DBGC ( hermon, "Hermon %p ICM EQC is %d x %#zx at [%08llx,%08llx)\n",
	       hermon, ( 1 << log_num_eqs ), hermon->cap.eqc_entry_size,
	       icm_offset, ( icm_offset + len ) );
	icm_offset += len;

	/* Memory translation table */
	len = ( ( 1 << log_num_mtts ) * hermon->cap.mtt_entry_size );
	icm_offset = icm_align ( icm_offset, len );
	MLX_FILL_1 ( init_hca, 64,
		     tpt_parameters.mtt_base_addr_h, ( icm_offset >> 32 ) );
	MLX_FILL_1 ( init_hca, 65,
		     tpt_parameters.mtt_base_addr_l, icm_offset );
	DBGC ( hermon, "Hermon %p ICM MTT is %d x %#zx at [%08llx,%08llx)\n",
	       hermon, ( 1 << log_num_mtts ), hermon->cap.mtt_entry_size,
	       icm_offset, ( icm_offset + len ) );
	icm_offset += len;

	/* Memory protection table */
	len = ( ( 1 << log_num_mpts ) * hermon->cap.dmpt_entry_size );
	icm_offset = icm_align ( icm_offset, len );
	MLX_FILL_1 ( init_hca, 60,
		     tpt_parameters.dmpt_base_adr_h, ( icm_offset >> 32 ) );
	MLX_FILL_1 ( init_hca, 61,
		     tpt_parameters.dmpt_base_adr_l, icm_offset );
	MLX_FILL_1 ( init_hca, 62,
		     tpt_parameters.log_dmpt_sz, log_num_mpts );
	DBGC ( hermon, "Hermon %p ICM DMPT is %d x %#zx at [%08llx,%08llx)\n",
	       hermon, ( 1 << log_num_mpts ), hermon->cap.dmpt_entry_size,
	       icm_offset, ( icm_offset + len ) );
	icm_offset += len;

	/* Multicast table */
	len = ( ( 1 << log_num_mcs ) * sizeof ( struct hermonprm_mcg_entry ) );
	icm_offset = icm_align ( icm_offset, len );
	MLX_FILL_1 ( init_hca, 48,
		     multicast_parameters.mc_base_addr_h,
		     ( icm_offset >> 32 ) );
	MLX_FILL_1 ( init_hca, 49,
		     multicast_parameters.mc_base_addr_l, icm_offset );
	MLX_FILL_1 ( init_hca, 52,
		     multicast_parameters.log_mc_table_entry_sz,
		     fls ( sizeof ( struct hermonprm_mcg_entry ) - 1 ) );
	MLX_FILL_1 ( init_hca, 53,
		     multicast_parameters.log_mc_table_hash_sz, log_num_mcs );
	MLX_FILL_1 ( init_hca, 54,
		     multicast_parameters.log_mc_table_sz, log_num_mcs );
	DBGC ( hermon, "Hermon %p ICM MC is %d x %#zx at [%08llx,%08llx)\n",
	       hermon, ( 1 << log_num_mcs ),
	       sizeof ( struct hermonprm_mcg_entry ),
	       icm_offset, ( icm_offset + len ) );
	icm_offset += len;

	hermon->icm_map[HERMON_ICM_OTHER].len =
		( icm_offset - hermon->icm_map[HERMON_ICM_OTHER].offset );
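
	/* At this point icm_offset gives the total ICM virtual size:
	 * four sparse cMPT windows followed by the densely packed
	 * "other" region (QPC, ALTC, AUXC, SRQC, CQC, EQC, MTT, DMPT
	 * and multicast tables, each aligned to its own size by
	 * icm_align()).
	 */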

	/*
	 * Allocate and map physical memory for (portions of) ICM
	 *
	 * Map is:
	 *   ICM AUX area (aligned to its own size)
	 *   cMPT areas
	 *   Other areas
	 */

	/* Calculate physical memory required for ICM */
	icm_len = 0;
	for ( i = 0 ; i < HERMON_ICM_NUM_REGIONS ; i++ ) {
		icm_len += hermon->icm_map[i].len;
	}

	/* Get ICM auxiliary area size */
	memset ( &icm_size, 0, sizeof ( icm_size ) );
	MLX_FILL_1 ( &icm_size, 0, value_hi, ( icm_offset >> 32 ) );
	MLX_FILL_1 ( &icm_size, 1, value, icm_offset );
	if ( ( rc = hermon_cmd_set_icm_size ( hermon, &icm_size,
					      &icm_aux_size ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p could not set ICM size: %s\n",
		       hermon, strerror ( rc ) );
		goto err_set_icm_size;
	}
	icm_aux_len = ( MLX_GET ( &icm_aux_size, value ) * HERMON_PAGE_SIZE );

	/* Allocate ICM data and auxiliary area */
	DBGC ( hermon, "Hermon %p requires %zd kB ICM and %zd kB AUX ICM\n",
	       hermon, ( icm_len / 1024 ), ( icm_aux_len / 1024 ) );
	if ( ! hermon->icm ) {
		hermon->icm_len = icm_len;
		hermon->icm_aux_len = icm_aux_len;
		hermon->icm = umalloc ( hermon->icm_aux_len + hermon->icm_len );
		if ( ! hermon->icm ) {
			rc = -ENOMEM;
			goto err_alloc;
		}
	} else {
		assert ( hermon->icm_len == icm_len );
		assert ( hermon->icm_aux_len == icm_aux_len );
	}
	icm_phys = user_to_phys ( hermon->icm, 0 );

	/* Map ICM auxiliary area */
	DBGC ( hermon, "Hermon %p mapping ICM AUX => %08lx\n",
	       hermon, icm_phys );
	if ( ( rc = hermon_map_vpm ( hermon, hermon_cmd_map_icm_aux,
				     0, icm_phys, icm_aux_len ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p could not map AUX ICM: %s\n",
		       hermon, strerror ( rc ) );
		goto err_map_icm_aux;
	}
	icm_phys += icm_aux_len;

	/* Map ICM area */
	for ( i = 0 ; i < HERMON_ICM_NUM_REGIONS ; i++ ) {
		DBGC ( hermon, "Hermon %p mapping ICM %llx+%zx => %08lx\n",
		       hermon, hermon->icm_map[i].offset,
		       hermon->icm_map[i].len, icm_phys );
		if ( ( rc = hermon_map_vpm ( hermon, hermon_cmd_map_icm,
					     hermon->icm_map[i].offset,
					     icm_phys,
					     hermon->icm_map[i].len ) ) != 0 ){
			DBGC ( hermon, "Hermon %p could not map ICM: %s\n",
			       hermon, strerror ( rc ) );
			goto err_map_icm;
		}
		icm_phys += hermon->icm_map[i].len;
	}

	return 0;

 err_map_icm:
	assert ( i == 0 ); /* We don't handle partial failure at present */
 err_map_icm_aux:
	hermon_cmd_unmap_icm_aux ( hermon );
 err_alloc:
 err_set_icm_size:
	return rc;
}

/**
 * Unmap ICM
 *
 * @v hermon		Hermon device
 */
static void hermon_unmap_icm ( struct hermon *hermon ) {
	struct hermonprm_scalar_parameter unmap_icm;
	int i;

	for ( i = ( HERMON_ICM_NUM_REGIONS - 1 ) ; i >= 0 ; i-- ) {
		memset ( &unmap_icm, 0, sizeof ( unmap_icm ) );
		MLX_FILL_1 ( &unmap_icm, 0, value_hi,
			     ( hermon->icm_map[i].offset >> 32 ) );
		MLX_FILL_1 ( &unmap_icm, 1, value,
			     hermon->icm_map[i].offset );
		hermon_cmd_unmap_icm ( hermon,
				       ( 1 << fls ( ( hermon->icm_map[i].len /
						      HERMON_PAGE_SIZE ) - 1)),
				       &unmap_icm );
	}
	hermon_cmd_unmap_icm_aux ( hermon );
}
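
/* Regions are unmapped in reverse of the order in which they were
 * mapped, and the page count passed to UNMAP_ICM is the region's
 * length in pages rounded up to a power of two, presumably to cover
 * all of the power-of-two blocks issued at map time.
 */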

/***************************************************************************
 *
 * Initialisation and teardown
 *
 ***************************************************************************
 */

/**
 * Reset device
 *
 * @v hermon		Hermon device
 */
static void hermon_reset ( struct hermon *hermon ) {
	struct pci_device *pci = hermon->pci;
	struct pci_config_backup backup;
	static const uint8_t backup_exclude[] =
		PCI_CONFIG_BACKUP_EXCLUDE ( 0x58, 0x5c );

	/* Perform device reset and preserve PCI configuration */
	pci_backup ( pci, &backup, backup_exclude );
	writel ( HERMON_RESET_MAGIC,
		 ( hermon->config + HERMON_RESET_OFFSET ) );
	mdelay ( HERMON_RESET_WAIT_TIME_MS );
	pci_restore ( pci, &backup, backup_exclude );

	/* Reset command interface toggle */
	hermon->toggle = 0;
}
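
/* The reset wipes the device's PCI configuration space, so the
 * standard registers are backed up beforehand and restored after the
 * device has had HERMON_RESET_WAIT_TIME_MS to recover; configuration
 * offsets 0x58-0x5c are excluded from both backup and restore.
 */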

/**
 * Set up memory protection table
 *
 * @v hermon		Hermon device
 * @ret rc		Return status code
 */
static int hermon_setup_mpt ( struct hermon *hermon ) {
	struct hermonprm_mpt mpt;
	uint32_t key;
	int rc;

	/* Derive key */
	key = ( hermon->cap.reserved_mrws | HERMON_MKEY_PREFIX );
	hermon->lkey = ( ( key << 8 ) | ( key >> 24 ) );

	/* Initialise memory protection table */
	memset ( &mpt, 0, sizeof ( mpt ) );
	MLX_FILL_7 ( &mpt, 0,
		     atomic, 1,
		     rw, 1,
		     rr, 1,
		     lw, 1,
		     lr, 1,
		     pa, 1,
		     r_w, 1 );
	MLX_FILL_1 ( &mpt, 2, mem_key, key );
	MLX_FILL_1 ( &mpt, 3,
		     pd, HERMON_GLOBAL_PD );
	MLX_FILL_1 ( &mpt, 10, len64, 1 );
	if ( ( rc = hermon_cmd_sw2hw_mpt ( hermon,
					   hermon->cap.reserved_mrws,
					   &mpt ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p could not set up MPT: %s\n",
		       hermon, strerror ( rc ) );
		return rc;
	}

	return 0;
}
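
/* Note that hermon->lkey is the key value rotated left by one byte,
 * which puts the MPT index in the upper 24 bits and the
 * HERMON_MKEY_PREFIX byte in the low "variant" byte; this appears to
 * be the form in which the key is quoted elsewhere in the driver.
 * Setting len64 makes this single region span the entire 64-bit
 * address space, so one key suffices for all local DMA.
 */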

/**
 * Configure special queue pairs
 *
 * @v hermon		Hermon device
 * @ret rc		Return status code
 */
static int hermon_configure_special_qps ( struct hermon *hermon ) {
	int rc;

	/* Special QP block must be aligned on its own size */
	hermon->special_qpn_base = ( ( hermon->cap.reserved_qps +
				       HERMON_NUM_SPECIAL_QPS - 1 )
				     & ~( HERMON_NUM_SPECIAL_QPS - 1 ) );
	hermon->qpn_base = ( hermon->special_qpn_base +
			     HERMON_NUM_SPECIAL_QPS );
	DBGC ( hermon, "Hermon %p special QPs at [%lx,%lx]\n", hermon,
	       hermon->special_qpn_base, ( hermon->qpn_base - 1 ) );

	/* Issue command to configure special QPs */
	if ( ( rc = hermon_cmd_conf_special_qp ( hermon, 0x00,
					hermon->special_qpn_base ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p could not configure special QPs: "
		       "%s\n", hermon, strerror ( rc ) );
		return rc;
	}

	return 0;
}

/**
 * Start Hermon device
 *
 * @v hermon		Hermon device
 * @v running		Firmware is already running
 * @ret rc		Return status code
 */
static int hermon_start ( struct hermon *hermon, int running ) {
	struct hermonprm_init_hca init_hca;
	unsigned int i;
	int rc;

	/* Start firmware if not already running */
	if ( ! running ) {
		if ( ( rc = hermon_start_firmware ( hermon ) ) != 0 )
			goto err_start_firmware;
	}

	/* Allocate and map ICM */
	memset ( &init_hca, 0, sizeof ( init_hca ) );
	if ( ( rc = hermon_map_icm ( hermon, &init_hca ) ) != 0 )
		goto err_map_icm;

	/* Initialise HCA */
	MLX_FILL_1 ( &init_hca, 0, version, 0x02 /* "Must be 0x02" */ );
	MLX_FILL_1 ( &init_hca, 5, udp, 1 );
	MLX_FILL_1 ( &init_hca, 74, uar_parameters.log_max_uars, 8 );
	if ( ( rc = hermon_cmd_init_hca ( hermon, &init_hca ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p could not initialise HCA: %s\n",
		       hermon, strerror ( rc ) );
		goto err_init_hca;
	}

	/* Set up memory protection */
	if ( ( rc = hermon_setup_mpt ( hermon ) ) != 0 )
		goto err_setup_mpt;
	for ( i = 0 ; i < hermon->cap.num_ports ; i++ )
		hermon->port[i].ibdev->rdma_key = hermon->lkey;

	/* Set up event queue */
	if ( ( rc = hermon_create_eq ( hermon ) ) != 0 )
		goto err_create_eq;

	/* Configure special QPs */
	if ( ( rc = hermon_configure_special_qps ( hermon ) ) != 0 )
		goto err_conf_special_qps;

	return 0;

 err_conf_special_qps:
	hermon_destroy_eq ( hermon );
 err_create_eq:
 err_setup_mpt:
	hermon_cmd_close_hca ( hermon );
 err_init_hca:
	hermon_unmap_icm ( hermon );
 err_map_icm:
	hermon_stop_firmware ( hermon );
 err_start_firmware:
	return rc;
}

/**
 * Stop Hermon device
 *
 * @v hermon		Hermon device
 */
static void hermon_stop ( struct hermon *hermon ) {
	hermon_destroy_eq ( hermon );
	hermon_cmd_close_hca ( hermon );
	hermon_unmap_icm ( hermon );
	hermon_stop_firmware ( hermon );
	hermon_reset ( hermon );
}

/**
 * Open Hermon device
 *
 * @v hermon		Hermon device
 * @ret rc		Return status code
 */
static int hermon_open ( struct hermon *hermon ) {
	int rc;

	/* Start device if applicable */
	if ( hermon->open_count == 0 ) {
		if ( ( rc = hermon_start ( hermon, 0 ) ) != 0 )
			return rc;
	}

	/* Increment open counter */
	hermon->open_count++;

	return 0;
}

/**
 * Close Hermon device
 *
 * @v hermon		Hermon device
 */
static void hermon_close ( struct hermon *hermon ) {

	/* Decrement open counter */
	assert ( hermon->open_count != 0 );
	hermon->open_count--;

	/* Stop device if applicable */
	if ( hermon->open_count == 0 )
		hermon_stop ( hermon );
}
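
/* hermon_open() and hermon_close() reference-count users of the
 * device, so the expensive hermon_start()/hermon_stop() sequence
 * runs only on the first open and the last close; this allows
 * several ports (or link-layer personalities) to share the one
 * underlying HCA without repeated firmware restarts.
 */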

/***************************************************************************
 *
 * Infiniband link-layer operations
 *
 ***************************************************************************
 */

/**
 * Initialise Infiniband link
 *
 * @v ibdev		Infiniband device
 * @ret rc		Return status code
 */
static int hermon_ib_open ( struct ib_device *ibdev ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );
	union hermonprm_set_port set_port;
	int rc;

	/* Open hardware */
	if ( ( rc = hermon_open ( hermon ) ) != 0 )
		goto err_open;

	/* Set port parameters */
	memset ( &set_port, 0, sizeof ( set_port ) );
	MLX_FILL_8 ( &set_port.ib, 0,
		     mmc, 1,
		     mvc, 1,
		     mp, 1,
		     mg, 1,
		     mtu_cap, IB_MTU_2048,
		     vl_cap, IB_VL_0,
		     rcm, 1,
		     lss, 1 );
	MLX_FILL_2 ( &set_port.ib, 10,
		     max_pkey, 1,
		     max_gid, 1 );
	MLX_FILL_1 ( &set_port.ib, 28,
		     link_speed_supported, 1 );
	if ( ( rc = hermon_cmd_set_port ( hermon, 0, ibdev->port,
					  &set_port ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p port %d could not set port: %s\n",
		       hermon, ibdev->port, strerror ( rc ) );
		goto err_set_port;
	}

	/* Initialise port */
	if ( ( rc = hermon_cmd_init_port ( hermon, ibdev->port ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p port %d could not initialise port: "
		       "%s\n", hermon, ibdev->port, strerror ( rc ) );
		goto err_init_port;
	}

	/* Update MAD parameters */
	ib_smc_update ( ibdev, hermon_mad );

	return 0;

 err_init_port:
 err_set_port:
	hermon_close ( hermon );
 err_open:
	return rc;
}

/**
 * Close Infiniband link
 *
 * @v ibdev		Infiniband device
 */
static void hermon_ib_close ( struct ib_device *ibdev ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );
	int rc;

	/* Close port */
	if ( ( rc = hermon_cmd_close_port ( hermon, ibdev->port ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p port %d could not close port: %s\n",
		       hermon, ibdev->port, strerror ( rc ) );
		/* Nothing we can do about this */
	}

	/* Close hardware */
	hermon_close ( hermon );
}

/**
 * Inform embedded subnet management agent of a received MAD
 *
 * @v ibdev		Infiniband device
 * @v mad		MAD
 * @ret rc		Return status code
 */
static int hermon_inform_sma ( struct ib_device *ibdev,
			       union ib_mad *mad ) {
	int rc;

	/* Send the MAD to the embedded SMA */
	if ( ( rc = hermon_mad ( ibdev, mad ) ) != 0 )
		return rc;

	/* Update parameters held in software */
	ib_smc_update ( ibdev, hermon_mad );

	return 0;
}

/***************************************************************************
 *
 * Multicast group operations
 *
 ***************************************************************************
 */

/**
 * Attach to multicast group
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @v gid		Multicast GID
 * @ret rc		Return status code
 */
static int hermon_mcast_attach ( struct ib_device *ibdev,
				 struct ib_queue_pair *qp,
				 union ib_gid *gid ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );
	struct hermonprm_mgm_hash hash;
	struct hermonprm_mcg_entry mcg;
	unsigned int index;
	int rc;

	/* Generate hash table index */
	if ( ( rc = hermon_cmd_mgid_hash ( hermon, gid, &hash ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p could not hash GID: %s\n",
		       hermon, strerror ( rc ) );
		return rc;
	}
	index = MLX_GET ( &hash, hash );

	/* Check for existing hash table entry */
	if ( ( rc = hermon_cmd_read_mcg ( hermon, index, &mcg ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p could not read MCG %#x: %s\n",
		       hermon, index, strerror ( rc ) );
		return rc;
	}
	if ( MLX_GET ( &mcg, hdr.members_count ) != 0 ) {
		/* FIXME: this implementation allows only a single QP
		 * per multicast group, and doesn't handle hash
		 * collisions.  Sufficient for IPoIB but may need to
		 * be extended in future.
		 */
		DBGC ( hermon, "Hermon %p MGID index %#x already in use\n",
		       hermon, index );
		return -EBUSY;
	}

	/* Update hash table entry */
	MLX_FILL_1 ( &mcg, 1, hdr.members_count, 1 );
	MLX_FILL_1 ( &mcg, 8, qp[0].qpn, qp->qpn );
	memcpy ( &mcg.u.dwords[4], gid, sizeof ( *gid ) );
	if ( ( rc = hermon_cmd_write_mcg ( hermon, index, &mcg ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p could not write MCG %#x: %s\n",
		       hermon, index, strerror ( rc ) );
		return rc;
	}

	return 0;
}

/**
 * Detach from multicast group
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @v gid		Multicast GID
 */
static void hermon_mcast_detach ( struct ib_device *ibdev,
				  struct ib_queue_pair *qp __unused,
				  union ib_gid *gid ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );
	struct hermonprm_mgm_hash hash;
	struct hermonprm_mcg_entry mcg;
	unsigned int index;
	int rc;

	/* Generate hash table index */
	if ( ( rc = hermon_cmd_mgid_hash ( hermon, gid, &hash ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p could not hash GID: %s\n",
		       hermon, strerror ( rc ) );
		return;
	}
	index = MLX_GET ( &hash, hash );

	/* Clear hash table entry */
	memset ( &mcg, 0, sizeof ( mcg ) );
	if ( ( rc = hermon_cmd_write_mcg ( hermon, index, &mcg ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p could not write MCG %#x: %s\n",
		       hermon, index, strerror ( rc ) );
		return;
	}
}

/** Hermon Infiniband operations */
static struct ib_device_operations hermon_ib_operations = {
	.create_cq	= hermon_create_cq,
	.destroy_cq	= hermon_destroy_cq,
	.create_qp	= hermon_create_qp,
	.modify_qp	= hermon_modify_qp,
	.destroy_qp	= hermon_destroy_qp,
	.post_send	= hermon_post_send,
	.post_recv	= hermon_post_recv,
	.poll_cq	= hermon_poll_cq,
	.poll_eq	= hermon_poll_eq,
	.open		= hermon_ib_open,
	.close		= hermon_ib_close,
	.mcast_attach	= hermon_mcast_attach,
	.mcast_detach	= hermon_mcast_detach,
	.set_port_info	= hermon_inform_sma,