/*
 * Copyright (C) 2008 Michael Brown <mbrown@fensystems.co.uk>.
 * Copyright (C) 2008 Mellanox Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

FILE_LICENCE ( GPL2_OR_LATER );

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <strings.h>
#include <unistd.h>
#include <errno.h>
#include <byteswap.h>
#include <ipxe/io.h>
#include <ipxe/pci.h>
#include <ipxe/pcibackup.h>
#include <ipxe/malloc.h>
#include <ipxe/umalloc.h>
#include <ipxe/iobuf.h>
#include <ipxe/netdevice.h>
#include <ipxe/infiniband.h>
#include <ipxe/ib_smc.h>
#include <ipxe/if_ether.h>
#include <ipxe/ethernet.h>
#include <ipxe/fcoe.h>
#include <ipxe/vlan.h>
#include <ipxe/bofm.h>
#include <ipxe/nvsvpd.h>
#include <ipxe/nvo.h>
#include "hermon.h"

/**
 * @file
 *
 * Mellanox Hermon Infiniband HCA
 *
 */

/***************************************************************************
 *
 * Queue number allocation
 *
 ***************************************************************************
 */

/**
 * Allocate offsets within usage bitmask
 *
 * @v bits		Usage bitmask
 * @v bits_len		Length of usage bitmask
 * @v num_bits		Number of contiguous bits to allocate within bitmask
 * @ret bit		First free bit within bitmask, or negative error
 */
static int hermon_bitmask_alloc ( hermon_bitmask_t *bits,
				  unsigned int bits_len,
				  unsigned int num_bits ) {
	unsigned int bit = 0;
	hermon_bitmask_t mask = 1;
	unsigned int found = 0;

	/* Search bits for num_bits contiguous free bits */
	while ( bit < bits_len ) {
		if ( ( mask & *bits ) == 0 ) {
			if ( ++found == num_bits )
				goto found;
		} else {
			found = 0;
		}
		bit++;
		mask = ( mask << 1 ) | ( mask >> ( 8 * sizeof ( mask ) - 1 ) );
		if ( mask == 1 )
			bits++;
	}
	return -ENFILE;

 found:
	/* Mark bits as in-use */
	do {
		*bits |= mask;
		if ( mask == 1 )
			bits--;
		mask = ( mask >> 1 ) | ( mask << ( 8 * sizeof ( mask ) - 1 ) );
	} while ( --found );

	return ( bit - num_bits + 1 );
}

/**
 * Free offsets within usage bitmask
 *
 * @v bits		Usage bitmask
 * @v bit		Starting bit within bitmask
 * @v num_bits		Number of contiguous bits to free within bitmask
 */
static void hermon_bitmask_free ( hermon_bitmask_t *bits,
				  int bit, unsigned int num_bits ) {
	hermon_bitmask_t mask;

	for ( ; num_bits ; bit++, num_bits-- ) {
		mask = ( 1 << ( bit % ( 8 * sizeof ( mask ) ) ) );
		bits[ ( bit / ( 8 * sizeof ( mask ) ) ) ] &= ~mask;
	}
}
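
/**
 * Exercise bitmask allocator (illustrative sketch only)
 *
 * This function is not called by the driver; it exists purely to
 * document the first-fit behaviour of the allocator above, following
 * this file's existing convention of unused debug helpers.
 */
static __attribute__ (( unused )) void hermon_bitmask_example ( void ) {
	hermon_bitmask_t bits[2];
	int first;

	/* Start with an empty bitmask */
	memset ( bits, 0, sizeof ( bits ) );

	/* First-fit allocation of four contiguous bits returns offset 0 */
	first = hermon_bitmask_alloc ( bits, ( 8 * sizeof ( bits ) ), 4 );
	assert ( first == 0 );

	/* A second allocation skips the in-use range and returns offset 4 */
	assert ( hermon_bitmask_alloc ( bits, ( 8 * sizeof ( bits ) ), 4 )
		 == 4 );

	/* Freeing a range makes it available to later allocations */
	hermon_bitmask_free ( bits, first, 4 );
}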

/***************************************************************************
 *
 * HCA commands
 *
 ***************************************************************************
 */

/**
 * Wait for Hermon command completion
 *
 * @v hermon		Hermon device
 * @v hcr		HCA command registers
 * @ret rc		Return status code
 */
static int hermon_cmd_wait ( struct hermon *hermon,
			     struct hermonprm_hca_command_register *hcr ) {
	unsigned int wait;

	for ( wait = HERMON_HCR_MAX_WAIT_MS ; wait ; wait-- ) {
		hcr->u.dwords[6] =
			readl ( hermon->config + HERMON_HCR_REG ( 6 ) );
		if ( ( MLX_GET ( hcr, go ) == 0 ) &&
		     ( MLX_GET ( hcr, t ) == hermon->toggle ) )
			return 0;
		mdelay ( 1 );
	}
	return -EBUSY;
}

/**
 * Issue HCA command
 *
 * @v hermon		Hermon device
 * @v command		Command opcode, flags and input/output lengths
 * @v op_mod		Opcode modifier (0 if no modifier applicable)
 * @v in		Input parameters
 * @v in_mod		Input modifier (0 if no modifier applicable)
 * @v out		Output parameters
 * @ret rc		Return status code
 */
static int hermon_cmd ( struct hermon *hermon, unsigned long command,
			unsigned int op_mod, const void *in,
			unsigned int in_mod, void *out ) {
	struct hermonprm_hca_command_register hcr;
	unsigned int opcode = HERMON_HCR_OPCODE ( command );
	size_t in_len = HERMON_HCR_IN_LEN ( command );
	size_t out_len = HERMON_HCR_OUT_LEN ( command );
	void *in_buffer;
	void *out_buffer;
	unsigned int status;
	unsigned int i;
	int rc;

	assert ( in_len <= HERMON_MBOX_SIZE );
	assert ( out_len <= HERMON_MBOX_SIZE );

	DBGC2 ( hermon, "Hermon %p command %02x in %zx%s out %zx%s\n",
		hermon, opcode, in_len,
		( ( command & HERMON_HCR_IN_MBOX ) ? "(mbox)" : "" ), out_len,
		( ( command & HERMON_HCR_OUT_MBOX ) ? "(mbox)" : "" ) );

	/* Check that HCR is free */
	if ( ( rc = hermon_cmd_wait ( hermon, &hcr ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p command interface locked\n",
		       hermon );
		return rc;
	}

	/* Flip HCR toggle */
	hermon->toggle = ( 1 - hermon->toggle );

	/* Prepare HCR */
	memset ( &hcr, 0, sizeof ( hcr ) );
	in_buffer = &hcr.u.dwords[0];
	if ( in_len && ( command & HERMON_HCR_IN_MBOX ) ) {
		memset ( hermon->mailbox_in, 0, HERMON_MBOX_SIZE );
		in_buffer = hermon->mailbox_in;
		MLX_FILL_H ( &hcr, 0, in_param_h, virt_to_bus ( in_buffer ) );
		MLX_FILL_1 ( &hcr, 1, in_param_l, virt_to_bus ( in_buffer ) );
	}
	memcpy ( in_buffer, in, in_len );
	MLX_FILL_1 ( &hcr, 2, input_modifier, in_mod );
	out_buffer = &hcr.u.dwords[3];
	if ( out_len && ( command & HERMON_HCR_OUT_MBOX ) ) {
		out_buffer = hermon->mailbox_out;
		MLX_FILL_H ( &hcr, 3, out_param_h,
			     virt_to_bus ( out_buffer ) );
		MLX_FILL_1 ( &hcr, 4, out_param_l,
			     virt_to_bus ( out_buffer ) );
	}
	MLX_FILL_4 ( &hcr, 6,
		     opcode, opcode,
		     opcode_modifier, op_mod,
		     go, 1,
		     t, hermon->toggle );
	DBGC ( hermon, "Hermon %p issuing command %04x\n",
	       hermon, opcode );
	DBGC2_HDA ( hermon, virt_to_phys ( hermon->config + HERMON_HCR_BASE ),
		    &hcr, sizeof ( hcr ) );
	if ( in_len && ( command & HERMON_HCR_IN_MBOX ) ) {
		DBGC2 ( hermon, "Input mailbox:\n" );
		DBGC2_HDA ( hermon, virt_to_phys ( in_buffer ), in_buffer,
			    ( ( in_len < 512 ) ? in_len : 512 ) );
	}

	/* Issue command */
	for ( i = 0 ; i < ( sizeof ( hcr ) / sizeof ( hcr.u.dwords[0] ) ) ;
	      i++ ) {
		writel ( hcr.u.dwords[i],
			 hermon->config + HERMON_HCR_REG ( i ) );
		barrier();
	}

	/* Wait for command completion */
	if ( ( rc = hermon_cmd_wait ( hermon, &hcr ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p timed out waiting for command:\n",
		       hermon );
		DBGC_HDA ( hermon,
			   virt_to_phys ( hermon->config + HERMON_HCR_BASE ),
			   &hcr, sizeof ( hcr ) );
		return rc;
	}

	/* Check command status */
	status = MLX_GET ( &hcr, status );
	if ( status != 0 ) {
		DBGC ( hermon, "Hermon %p command failed with status %02x:\n",
		       hermon, status );
		DBGC_HDA ( hermon,
			   virt_to_phys ( hermon->config + HERMON_HCR_BASE ),
			   &hcr, sizeof ( hcr ) );
		return -EIO;
	}

	/* Read output parameters, if any */
	hcr.u.dwords[3] = readl ( hermon->config + HERMON_HCR_REG ( 3 ) );
	hcr.u.dwords[4] = readl ( hermon->config + HERMON_HCR_REG ( 4 ) );
	memcpy ( out, out_buffer, out_len );
	if ( out_len ) {
		DBGC2 ( hermon, "Output%s:\n",
			( command & HERMON_HCR_OUT_MBOX ) ? " mailbox" : "" );
		DBGC2_HDA ( hermon, virt_to_phys ( out_buffer ), out_buffer,
			    ( ( out_len < 512 ) ? out_len : 512 ) );
	}

	return 0;
}
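
/*
 * Example ( illustrative sketch only ): the typed wrappers below each
 * expand into a single hermon_cmd() call.  A caller querying firmware
 * parameters would issue, for example:
 *
 *	struct hermonprm_query_fw fw;
 *	if ( ( rc = hermon_cmd_query_fw ( hermon, &fw ) ) != 0 )
 *		return rc;
 *
 * with mailbox setup, the go/toggle handshake and status checking all
 * handled inside hermon_cmd() itself.
 */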

static inline int
hermon_cmd_query_dev_cap ( struct hermon *hermon,
			   struct hermonprm_query_dev_cap *dev_cap ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_OUT_CMD ( HERMON_HCR_QUERY_DEV_CAP,
						 1, sizeof ( *dev_cap ) ),
			    0, NULL, 0, dev_cap );
}

static inline int
hermon_cmd_query_fw ( struct hermon *hermon, struct hermonprm_query_fw *fw ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_OUT_CMD ( HERMON_HCR_QUERY_FW,
						 1, sizeof ( *fw ) ),
			    0, NULL, 0, fw );
}

static inline int
hermon_cmd_init_hca ( struct hermon *hermon,
		      const struct hermonprm_init_hca *init_hca ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_IN_CMD ( HERMON_HCR_INIT_HCA,
						1, sizeof ( *init_hca ) ),
			    0, init_hca, 0, NULL );
}

static inline int
hermon_cmd_close_hca ( struct hermon *hermon ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_VOID_CMD ( HERMON_HCR_CLOSE_HCA ),
			    0, NULL, 0, NULL );
}

static inline int
hermon_cmd_init_port ( struct hermon *hermon, unsigned int port ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_VOID_CMD ( HERMON_HCR_INIT_PORT ),
			    0, NULL, port, NULL );
}

static inline int
hermon_cmd_close_port ( struct hermon *hermon, unsigned int port ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_VOID_CMD ( HERMON_HCR_CLOSE_PORT ),
			    0, NULL, port, NULL );
}

static inline int
hermon_cmd_set_port ( struct hermon *hermon, int is_ethernet,
		      unsigned int port_selector,
		      const union hermonprm_set_port *set_port ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_IN_CMD ( HERMON_HCR_SET_PORT,
						1, sizeof ( *set_port ) ),
			    is_ethernet, set_port, port_selector, NULL );
}

static inline int
hermon_cmd_sw2hw_mpt ( struct hermon *hermon, unsigned int index,
		       const struct hermonprm_mpt *mpt ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_IN_CMD ( HERMON_HCR_SW2HW_MPT,
						1, sizeof ( *mpt ) ),
			    0, mpt, index, NULL );
}

static inline int
hermon_cmd_write_mtt ( struct hermon *hermon,
		       const struct hermonprm_write_mtt *write_mtt ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_IN_CMD ( HERMON_HCR_WRITE_MTT,
						1, sizeof ( *write_mtt ) ),
			    0, write_mtt, 1, NULL );
}

static inline int
hermon_cmd_map_eq ( struct hermon *hermon, unsigned long index_map,
		    const struct hermonprm_event_mask *mask ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_IN_CMD ( HERMON_HCR_MAP_EQ,
						0, sizeof ( *mask ) ),
			    0, mask, index_map, NULL );
}

static inline int
hermon_cmd_sw2hw_eq ( struct hermon *hermon, unsigned int index,
		      const struct hermonprm_eqc *eqctx ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_IN_CMD ( HERMON_HCR_SW2HW_EQ,
						1, sizeof ( *eqctx ) ),
			    0, eqctx, index, NULL );
}

static inline int
hermon_cmd_hw2sw_eq ( struct hermon *hermon, unsigned int index,
		      struct hermonprm_eqc *eqctx ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_OUT_CMD ( HERMON_HCR_HW2SW_EQ,
						 1, sizeof ( *eqctx ) ),
			    1, NULL, index, eqctx );
}

static inline int
hermon_cmd_query_eq ( struct hermon *hermon, unsigned int index,
		      struct hermonprm_eqc *eqctx ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_OUT_CMD ( HERMON_HCR_QUERY_EQ,
						 1, sizeof ( *eqctx ) ),
			    0, NULL, index, eqctx );
}

static inline int
hermon_cmd_sw2hw_cq ( struct hermon *hermon, unsigned long cqn,
		      const struct hermonprm_completion_queue_context *cqctx ){
	return hermon_cmd ( hermon,
			    HERMON_HCR_IN_CMD ( HERMON_HCR_SW2HW_CQ,
						1, sizeof ( *cqctx ) ),
			    0, cqctx, cqn, NULL );
}

static inline int
hermon_cmd_hw2sw_cq ( struct hermon *hermon, unsigned long cqn,
		      struct hermonprm_completion_queue_context *cqctx ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_OUT_CMD ( HERMON_HCR_HW2SW_CQ,
						 1, sizeof ( *cqctx ) ),
			    0, NULL, cqn, cqctx );
}

static inline int
hermon_cmd_query_cq ( struct hermon *hermon, unsigned long cqn,
		      struct hermonprm_completion_queue_context *cqctx ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_OUT_CMD ( HERMON_HCR_QUERY_CQ,
						 1, sizeof ( *cqctx ) ),
			    0, NULL, cqn, cqctx );
}

static inline int
hermon_cmd_rst2init_qp ( struct hermon *hermon, unsigned long qpn,
			 const struct hermonprm_qp_ee_state_transitions *ctx ){
	return hermon_cmd ( hermon,
			    HERMON_HCR_IN_CMD ( HERMON_HCR_RST2INIT_QP,
						1, sizeof ( *ctx ) ),
			    0, ctx, qpn, NULL );
}

static inline int
hermon_cmd_init2rtr_qp ( struct hermon *hermon, unsigned long qpn,
			 const struct hermonprm_qp_ee_state_transitions *ctx ){
	return hermon_cmd ( hermon,
			    HERMON_HCR_IN_CMD ( HERMON_HCR_INIT2RTR_QP,
						1, sizeof ( *ctx ) ),
			    0, ctx, qpn, NULL );
}

static inline int
hermon_cmd_rtr2rts_qp ( struct hermon *hermon, unsigned long qpn,
			const struct hermonprm_qp_ee_state_transitions *ctx ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_IN_CMD ( HERMON_HCR_RTR2RTS_QP,
						1, sizeof ( *ctx ) ),
			    0, ctx, qpn, NULL );
}

static inline int
hermon_cmd_rts2rts_qp ( struct hermon *hermon, unsigned long qpn,
			const struct hermonprm_qp_ee_state_transitions *ctx ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_IN_CMD ( HERMON_HCR_RTS2RTS_QP,
						1, sizeof ( *ctx ) ),
			    0, ctx, qpn, NULL );
}

static inline int
hermon_cmd_2rst_qp ( struct hermon *hermon, unsigned long qpn ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_VOID_CMD ( HERMON_HCR_2RST_QP ),
			    0x03, NULL, qpn, NULL );
}

static inline int
hermon_cmd_query_qp ( struct hermon *hermon, unsigned long qpn,
		      struct hermonprm_qp_ee_state_transitions *ctx ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_OUT_CMD ( HERMON_HCR_QUERY_QP,
						 1, sizeof ( *ctx ) ),
			    0, NULL, qpn, ctx );
}

static inline int
hermon_cmd_conf_special_qp ( struct hermon *hermon, unsigned int internal_qps,
			     unsigned long base_qpn ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_VOID_CMD ( HERMON_HCR_CONF_SPECIAL_QP ),
			    internal_qps, NULL, base_qpn, NULL );
}

static inline int
hermon_cmd_mad_ifc ( struct hermon *hermon, unsigned int port,
		     union hermonprm_mad *mad ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_INOUT_CMD ( HERMON_HCR_MAD_IFC,
						   1, sizeof ( *mad ),
						   1, sizeof ( *mad ) ),
			    0x03, mad, port, mad );
}

static inline int
hermon_cmd_read_mcg ( struct hermon *hermon, unsigned int index,
		      struct hermonprm_mcg_entry *mcg ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_OUT_CMD ( HERMON_HCR_READ_MCG,
						 1, sizeof ( *mcg ) ),
			    0, NULL, index, mcg );
}

static inline int
hermon_cmd_write_mcg ( struct hermon *hermon, unsigned int index,
		       const struct hermonprm_mcg_entry *mcg ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_IN_CMD ( HERMON_HCR_WRITE_MCG,
						1, sizeof ( *mcg ) ),
			    0, mcg, index, NULL );
}

static inline int
hermon_cmd_mgid_hash ( struct hermon *hermon, const union ib_gid *gid,
		       struct hermonprm_mgm_hash *hash ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_INOUT_CMD ( HERMON_HCR_MGID_HASH,
						   1, sizeof ( *gid ),
						   0, sizeof ( *hash ) ),
			    0, gid, 0, hash );
}

static inline int
hermon_cmd_mod_stat_cfg ( struct hermon *hermon, unsigned int mode,
			  unsigned int input_mod,
			  struct hermonprm_scalar_parameter *portion ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_INOUT_CMD ( HERMON_HCR_MOD_STAT_CFG,
						   0, sizeof ( *portion ),
						   0, sizeof ( *portion ) ),
			    mode, portion, input_mod, portion );
}

static inline int
hermon_cmd_query_port ( struct hermon *hermon, unsigned int port,
			struct hermonprm_query_port_cap *query_port ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_OUT_CMD ( HERMON_HCR_QUERY_PORT,
						 1, sizeof ( *query_port ) ),
			    0, NULL, port, query_port );
}

static inline int
hermon_cmd_sense_port ( struct hermon *hermon, unsigned int port,
			struct hermonprm_sense_port *port_type ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_OUT_CMD ( HERMON_HCR_SENSE_PORT,
						 0, sizeof ( *port_type ) ),
			    0, NULL, port, port_type );
}

static inline int
hermon_cmd_run_fw ( struct hermon *hermon ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_VOID_CMD ( HERMON_HCR_RUN_FW ),
			    0, NULL, 0, NULL );
}

static inline int
hermon_cmd_unmap_icm ( struct hermon *hermon, unsigned int page_count,
		       const struct hermonprm_scalar_parameter *offset ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_IN_CMD ( HERMON_HCR_UNMAP_ICM,
						0, sizeof ( *offset ) ),
			    0, offset, page_count, NULL );
}

static inline int
hermon_cmd_map_icm ( struct hermon *hermon,
		     const struct hermonprm_virtual_physical_mapping *map ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_IN_CMD ( HERMON_HCR_MAP_ICM,
						1, sizeof ( *map ) ),
			    0, map, 1, NULL );
}

static inline int
hermon_cmd_unmap_icm_aux ( struct hermon *hermon ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_VOID_CMD ( HERMON_HCR_UNMAP_ICM_AUX ),
			    0, NULL, 0, NULL );
}

static inline int
hermon_cmd_map_icm_aux ( struct hermon *hermon,
		       const struct hermonprm_virtual_physical_mapping *map ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_IN_CMD ( HERMON_HCR_MAP_ICM_AUX,
						1, sizeof ( *map ) ),
			    0, map, 1, NULL );
}

static inline int
hermon_cmd_set_icm_size ( struct hermon *hermon,
			  const struct hermonprm_scalar_parameter *icm_size,
			  struct hermonprm_scalar_parameter *icm_aux_size ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_INOUT_CMD ( HERMON_HCR_SET_ICM_SIZE,
						   0, sizeof ( *icm_size ),
						   0, sizeof (*icm_aux_size) ),
			    0, icm_size, 0, icm_aux_size );
}

static inline int
hermon_cmd_unmap_fa ( struct hermon *hermon ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_VOID_CMD ( HERMON_HCR_UNMAP_FA ),
			    0, NULL, 0, NULL );
}

static inline int
hermon_cmd_map_fa ( struct hermon *hermon,
		    const struct hermonprm_virtual_physical_mapping *map ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_IN_CMD ( HERMON_HCR_MAP_FA,
						1, sizeof ( *map ) ),
			    0, map, 1, NULL );
}

/***************************************************************************
 *
 * Memory translation table operations
 *
 ***************************************************************************
 */

/**
 * Allocate MTT entries
 *
 * @v hermon		Hermon device
 * @v memory		Memory to map into MTT
 * @v len		Length of memory to map
 * @v mtt		MTT descriptor to fill in
 * @ret rc		Return status code
 */
static int hermon_alloc_mtt ( struct hermon *hermon,
			      const void *memory, size_t len,
			      struct hermon_mtt *mtt ) {
	struct hermonprm_write_mtt write_mtt;
	physaddr_t start;
	physaddr_t addr;
	unsigned int page_offset;
	unsigned int num_pages;
	int mtt_offset;
	unsigned int mtt_base_addr;
	unsigned int i;
	int rc;

	/* Find available MTT entries */
	start = virt_to_phys ( memory );
	page_offset = ( start & ( HERMON_PAGE_SIZE - 1 ) );
	start -= page_offset;
	len += page_offset;
	num_pages = ( ( len + HERMON_PAGE_SIZE - 1 ) / HERMON_PAGE_SIZE );
	mtt_offset = hermon_bitmask_alloc ( hermon->mtt_inuse, HERMON_MAX_MTTS,
					    num_pages );
	if ( mtt_offset < 0 ) {
		DBGC ( hermon, "Hermon %p could not allocate %d MTT entries\n",
		       hermon, num_pages );
		rc = mtt_offset;
		goto err_mtt_offset;
	}
	mtt_base_addr = ( ( hermon->cap.reserved_mtts + mtt_offset ) *
			  hermon->cap.mtt_entry_size );
	addr = start;

	/* Fill in MTT structure */
	mtt->mtt_offset = mtt_offset;
	mtt->num_pages = num_pages;
	mtt->mtt_base_addr = mtt_base_addr;
	mtt->page_offset = page_offset;

	/* Construct and issue WRITE_MTT commands */
	for ( i = 0 ; i < num_pages ; i++ ) {
		memset ( &write_mtt, 0, sizeof ( write_mtt ) );
		MLX_FILL_1 ( &write_mtt.mtt_base_addr, 1,
			     value, mtt_base_addr );
		MLX_FILL_H ( &write_mtt.mtt, 0, ptag_h, addr );
		MLX_FILL_2 ( &write_mtt.mtt, 1,
			     p, 1,
			     ptag_l, ( addr >> 3 ) );
		if ( ( rc = hermon_cmd_write_mtt ( hermon,
						   &write_mtt ) ) != 0 ) {
			DBGC ( hermon, "Hermon %p could not write MTT at %x\n",
			       hermon, mtt_base_addr );
			goto err_write_mtt;
		}
		addr += HERMON_PAGE_SIZE;
		mtt_base_addr += hermon->cap.mtt_entry_size;
	}

	DBGC ( hermon, "Hermon %p MTT entries [%#x,%#x] for "
	       "[%08lx,%08lx,%08lx,%08lx)\n", hermon, mtt->mtt_offset,
	       ( mtt->mtt_offset + mtt->num_pages - 1 ), start,
	       ( start + page_offset ), ( start + len ), addr );

	return 0;

 err_write_mtt:
	hermon_bitmask_free ( hermon->mtt_inuse, mtt_offset, num_pages );
 err_mtt_offset:
	return rc;
}

/**
 * Free MTT entries
 *
 * @v hermon		Hermon device
 * @v mtt		MTT descriptor
 */
static void hermon_free_mtt ( struct hermon *hermon,
			      struct hermon_mtt *mtt ) {

	DBGC ( hermon, "Hermon %p MTT entries [%#x,%#x] freed\n",
	       hermon, mtt->mtt_offset,
	       ( mtt->mtt_offset + mtt->num_pages - 1 ) );
	hermon_bitmask_free ( hermon->mtt_inuse, mtt->mtt_offset,
			      mtt->num_pages );
}
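
/*
 * Example ( illustrative sketch only ): the queue creation code later
 * in this file maps each ring buffer through these helpers, roughly:
 *
 *	struct hermon_mtt mtt;
 *	if ( ( rc = hermon_alloc_mtt ( hermon, buf, len, &mtt ) ) != 0 )
 *		return rc;
 *	... program mtt.mtt_base_addr and mtt.page_offset into a
 *	hardware context, then on teardown ...
 *	hermon_free_mtt ( hermon, &mtt );
 */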

/***************************************************************************
 *
 * Static configuration operations
 *
 ***************************************************************************
 */

/**
 * Calculate offset within static configuration
 *
 * @v field		Field
 * @ret offset		Offset
 */
#define HERMON_MOD_STAT_CFG_OFFSET( field )				     \
	( ( MLX_BIT_OFFSET ( struct hermonprm_mod_stat_cfg_st, field ) / 8 ) \
	  & ~( sizeof ( struct hermonprm_scalar_parameter ) - 1 ) )
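
/*
 * Worked example: assuming an 8-byte hermonprm_scalar_parameter, a
 * field beginning 72 bits into hermonprm_mod_stat_cfg_st lies at byte
 * offset 9; masking down to scalar-parameter granularity yields 8, the
 * start of the enclosing 8-byte portion that MOD_STAT_CFG transfers.
 */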

/**
 * Query or modify static configuration
 *
 * @v hermon		Hermon device
 * @v port		Port
 * @v mode		Command mode
 * @v offset		Offset within static configuration
 * @v stat_cfg		Static configuration
 * @ret rc		Return status code
 */
static int hermon_mod_stat_cfg ( struct hermon *hermon, unsigned int port,
				 unsigned int mode, unsigned int offset,
				 struct hermonprm_mod_stat_cfg *stat_cfg ) {
	struct hermonprm_scalar_parameter *portion =
		( ( void * ) &stat_cfg->u.bytes[offset] );
	struct hermonprm_mod_stat_cfg_input_mod mod;
	int rc;

	/* Sanity check */
	assert ( ( offset % sizeof ( *portion ) ) == 0 );

	/* Construct input modifier */
	memset ( &mod, 0, sizeof ( mod ) );
	MLX_FILL_2 ( &mod, 0,
		     portnum, port,
		     offset, offset );

	/* Issue command */
	if ( ( rc = hermon_cmd_mod_stat_cfg ( hermon, mode,
					      be32_to_cpu ( mod.u.dwords[0] ),
					      portion ) ) != 0 )
		return rc;

	return 0;
}

/***************************************************************************
 *
 * MAD operations
 *
 ***************************************************************************
 */

/**
 * Issue management datagram
 *
 * @v ibdev		Infiniband device
 * @v mad		Management datagram
 * @ret rc		Return status code
 */
static int hermon_mad ( struct ib_device *ibdev, union ib_mad *mad ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );
	union hermonprm_mad mad_ifc;
	int rc;

	linker_assert ( sizeof ( *mad ) == sizeof ( mad_ifc.mad ),
			mad_size_mismatch );

	/* Copy in request packet */
	memcpy ( &mad_ifc.mad, mad, sizeof ( mad_ifc.mad ) );

	/* Issue MAD */
	if ( ( rc = hermon_cmd_mad_ifc ( hermon, ibdev->port,
					 &mad_ifc ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p port %d could not issue MAD IFC: "
		       "%s\n", hermon, ibdev->port, strerror ( rc ) );
		return rc;
	}

	/* Copy out reply packet */
	memcpy ( mad, &mad_ifc.mad, sizeof ( *mad ) );

	if ( mad->hdr.status != 0 ) {
		DBGC ( hermon, "Hermon %p port %d MAD IFC status %04x\n",
		       hermon, ibdev->port, ntohs ( mad->hdr.status ) );
		return -EIO;
	}
	return 0;
}

/***************************************************************************
 *
 * Completion queue operations
 *
 ***************************************************************************
 */

/**
 * Dump completion queue context (for debugging only)
 *
 * @v hermon		Hermon device
 * @v cq		Completion queue
 * @ret rc		Return status code
 */
static __attribute__ (( unused )) int
hermon_dump_cqctx ( struct hermon *hermon, struct ib_completion_queue *cq ) {
	struct hermonprm_completion_queue_context cqctx;
	int rc;

	memset ( &cqctx, 0, sizeof ( cqctx ) );
	if ( ( rc = hermon_cmd_query_cq ( hermon, cq->cqn, &cqctx ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p CQN %#lx QUERY_CQ failed: %s\n",
		       hermon, cq->cqn, strerror ( rc ) );
		return rc;
	}
	DBGC ( hermon, "Hermon %p CQN %#lx context:\n", hermon, cq->cqn );
	DBGC_HDA ( hermon, 0, &cqctx, sizeof ( cqctx ) );

	return 0;
}

/**
 * Create completion queue
 *
 * @v ibdev		Infiniband device
 * @v cq		Completion queue
 * @ret rc		Return status code
 */
static int hermon_create_cq ( struct ib_device *ibdev,
			      struct ib_completion_queue *cq ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );
	struct hermon_completion_queue *hermon_cq;
	struct hermonprm_completion_queue_context cqctx;
	int cqn_offset;
	unsigned int i;
	int rc;

	/* Find a free completion queue number */
	cqn_offset = hermon_bitmask_alloc ( hermon->cq_inuse,
					    HERMON_MAX_CQS, 1 );
	if ( cqn_offset < 0 ) {
		DBGC ( hermon, "Hermon %p out of completion queues\n",
		       hermon );
		rc = cqn_offset;
		goto err_cqn_offset;
	}
	cq->cqn = ( hermon->cap.reserved_cqs + cqn_offset );

	/* Allocate control structures */
	hermon_cq = zalloc ( sizeof ( *hermon_cq ) );
	if ( ! hermon_cq ) {
		rc = -ENOMEM;
		goto err_hermon_cq;
	}

	/* Allocate doorbell */
	hermon_cq->doorbell = malloc_dma ( sizeof ( hermon_cq->doorbell[0] ),
					   sizeof ( hermon_cq->doorbell[0] ) );
	if ( ! hermon_cq->doorbell ) {
		rc = -ENOMEM;
		goto err_doorbell;
	}
	memset ( hermon_cq->doorbell, 0, sizeof ( hermon_cq->doorbell[0] ) );

	/* Allocate completion queue itself */
	hermon_cq->cqe_size = ( cq->num_cqes * sizeof ( hermon_cq->cqe[0] ) );
	hermon_cq->cqe = malloc_dma ( hermon_cq->cqe_size,
				      sizeof ( hermon_cq->cqe[0] ) );
	if ( ! hermon_cq->cqe ) {
		rc = -ENOMEM;
		goto err_cqe;
	}
	memset ( hermon_cq->cqe, 0, hermon_cq->cqe_size );
	for ( i = 0 ; i < cq->num_cqes ; i++ ) {
		MLX_FILL_1 ( &hermon_cq->cqe[i].normal, 7, owner, 1 );
	}
	barrier();

	/* Allocate MTT entries */
	if ( ( rc = hermon_alloc_mtt ( hermon, hermon_cq->cqe,
				       hermon_cq->cqe_size,
				       &hermon_cq->mtt ) ) != 0 )
		goto err_alloc_mtt;

	/* Hand queue over to hardware */
	memset ( &cqctx, 0, sizeof ( cqctx ) );
	MLX_FILL_1 ( &cqctx, 0, st, 0xa /* "Event fired" */ );
	MLX_FILL_1 ( &cqctx, 2,
		     page_offset, ( hermon_cq->mtt.page_offset >> 5 ) );
	MLX_FILL_2 ( &cqctx, 3,
		     usr_page, HERMON_UAR_NON_EQ_PAGE,
		     log_cq_size, fls ( cq->num_cqes - 1 ) );
	MLX_FILL_1 ( &cqctx, 5, c_eqn, hermon->eq.eqn );
	MLX_FILL_H ( &cqctx, 6, mtt_base_addr_h,
		     hermon_cq->mtt.mtt_base_addr );
	MLX_FILL_1 ( &cqctx, 7, mtt_base_addr_l,
		     ( hermon_cq->mtt.mtt_base_addr >> 3 ) );
	MLX_FILL_H ( &cqctx, 14, db_record_addr_h,
		     virt_to_phys ( hermon_cq->doorbell ) );
	MLX_FILL_1 ( &cqctx, 15, db_record_addr_l,
		     ( virt_to_phys ( hermon_cq->doorbell ) >> 3 ) );
	if ( ( rc = hermon_cmd_sw2hw_cq ( hermon, cq->cqn, &cqctx ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p CQN %#lx SW2HW_CQ failed: %s\n",
		       hermon, cq->cqn, strerror ( rc ) );
		goto err_sw2hw_cq;
	}

	DBGC ( hermon, "Hermon %p CQN %#lx ring [%08lx,%08lx), doorbell "
	       "%08lx\n", hermon, cq->cqn, virt_to_phys ( hermon_cq->cqe ),
	       ( virt_to_phys ( hermon_cq->cqe ) + hermon_cq->cqe_size ),
	       virt_to_phys ( hermon_cq->doorbell ) );
	ib_cq_set_drvdata ( cq, hermon_cq );
	return 0;

 err_sw2hw_cq:
	hermon_free_mtt ( hermon, &hermon_cq->mtt );
 err_alloc_mtt:
	free_dma ( hermon_cq->cqe, hermon_cq->cqe_size );
 err_cqe:
	free_dma ( hermon_cq->doorbell, sizeof ( hermon_cq->doorbell[0] ) );
 err_doorbell:
	free ( hermon_cq );
 err_hermon_cq:
	hermon_bitmask_free ( hermon->cq_inuse, cqn_offset, 1 );
 err_cqn_offset:
	return rc;
}
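
/*
 * Sizing note: log_cq_size above is fls ( cq->num_cqes - 1 ), i.e. the
 * base-2 logarithm of the ( power-of-two ) queue size.  For example,
 * with num_cqes == 8, fls ( 7 ) == 3 and the hardware sees a 2^3-entry
 * completion queue.
 */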

/**
 * Destroy completion queue
 *
 * @v ibdev		Infiniband device
 * @v cq		Completion queue
 */
static void hermon_destroy_cq ( struct ib_device *ibdev,
				struct ib_completion_queue *cq ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );
	struct hermon_completion_queue *hermon_cq = ib_cq_get_drvdata ( cq );
	struct hermonprm_completion_queue_context cqctx;
	int cqn_offset;
	int rc;

	/* Take ownership back from hardware */
	if ( ( rc = hermon_cmd_hw2sw_cq ( hermon, cq->cqn, &cqctx ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p CQN %#lx FATAL HW2SW_CQ failed: "
		       "%s\n", hermon, cq->cqn, strerror ( rc ) );
		/* Leak memory and return; at least we avoid corruption */
		return;
	}

	/* Free MTT entries */
	hermon_free_mtt ( hermon, &hermon_cq->mtt );

	/* Free memory */
	free_dma ( hermon_cq->cqe, hermon_cq->cqe_size );
	free_dma ( hermon_cq->doorbell, sizeof ( hermon_cq->doorbell[0] ) );
	free ( hermon_cq );

	/* Mark queue number as free */
	cqn_offset = ( cq->cqn - hermon->cap.reserved_cqs );
	hermon_bitmask_free ( hermon->cq_inuse, cqn_offset, 1 );

	ib_cq_set_drvdata ( cq, NULL );
}

/***************************************************************************
 *
 * Queue pair operations
 *
 ***************************************************************************
 */

/**
 * Assign queue pair number
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @ret rc		Return status code
 */
static int hermon_alloc_qpn ( struct ib_device *ibdev,
			      struct ib_queue_pair *qp ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );
	unsigned int port_offset;
	int qpn_offset;

	/* Calculate queue pair number */
	port_offset = ( ibdev->port - HERMON_PORT_BASE );

	switch ( qp->type ) {
	case IB_QPT_SMI:
		qp->qpn = ( hermon->special_qpn_base + port_offset );
		return 0;
	case IB_QPT_GSI:
		qp->qpn = ( hermon->special_qpn_base + 2 + port_offset );
		return 0;
	case IB_QPT_UD:
	case IB_QPT_RC:
	case IB_QPT_ETH:
		/* Find a free queue pair number */
		qpn_offset = hermon_bitmask_alloc ( hermon->qp_inuse,
						    HERMON_MAX_QPS, 1 );
		if ( qpn_offset < 0 ) {
			DBGC ( hermon, "Hermon %p out of queue pairs\n",
			       hermon );
			return qpn_offset;
		}
		qp->qpn = ( ( random() & HERMON_QPN_RANDOM_MASK ) |
			    ( hermon->qpn_base + qpn_offset ) );
		return 0;
	default:
		DBGC ( hermon, "Hermon %p unsupported QP type %d\n",
		       hermon, qp->type );
		return -ENOTSUP;
	}
}

/**
 * Free queue pair number
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 */
static void hermon_free_qpn ( struct ib_device *ibdev,
			      struct ib_queue_pair *qp ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );
	int qpn_offset;

	qpn_offset = ( ( qp->qpn & ~HERMON_QPN_RANDOM_MASK )
		       - hermon->qpn_base );
	if ( qpn_offset >= 0 )
		hermon_bitmask_free ( hermon->qp_inuse, qpn_offset, 1 );
}

/**
 * Calculate transmission rate
 *
 * @v av		Address vector
 * @ret hermon_rate	Hermon rate
 */
static unsigned int hermon_rate ( struct ib_address_vector *av ) {
	return ( ( ( av->rate >= IB_RATE_2_5 ) && ( av->rate <= IB_RATE_120 ) )
		 ? ( av->rate + 5 ) : 0 );
}
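
/*
 * The "+ 5" converts the IB static rate encoding into the Hermon
 * max_stat_rate encoding; for example, assuming the usual IBA encoding
 * in which IB_RATE_10 == 3, a 10 Gbps address vector yields the value
 * 8.  Out-of-range rates map to 0, which presumably means "no rate
 * limit" to the hardware.
 */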

/**
 * Calculate schedule queue
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @ret sched_queue	Schedule queue
 */
static unsigned int hermon_sched_queue ( struct ib_device *ibdev,
					 struct ib_queue_pair *qp ) {
	return ( ( ( qp->type == IB_QPT_SMI ) ?
		   HERMON_SCHED_QP0 : HERMON_SCHED_DEFAULT ) |
		 ( ( ibdev->port - 1 ) << 6 ) );
}

/** Queue pair transport service type map */
static uint8_t hermon_qp_st[] = {
	[IB_QPT_SMI] = HERMON_ST_MLX,
	[IB_QPT_GSI] = HERMON_ST_MLX,
	[IB_QPT_UD] = HERMON_ST_UD,
	[IB_QPT_RC] = HERMON_ST_RC,
	[IB_QPT_ETH] = HERMON_ST_MLX,
};

/**
 * Dump queue pair context (for debugging only)
 *
 * @v hermon		Hermon device
 * @v qp		Queue pair
 * @ret rc		Return status code
 */
static __attribute__ (( unused )) int
hermon_dump_qpctx ( struct hermon *hermon, struct ib_queue_pair *qp ) {
	struct hermonprm_qp_ee_state_transitions qpctx;
	int rc;

	memset ( &qpctx, 0, sizeof ( qpctx ) );
	if ( ( rc = hermon_cmd_query_qp ( hermon, qp->qpn, &qpctx ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p QPN %#lx QUERY_QP failed: %s\n",
		       hermon, qp->qpn, strerror ( rc ) );
		return rc;
	}
	DBGC ( hermon, "Hermon %p QPN %#lx context:\n", hermon, qp->qpn );
	DBGC_HDA ( hermon, 0, &qpctx.u.dwords[2], ( sizeof ( qpctx ) - 8 ) );

	return 0;
}

/**
 * Create queue pair
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @ret rc		Return status code
 */
static int hermon_create_qp ( struct ib_device *ibdev,
			      struct ib_queue_pair *qp ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );
	struct hermon_queue_pair *hermon_qp;
	struct hermonprm_qp_ee_state_transitions qpctx;
	int rc;

	/* Calculate queue pair number */
	if ( ( rc = hermon_alloc_qpn ( ibdev, qp ) ) != 0 )
		goto err_alloc_qpn;

	/* Allocate control structures */
	hermon_qp = zalloc ( sizeof ( *hermon_qp ) );
	if ( ! hermon_qp ) {
		rc = -ENOMEM;
		goto err_hermon_qp;
	}

	/* Allocate doorbells */
	hermon_qp->recv.doorbell =
		malloc_dma ( sizeof ( hermon_qp->recv.doorbell[0] ),
			     sizeof ( hermon_qp->recv.doorbell[0] ) );
	if ( ! hermon_qp->recv.doorbell ) {
		rc = -ENOMEM;
		goto err_recv_doorbell;
	}
	memset ( hermon_qp->recv.doorbell, 0,
		 sizeof ( hermon_qp->recv.doorbell[0] ) );
	hermon_qp->send.doorbell =
		( hermon->uar + HERMON_UAR_NON_EQ_PAGE * HERMON_PAGE_SIZE +
		  HERMON_DB_POST_SND_OFFSET );

	/* Allocate work queue buffer */
	hermon_qp->send.num_wqes = ( qp->send.num_wqes /* headroom */ + 1 +
				( 2048 / sizeof ( hermon_qp->send.wqe[0] ) ) );
	hermon_qp->send.num_wqes =
		( 1 << fls ( hermon_qp->send.num_wqes - 1 ) ); /* round up */
	hermon_qp->send.wqe_size = ( hermon_qp->send.num_wqes *
				     sizeof ( hermon_qp->send.wqe[0] ) );
	hermon_qp->recv.wqe_size = ( qp->recv.num_wqes *
				     sizeof ( hermon_qp->recv.wqe[0] ) );
	hermon_qp->wqe_size = ( hermon_qp->send.wqe_size +
				hermon_qp->recv.wqe_size );
	hermon_qp->wqe = malloc_dma ( hermon_qp->wqe_size,
				      sizeof ( hermon_qp->send.wqe[0] ) );
	if ( ! hermon_qp->wqe ) {
		rc = -ENOMEM;
		goto err_alloc_wqe;
	}
	hermon_qp->send.wqe = hermon_qp->wqe;
	memset ( hermon_qp->send.wqe, 0xff, hermon_qp->send.wqe_size );
	hermon_qp->recv.wqe = ( hermon_qp->wqe + hermon_qp->send.wqe_size );
	memset ( hermon_qp->recv.wqe, 0, hermon_qp->recv.wqe_size );

	/* Allocate MTT entries */
	if ( ( rc = hermon_alloc_mtt ( hermon, hermon_qp->wqe,
				       hermon_qp->wqe_size,
				       &hermon_qp->mtt ) ) != 0 ) {
		goto err_alloc_mtt;
	}

	/* Transition queue to INIT state */
	memset ( &qpctx, 0, sizeof ( qpctx ) );
	MLX_FILL_2 ( &qpctx, 2,
		     qpc_eec_data.pm_state, HERMON_PM_STATE_MIGRATED,
		     qpc_eec_data.st, hermon_qp_st[qp->type] );
	MLX_FILL_1 ( &qpctx, 3, qpc_eec_data.pd, HERMON_GLOBAL_PD );
	MLX_FILL_4 ( &qpctx, 4,
		     qpc_eec_data.log_rq_size, fls ( qp->recv.num_wqes - 1 ),
		     qpc_eec_data.log_rq_stride,
		     ( fls ( sizeof ( hermon_qp->recv.wqe[0] ) - 1 ) - 4 ),
		     qpc_eec_data.log_sq_size,
		     fls ( hermon_qp->send.num_wqes - 1 ),
		     qpc_eec_data.log_sq_stride,
		     ( fls ( sizeof ( hermon_qp->send.wqe[0] ) - 1 ) - 4 ) );
	MLX_FILL_1 ( &qpctx, 5,
		     qpc_eec_data.usr_page, HERMON_UAR_NON_EQ_PAGE );
	MLX_FILL_1 ( &qpctx, 33, qpc_eec_data.cqn_snd, qp->send.cq->cqn );
	MLX_FILL_4 ( &qpctx, 38,
		     qpc_eec_data.rre, 1,
		     qpc_eec_data.rwe, 1,
		     qpc_eec_data.rae, 1,
		     qpc_eec_data.page_offset,
		     ( hermon_qp->mtt.page_offset >> 6 ) );
	MLX_FILL_1 ( &qpctx, 41, qpc_eec_data.cqn_rcv, qp->recv.cq->cqn );
	MLX_FILL_H ( &qpctx, 42, qpc_eec_data.db_record_addr_h,
		     virt_to_phys ( hermon_qp->recv.doorbell ) );
	MLX_FILL_1 ( &qpctx, 43, qpc_eec_data.db_record_addr_l,
		     ( virt_to_phys ( hermon_qp->recv.doorbell ) >> 2 ) );
	MLX_FILL_H ( &qpctx, 52, qpc_eec_data.mtt_base_addr_h,
		     hermon_qp->mtt.mtt_base_addr );
	MLX_FILL_1 ( &qpctx, 53, qpc_eec_data.mtt_base_addr_l,
		     ( hermon_qp->mtt.mtt_base_addr >> 3 ) );
	if ( ( rc = hermon_cmd_rst2init_qp ( hermon, qp->qpn,
					     &qpctx ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p QPN %#lx RST2INIT_QP failed: %s\n",
		       hermon, qp->qpn, strerror ( rc ) );
		goto err_rst2init_qp;
	}
	hermon_qp->state = HERMON_QP_ST_INIT;

	DBGC ( hermon, "Hermon %p QPN %#lx send ring [%08lx,%08lx), doorbell "
	       "%08lx\n", hermon, qp->qpn,
	       virt_to_phys ( hermon_qp->send.wqe ),
	       ( virt_to_phys ( hermon_qp->send.wqe ) +
		 hermon_qp->send.wqe_size ),
	       virt_to_phys ( hermon_qp->send.doorbell ) );
	DBGC ( hermon, "Hermon %p QPN %#lx receive ring [%08lx,%08lx), "
	       "doorbell %08lx\n", hermon, qp->qpn,
	       virt_to_phys ( hermon_qp->recv.wqe ),
	       ( virt_to_phys ( hermon_qp->recv.wqe ) +
		 hermon_qp->recv.wqe_size ),
	       virt_to_phys ( hermon_qp->recv.doorbell ) );
	DBGC ( hermon, "Hermon %p QPN %#lx send CQN %#lx receive CQN %#lx\n",
	       hermon, qp->qpn, qp->send.cq->cqn, qp->recv.cq->cqn );
	ib_qp_set_drvdata ( qp, hermon_qp );
	return 0;

	hermon_cmd_2rst_qp ( hermon, qp->qpn );
 err_rst2init_qp:
	hermon_free_mtt ( hermon, &hermon_qp->mtt );
 err_alloc_mtt:
	free_dma ( hermon_qp->wqe, hermon_qp->wqe_size );
 err_alloc_wqe:
	free_dma ( hermon_qp->recv.doorbell,
		   sizeof ( hermon_qp->recv.doorbell[0] ) );
 err_recv_doorbell:
	free ( hermon_qp );
 err_hermon_qp:
	hermon_free_qpn ( ibdev, qp );
 err_alloc_qpn:
	return rc;
}

/**
 * Modify queue pair
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @ret rc		Return status code
 */
static int hermon_modify_qp ( struct ib_device *ibdev,
			      struct ib_queue_pair *qp ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );
	struct hermon_queue_pair *hermon_qp = ib_qp_get_drvdata ( qp );
	struct hermonprm_qp_ee_state_transitions qpctx;
	int rc;

	/* Transition queue to RTR state, if applicable */
	if ( hermon_qp->state < HERMON_QP_ST_RTR ) {
		memset ( &qpctx, 0, sizeof ( qpctx ) );
		MLX_FILL_2 ( &qpctx, 4,
			     qpc_eec_data.mtu,
			     ( ( qp->type == IB_QPT_ETH ) ?
			       HERMON_MTU_ETH : HERMON_MTU_2048 ),
			     qpc_eec_data.msg_max, 31 );
		MLX_FILL_1 ( &qpctx, 7,
			     qpc_eec_data.remote_qpn_een, qp->av.qpn );
		MLX_FILL_1 ( &qpctx, 9,
			     qpc_eec_data.primary_address_path.rlid,
			     qp->av.lid );
		MLX_FILL_1 ( &qpctx, 10,
			     qpc_eec_data.primary_address_path.max_stat_rate,
			     hermon_rate ( &qp->av ) );
		memcpy ( &qpctx.u.dwords[12], &qp->av.gid,
			 sizeof ( qp->av.gid ) );
		MLX_FILL_1 ( &qpctx, 16,
			     qpc_eec_data.primary_address_path.sched_queue,
			     hermon_sched_queue ( ibdev, qp ) );
		MLX_FILL_1 ( &qpctx, 39,
			     qpc_eec_data.next_rcv_psn, qp->recv.psn );
		if ( ( rc = hermon_cmd_init2rtr_qp ( hermon, qp->qpn,
						     &qpctx ) ) != 0 ) {
			DBGC ( hermon, "Hermon %p QPN %#lx INIT2RTR_QP failed:"
			       " %s\n", hermon, qp->qpn, strerror ( rc ) );
			return rc;
		}
		hermon_qp->state = HERMON_QP_ST_RTR;
	}

	/* Transition queue to RTS state */
	if ( hermon_qp->state < HERMON_QP_ST_RTS ) {
		memset ( &qpctx, 0, sizeof ( qpctx ) );
		MLX_FILL_1 ( &qpctx, 10,
			     qpc_eec_data.primary_address_path.ack_timeout,
			     14 /* 4.096us * 2^(14) = 67ms */ );
		MLX_FILL_2 ( &qpctx, 30,
			     qpc_eec_data.retry_count, HERMON_RETRY_MAX,
			     qpc_eec_data.rnr_retry, HERMON_RETRY_MAX );
		MLX_FILL_1 ( &qpctx, 32,
			     qpc_eec_data.next_send_psn, qp->send.psn );
		if ( ( rc = hermon_cmd_rtr2rts_qp ( hermon, qp->qpn,
						    &qpctx ) ) != 0 ) {
			DBGC ( hermon, "Hermon %p QPN %#lx RTR2RTS_QP failed: "
			       "%s\n", hermon, qp->qpn, strerror ( rc ) );
			return rc;
		}
		hermon_qp->state = HERMON_QP_ST_RTS;
	}

	/* Update parameters in RTS state */
	memset ( &qpctx, 0, sizeof ( qpctx ) );
	MLX_FILL_1 ( &qpctx, 0, opt_param_mask, HERMON_QP_OPT_PARAM_QKEY );
	MLX_FILL_1 ( &qpctx, 44, qpc_eec_data.q_key, qp->qkey );
	if ( ( rc = hermon_cmd_rts2rts_qp ( hermon, qp->qpn, &qpctx ) ) != 0 ){
		DBGC ( hermon, "Hermon %p QPN %#lx RTS2RTS_QP failed: %s\n",
		       hermon, qp->qpn, strerror ( rc ) );
		return rc;
	}

	return 0;
}

/**
 * Destroy queue pair
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 */
static void hermon_destroy_qp ( struct ib_device *ibdev,
				struct ib_queue_pair *qp ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );
	struct hermon_queue_pair *hermon_qp = ib_qp_get_drvdata ( qp );
	int rc;

	/* Take ownership back from hardware */
	if ( ( rc = hermon_cmd_2rst_qp ( hermon, qp->qpn ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p QPN %#lx FATAL 2RST_QP failed: %s\n",
		       hermon, qp->qpn, strerror ( rc ) );
		/* Leak memory and return; at least we avoid corruption */
		return;
	}

	/* Free MTT entries */
	hermon_free_mtt ( hermon, &hermon_qp->mtt );

	/* Free memory */
	free_dma ( hermon_qp->wqe, hermon_qp->wqe_size );
	free_dma ( hermon_qp->recv.doorbell,
		   sizeof ( hermon_qp->recv.doorbell[0] ) );
	free ( hermon_qp );

	/* Mark queue number as free */
	hermon_free_qpn ( ibdev, qp );

	ib_qp_set_drvdata ( qp, NULL );
}

/***************************************************************************
 *
 * Work request operations
 *
 ***************************************************************************
 */

/**
 * Construct NOP send work queue entry
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @v dest		Destination address vector
 * @v iobuf		I/O buffer
 * @v wqe		Send work queue entry
 * @ret opcode		Control opcode
 */
static __attribute__ (( unused )) unsigned int
hermon_fill_nop_send_wqe ( struct ib_device *ibdev __unused,
			   struct ib_queue_pair *qp __unused,
			   struct ib_address_vector *dest __unused,
			   struct io_buffer *iobuf __unused,
			   union hermon_send_wqe *wqe ) {

	MLX_FILL_1 ( &wqe->ctrl, 1, ds, ( sizeof ( wqe->ctrl ) / 16 ) );
	MLX_FILL_1 ( &wqe->ctrl, 2, c, 0x03 /* generate completion */ );
	return HERMON_OPCODE_NOP;
}

/**
 * Construct UD send work queue entry
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @v dest		Destination address vector
 * @v iobuf		I/O buffer
 * @v wqe		Send work queue entry
 * @ret opcode		Control opcode
 */
static unsigned int
hermon_fill_ud_send_wqe ( struct ib_device *ibdev,
			  struct ib_queue_pair *qp __unused,
			  struct ib_address_vector *dest,
			  struct io_buffer *iobuf,
			  union hermon_send_wqe *wqe ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );

	MLX_FILL_1 ( &wqe->ud.ctrl, 1, ds,
		     ( ( offsetof ( typeof ( wqe->ud ), data[1] ) / 16 ) ) );
	MLX_FILL_1 ( &wqe->ud.ctrl, 2, c, 0x03 /* generate completion */ );
	MLX_FILL_2 ( &wqe->ud.ud, 0,
		     ud_address_vector.pd, HERMON_GLOBAL_PD,
		     ud_address_vector.port_number, ibdev->port );
	MLX_FILL_2 ( &wqe->ud.ud, 1,
		     ud_address_vector.rlid, dest->lid,
		     ud_address_vector.g, dest->gid_present );
	MLX_FILL_1 ( &wqe->ud.ud, 2,
		     ud_address_vector.max_stat_rate, hermon_rate ( dest ) );
	MLX_FILL_1 ( &wqe->ud.ud, 3, ud_address_vector.sl, dest->sl );
	memcpy ( &wqe->ud.ud.u.dwords[4], &dest->gid, sizeof ( dest->gid ) );
	MLX_FILL_1 ( &wqe->ud.ud, 8, destination_qp, dest->qpn );
	MLX_FILL_1 ( &wqe->ud.ud, 9, q_key, dest->qkey );
	MLX_FILL_1 ( &wqe->ud.data[0], 0, byte_count, iob_len ( iobuf ) );
	MLX_FILL_1 ( &wqe->ud.data[0], 1, l_key, hermon->lkey );
	MLX_FILL_H ( &wqe->ud.data[0], 2,
		     local_address_h, virt_to_bus ( iobuf->data ) );
	MLX_FILL_1 ( &wqe->ud.data[0], 3,
		     local_address_l, virt_to_bus ( iobuf->data ) );
	return HERMON_OPCODE_SEND;
}

/**
 * Construct MLX send work queue entry
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @v dest		Destination address vector
 * @v iobuf		I/O buffer
 * @v wqe		Send work queue entry
 * @ret opcode		Control opcode
 */
static unsigned int
hermon_fill_mlx_send_wqe ( struct ib_device *ibdev,
			   struct ib_queue_pair *qp,
			   struct ib_address_vector *dest,
			   struct io_buffer *iobuf,
			   union hermon_send_wqe *wqe ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );
	struct io_buffer headers;

	/* Construct IB headers */
	iob_populate ( &headers, &wqe->mlx.headers, 0,
		       sizeof ( wqe->mlx.headers ) );
	iob_reserve ( &headers, sizeof ( wqe->mlx.headers ) );
	ib_push ( ibdev, &headers, qp, iob_len ( iobuf ), dest );

	/* Fill work queue entry */
	MLX_FILL_1 ( &wqe->mlx.ctrl, 1, ds,
		     ( ( offsetof ( typeof ( wqe->mlx ), data[2] ) / 16 ) ) );
	MLX_FILL_5 ( &wqe->mlx.ctrl, 2,
		     c, 0x03 /* generate completion */,
		     icrc, 0 /* generate ICRC */,
		     max_statrate, hermon_rate ( dest ),
		     slr, 0,
		     v15, ( ( qp->ext_qpn == IB_QPN_SMI ) ? 1 : 0 ) );
	MLX_FILL_1 ( &wqe->mlx.ctrl, 3, rlid, dest->lid );
	MLX_FILL_1 ( &wqe->mlx.data[0], 0,
		     byte_count, iob_len ( &headers ) );
	MLX_FILL_1 ( &wqe->mlx.data[0], 1, l_key, hermon->lkey );
	MLX_FILL_H ( &wqe->mlx.data[0], 2,
		     local_address_h, virt_to_bus ( headers.data ) );
	MLX_FILL_1 ( &wqe->mlx.data[0], 3,
		     local_address_l, virt_to_bus ( headers.data ) );
	MLX_FILL_1 ( &wqe->mlx.data[1], 0,
		     byte_count, ( iob_len ( iobuf ) + 4 /* ICRC */ ) );
	MLX_FILL_1 ( &wqe->mlx.data[1], 1, l_key, hermon->lkey );
	MLX_FILL_H ( &wqe->mlx.data[1], 2,
		     local_address_h, virt_to_bus ( iobuf->data ) );
	MLX_FILL_1 ( &wqe->mlx.data[1], 3,
		     local_address_l, virt_to_bus ( iobuf->data ) );
	return HERMON_OPCODE_SEND;
}

/**
 * Construct RC send work queue entry
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @v dest		Destination address vector
 * @v iobuf		I/O buffer
 * @v wqe		Send work queue entry
 * @ret opcode		Control opcode
 */
static unsigned int
hermon_fill_rc_send_wqe ( struct ib_device *ibdev,
			  struct ib_queue_pair *qp __unused,
			  struct ib_address_vector *dest __unused,
			  struct io_buffer *iobuf,
			  union hermon_send_wqe *wqe ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );

	MLX_FILL_1 ( &wqe->rc.ctrl, 1, ds,
		     ( ( offsetof ( typeof ( wqe->rc ), data[1] ) / 16 ) ) );
	MLX_FILL_1 ( &wqe->rc.ctrl, 2, c, 0x03 /* generate completion */ );
	MLX_FILL_1 ( &wqe->rc.data[0], 0, byte_count, iob_len ( iobuf ) );
	MLX_FILL_1 ( &wqe->rc.data[0], 1, l_key, hermon->lkey );
	MLX_FILL_H ( &wqe->rc.data[0], 2,
		     local_address_h, virt_to_bus ( iobuf->data ) );
	MLX_FILL_1 ( &wqe->rc.data[0], 3,
		     local_address_l, virt_to_bus ( iobuf->data ) );
	return HERMON_OPCODE_SEND;
}

/**
 * Construct Ethernet send work queue entry
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @v dest		Destination address vector
 * @v iobuf		I/O buffer
 * @v wqe		Send work queue entry
 * @ret opcode		Control opcode
 */
static unsigned int
hermon_fill_eth_send_wqe ( struct ib_device *ibdev,
			   struct ib_queue_pair *qp __unused,
			   struct ib_address_vector *dest __unused,
			   struct io_buffer *iobuf,
			   union hermon_send_wqe *wqe ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );

	/* Fill work queue entry */
	MLX_FILL_1 ( &wqe->eth.ctrl, 1, ds,
		     ( ( offsetof ( typeof ( wqe->eth ), data[1] ) / 16 ) ) );
	MLX_FILL_2 ( &wqe->eth.ctrl, 2,
		     c, 0x03 /* generate completion */,
		     s, 1 /* inhibit ICRC */ );
	MLX_FILL_1 ( &wqe->eth.data[0], 0,
		     byte_count, iob_len ( iobuf ) );
	MLX_FILL_1 ( &wqe->eth.data[0], 1, l_key, hermon->lkey );
	MLX_FILL_H ( &wqe->eth.data[0], 2,
		     local_address_h, virt_to_bus ( iobuf->data ) );
	MLX_FILL_1 ( &wqe->eth.data[0], 3,
		     local_address_l, virt_to_bus ( iobuf->data ) );
	return HERMON_OPCODE_SEND;
}

/** Work queue entry constructors */
static unsigned int
( * hermon_fill_send_wqe[] ) ( struct ib_device *ibdev,
			       struct ib_queue_pair *qp,
			       struct ib_address_vector *dest,
			       struct io_buffer *iobuf,
			       union hermon_send_wqe *wqe ) = {
	[IB_QPT_SMI] = hermon_fill_mlx_send_wqe,
	[IB_QPT_GSI] = hermon_fill_mlx_send_wqe,
	[IB_QPT_UD] = hermon_fill_ud_send_wqe,
	[IB_QPT_RC] = hermon_fill_rc_send_wqe,
	[IB_QPT_ETH] = hermon_fill_eth_send_wqe,
};

/**
 * Post send work queue entry
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @v dest		Destination address vector
 * @v iobuf		I/O buffer
 * @ret rc		Return status code
 */
static int hermon_post_send ( struct ib_device *ibdev,
			      struct ib_queue_pair *qp,
			      struct ib_address_vector *dest,
			      struct io_buffer *iobuf ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );
	struct hermon_queue_pair *hermon_qp = ib_qp_get_drvdata ( qp );
	struct ib_work_queue *wq = &qp->send;
	struct hermon_send_work_queue *hermon_send_wq = &hermon_qp->send;
	union hermon_send_wqe *wqe;
	union hermonprm_doorbell_register db_reg;
	unsigned long wqe_idx_mask;
	unsigned long wqe_idx;
	unsigned int owner;
	unsigned int opcode;

	/* Allocate work queue entry */
	wqe_idx = ( wq->next_idx & ( hermon_send_wq->num_wqes - 1 ) );
	owner = ( ( wq->next_idx & hermon_send_wq->num_wqes ) ? 1 : 0 );
	wqe_idx_mask = ( wq->num_wqes - 1 );
	if ( wq->iobufs[ wqe_idx & wqe_idx_mask ] ) {
		DBGC ( hermon, "Hermon %p QPN %#lx send queue full",
		       hermon, qp->qpn );
		return -ENOBUFS;
	}
	wq->iobufs[ wqe_idx & wqe_idx_mask ] = iobuf;
	wqe = &hermon_send_wq->wqe[wqe_idx];

	/* Construct work queue entry */
	memset ( ( ( ( void * ) wqe ) + 4 /* avoid ctrl.owner */ ), 0,
		 ( sizeof ( *wqe ) - 4 ) );
	assert ( qp->type < ( sizeof ( hermon_fill_send_wqe ) /
			      sizeof ( hermon_fill_send_wqe[0] ) ) );
	assert ( hermon_fill_send_wqe[qp->type] != NULL );
	opcode = hermon_fill_send_wqe[qp->type] ( ibdev, qp, dest, iobuf, wqe );
	barrier();
	MLX_FILL_2 ( &wqe->ctrl, 0,
		     opcode, opcode,
		     owner, owner );
	DBGCP ( hermon, "Hermon %p QPN %#lx posting send WQE %#lx:\n",
		hermon, qp->qpn, wqe_idx );
	DBGCP_HDA ( hermon, virt_to_phys ( wqe ), wqe, sizeof ( *wqe ) );

	/* Ring doorbell register */
	MLX_FILL_1 ( &db_reg.send, 0, qn, qp->qpn );
	barrier();
	writel ( db_reg.dword[0], hermon_send_wq->doorbell );

	/* Update work queue's index */
	wq->next_idx++;

	return 0;
}
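
/*
 * Worked example ( illustrative only ): with hermon_send_wq->num_wqes
 * == 8, successive values of wq->next_idx map to wqe_idx 0-7, and the
 * owner bit ( wq->next_idx & 8 ) flips on every wrap of the ring, so
 * hardware can distinguish newly posted entries from stale ones.
 */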
1619
1620 /**
1621 * Post receive work queue entry
1622 *
1623 * @v ibdev Infiniband device
1624 * @v qp Queue pair
1625 * @v iobuf I/O buffer
1626 * @ret rc Return status code
1627 */
1628 static int hermon_post_recv ( struct ib_device *ibdev,
1629 struct ib_queue_pair *qp,
1630 struct io_buffer *iobuf ) {
1631 struct hermon *hermon = ib_get_drvdata ( ibdev );
1632 struct hermon_queue_pair *hermon_qp = ib_qp_get_drvdata ( qp );
1633 struct ib_work_queue *wq = &qp->recv;
1634 struct hermon_recv_work_queue *hermon_recv_wq = &hermon_qp->recv;
1635 struct hermonprm_recv_wqe *wqe;
1636 unsigned int wqe_idx_mask;
1637
1638 /* Allocate work queue entry */
1639 wqe_idx_mask = ( wq->num_wqes - 1 );
1640 if ( wq->iobufs[wq->next_idx & wqe_idx_mask] ) {
1641 DBGC ( hermon, "Hermon %p QPN %#lx receive queue full",
1642 hermon, qp->qpn );
1643 return -ENOBUFS;
1644 }
1645 wq->iobufs[wq->next_idx & wqe_idx_mask] = iobuf;
1646 wqe = &hermon_recv_wq->wqe[wq->next_idx & wqe_idx_mask].recv;
1647
1648 /* Construct work queue entry */
1649 MLX_FILL_1 ( &wqe->data[0], 0, byte_count, iob_tailroom ( iobuf ) );
1650 MLX_FILL_1 ( &wqe->data[0], 1, l_key, hermon->lkey );
1651 MLX_FILL_H ( &wqe->data[0], 2,
1652 local_address_h, virt_to_bus ( iobuf->data ) );
1653 MLX_FILL_1 ( &wqe->data[0], 3,
1654 local_address_l, virt_to_bus ( iobuf->data ) );
1655
1656 /* Update work queue's index */
1657 wq->next_idx++;
1658
1659 /* Update doorbell record */
1660 barrier();
1661 MLX_FILL_1 ( hermon_recv_wq->doorbell, 0, receive_wqe_counter,
1662 ( wq->next_idx & 0xffff ) );
1663
1664 return 0;
1665 }
1666
1667 /**
1668 * Handle completion
1669 *
1670 * @v ibdev Infiniband device
1671 * @v cq Completion queue
1672 * @v cqe Hardware completion queue entry
1673 * @ret rc Return status code
1674 */
1675 static int hermon_complete ( struct ib_device *ibdev,
1676 struct ib_completion_queue *cq,
1677 union hermonprm_completion_entry *cqe ) {
1678 struct hermon *hermon = ib_get_drvdata ( ibdev );
1679 struct ib_work_queue *wq;
1680 struct ib_queue_pair *qp;
1681 struct io_buffer *iobuf;
1682 struct ib_address_vector recv_dest;
1683 struct ib_address_vector recv_source;
1684 struct ib_global_route_header *grh;
1685 struct ib_address_vector *source;
1686 unsigned int opcode;
1687 unsigned long qpn;
1688 int is_send;
1689 unsigned long wqe_idx;
1690 unsigned long wqe_idx_mask;
1691 size_t len;
1692 int rc = 0;
1693
1694 /* Parse completion */
1695 qpn = MLX_GET ( &cqe->normal, qpn );
1696 is_send = MLX_GET ( &cqe->normal, s_r );
1697 opcode = MLX_GET ( &cqe->normal, opcode );
1698 if ( opcode >= HERMON_OPCODE_RECV_ERROR ) {
1699 /* "s" field is not valid for error opcodes */
1700 is_send = ( opcode == HERMON_OPCODE_SEND_ERROR );
1701 DBGC ( hermon, "Hermon %p CQN %#lx syndrome %x vendor %x\n",
1702 hermon, cq->cqn, MLX_GET ( &cqe->error, syndrome ),
1703 MLX_GET ( &cqe->error, vendor_error_syndrome ) );
1704 rc = -EIO;
1705 /* Don't return immediately; propagate error to completer */
1706 }
1707
1708 /* Identify work queue */
1709 wq = ib_find_wq ( cq, qpn, is_send );
1710 if ( ! wq ) {
1711 DBGC ( hermon, "Hermon %p CQN %#lx unknown %s QPN %#lx\n",
1712 hermon, cq->cqn, ( is_send ? "send" : "recv" ), qpn );
1713 return -EIO;
1714 }
1715 qp = wq->qp;
1716
1717 /* Identify work queue entry */
1718 wqe_idx = MLX_GET ( &cqe->normal, wqe_counter );
1719 wqe_idx_mask = ( wq->num_wqes - 1 );
1720 DBGCP ( hermon, "Hermon %p CQN %#lx QPN %#lx %s WQE %#lx completed:\n",
1721 hermon, cq->cqn, qp->qpn, ( is_send ? "send" : "recv" ),
1722 wqe_idx );
1723 DBGCP_HDA ( hermon, virt_to_phys ( cqe ), cqe, sizeof ( *cqe ) );
1724
1725 /* Identify I/O buffer */
1726 iobuf = wq->iobufs[ wqe_idx & wqe_idx_mask ];
1727 if ( ! iobuf ) {
1728 DBGC ( hermon, "Hermon %p CQN %#lx QPN %#lx empty %s WQE "
1729 "%#lx\n", hermon, cq->cqn, qp->qpn,
1730 ( is_send ? "send" : "recv" ), wqe_idx );
1731 return -EIO;
1732 }
1733 wq->iobufs[ wqe_idx & wqe_idx_mask ] = NULL;
1734
1735 if ( is_send ) {
1736 /* Hand off to completion handler */
1737 ib_complete_send ( ibdev, qp, iobuf, rc );
1738 } else {
1739 /* Set received length */
1740 len = MLX_GET ( &cqe->normal, byte_cnt );
1741 assert ( len <= iob_tailroom ( iobuf ) );
1742 iob_put ( iobuf, len );
1743 memset ( &recv_dest, 0, sizeof ( recv_dest ) );
1744 recv_dest.qpn = qpn;
1745 memset ( &recv_source, 0, sizeof ( recv_source ) );
1746 switch ( qp->type ) {
1747 case IB_QPT_SMI:
1748 case IB_QPT_GSI:
1749 case IB_QPT_UD:
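			/* The hardware deposits room for a GRH at the
			 * start of every SMI/GSI/UD receive buffer;
			 * the "g" flag read below indicates whether
			 * its contents are valid.
			 */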
1750 assert ( iob_len ( iobuf ) >= sizeof ( *grh ) );
1751 grh = iobuf->data;
1752 iob_pull ( iobuf, sizeof ( *grh ) );
1753 /* Construct address vector */
1754 source = &recv_source;
1755 source->qpn = MLX_GET ( &cqe->normal, srq_rqpn );
1756 source->lid = MLX_GET ( &cqe->normal, slid_smac47_32 );
1757 source->sl = MLX_GET ( &cqe->normal, sl );
1758 recv_dest.gid_present = source->gid_present =
1759 MLX_GET ( &cqe->normal, g );
1760 memcpy ( &recv_dest.gid, &grh->dgid,
1761 sizeof ( recv_dest.gid ) );
1762 memcpy ( &source->gid, &grh->sgid,
1763 sizeof ( source->gid ) );
1764 break;
1765 case IB_QPT_RC:
1766 source = &qp->av;
1767 break;
1768 case IB_QPT_ETH:
1769 /* Construct address vector */
1770 source = &recv_source;
1771 source->vlan_present = MLX_GET ( &cqe->normal, vlan );
1772 source->vlan = MLX_GET ( &cqe->normal, vid );
1773 break;
1774 default:
1775 assert ( 0 );
1776 return -EINVAL;
1777 }
1778 /* Hand off to completion handler */
1779 ib_complete_recv ( ibdev, qp, &recv_dest, source, iobuf, rc );
1780 }
1781
1782 return rc;
1783 }
1784
1785 /**
1786 * Poll completion queue
1787 *
1788 * @v ibdev Infiniband device
1789 * @v cq Completion queue
1790 */
1791 static void hermon_poll_cq ( struct ib_device *ibdev,
1792 struct ib_completion_queue *cq ) {
1793 struct hermon *hermon = ib_get_drvdata ( ibdev );
1794 struct hermon_completion_queue *hermon_cq = ib_cq_get_drvdata ( cq );
1795 union hermonprm_completion_entry *cqe;
1796 unsigned int cqe_idx_mask;
1797 int rc;
1798
1799 while ( 1 ) {
1800 /* Look for completion entry */
1801 cqe_idx_mask = ( cq->num_cqes - 1 );
1802 cqe = &hermon_cq->cqe[cq->next_idx & cqe_idx_mask];
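		/* The hardware toggles the CQE owner bit on each pass
		 * around the ring.  Since num_cqes is a power of two,
		 * ( next_idx & num_cqes ) yields the parity of the
		 * current pass, so a mismatch between the stored owner
		 * bit and this parity means that the entry has not yet
		 * been written by hardware.
		 */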
1803 if ( MLX_GET ( &cqe->normal, owner ) ^
1804 ( ( cq->next_idx & cq->num_cqes ) ? 1 : 0 ) ) {
1805 /* Entry still owned by hardware; end of poll */
1806 break;
1807 }
1808
1809 /* Handle completion */
1810 if ( ( rc = hermon_complete ( ibdev, cq, cqe ) ) != 0 ) {
1811 DBGC ( hermon, "Hermon %p CQN %#lx failed to complete:"
1812 " %s\n", hermon, cq->cqn, strerror ( rc ) );
1813 DBGC_HDA ( hermon, virt_to_phys ( cqe ),
1814 cqe, sizeof ( *cqe ) );
1815 }
1816
1817 /* Update completion queue's index */
1818 cq->next_idx++;
1819
1820 /* Update doorbell record */
1821 MLX_FILL_1 ( hermon_cq->doorbell, 0, update_ci,
1822 ( cq->next_idx & 0x00ffffffUL ) );
1823 }
1824 }
1825
1826 /***************************************************************************
1827 *
1828 * Event queues
1829 *
1830 ***************************************************************************
1831 */
1832
1833 /**
1834 * Create event queue
1835 *
1836 * @v hermon Hermon device
1837 * @ret rc Return status code
1838 */
1839 static int hermon_create_eq ( struct hermon *hermon ) {
1840 struct hermon_event_queue *hermon_eq = &hermon->eq;
1841 struct hermonprm_eqc eqctx;
1842 struct hermonprm_event_mask mask;
1843 unsigned int i;
1844 int rc;
1845
1846 /* Select event queue number */
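	/* The factor of four here presumably reflects the number of
	 * EQ doorbells per UAR page (see HERMON_DB_EQ_OFFSET, used
	 * below); the net effect is to place our doorbell beyond the
	 * reserved UAR pages while also skipping the reserved EQs.
	 */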
1847 hermon_eq->eqn = ( 4 * hermon->cap.reserved_uars );
1848 if ( hermon_eq->eqn < hermon->cap.reserved_eqs )
1849 hermon_eq->eqn = hermon->cap.reserved_eqs;
1850
1851 /* Calculate doorbell address */
1852 hermon_eq->doorbell =
1853 ( hermon->uar + HERMON_DB_EQ_OFFSET ( hermon_eq->eqn ) );
1854
1855 /* Allocate event queue itself */
1856 hermon_eq->eqe_size =
1857 ( HERMON_NUM_EQES * sizeof ( hermon_eq->eqe[0] ) );
1858 hermon_eq->eqe = malloc_dma ( hermon_eq->eqe_size,
1859 sizeof ( hermon_eq->eqe[0] ) );
1860 if ( ! hermon_eq->eqe ) {
1861 rc = -ENOMEM;
1862 goto err_eqe;
1863 }
1864 memset ( hermon_eq->eqe, 0, hermon_eq->eqe_size );
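	/* Set the owner bit on every EQE so that the first pass of
	 * the poll loop in hermon_poll_eq() treats them all as still
	 * owned by the hardware.
	 */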
1865 for ( i = 0 ; i < HERMON_NUM_EQES ; i++ ) {
1866 MLX_FILL_1 ( &hermon_eq->eqe[i].generic, 7, owner, 1 );
1867 }
1868 barrier();
1869
1870 /* Allocate MTT entries */
1871 if ( ( rc = hermon_alloc_mtt ( hermon, hermon_eq->eqe,
1872 hermon_eq->eqe_size,
1873 &hermon_eq->mtt ) ) != 0 )
1874 goto err_alloc_mtt;
1875
1876 /* Hand queue over to hardware */
1877 memset ( &eqctx, 0, sizeof ( eqctx ) );
1878 MLX_FILL_2 ( &eqctx, 0,
1879 st, 0xa /* "Fired" */,
1880 oi, 1 );
1881 MLX_FILL_1 ( &eqctx, 2,
1882 page_offset, ( hermon_eq->mtt.page_offset >> 5 ) );
1883 MLX_FILL_1 ( &eqctx, 3, log_eq_size, fls ( HERMON_NUM_EQES - 1 ) );
1884 MLX_FILL_H ( &eqctx, 6, mtt_base_addr_h,
1885 hermon_eq->mtt.mtt_base_addr );
1886 MLX_FILL_1 ( &eqctx, 7, mtt_base_addr_l,
1887 ( hermon_eq->mtt.mtt_base_addr >> 3 ) );
1888 if ( ( rc = hermon_cmd_sw2hw_eq ( hermon, hermon_eq->eqn,
1889 &eqctx ) ) != 0 ) {
1890 DBGC ( hermon, "Hermon %p EQN %#lx SW2HW_EQ failed: %s\n",
1891 hermon, hermon_eq->eqn, strerror ( rc ) );
1892 goto err_sw2hw_eq;
1893 }
1894
1895 /* Map all events to this event queue */
1896 memset ( &mask, 0xff, sizeof ( mask ) );
1897 if ( ( rc = hermon_cmd_map_eq ( hermon,
1898 ( HERMON_MAP_EQ | hermon_eq->eqn ),
1899 &mask ) ) != 0 ) {
1900 DBGC ( hermon, "Hermon %p EQN %#lx MAP_EQ failed: %s\n",
1901 hermon, hermon_eq->eqn, strerror ( rc ) );
1902 goto err_map_eq;
1903 }
1904
1905 DBGC ( hermon, "Hermon %p EQN %#lx ring [%08lx,%08lx), doorbell "
1906 "%08lx\n", hermon, hermon_eq->eqn,
1907 virt_to_phys ( hermon_eq->eqe ),
1908 ( virt_to_phys ( hermon_eq->eqe ) + hermon_eq->eqe_size ),
1909 virt_to_phys ( hermon_eq->doorbell ) );
1910 return 0;
1911
1912 err_map_eq:
1913 hermon_cmd_hw2sw_eq ( hermon, hermon_eq->eqn, &eqctx );
1914 err_sw2hw_eq:
1915 hermon_free_mtt ( hermon, &hermon_eq->mtt );
1916 err_alloc_mtt:
1917 free_dma ( hermon_eq->eqe, hermon_eq->eqe_size );
1918 err_eqe:
1919 memset ( hermon_eq, 0, sizeof ( *hermon_eq ) );
1920 return rc;
1921 }
1922
1923 /**
1924 * Destroy event queue
1925 *
1926 * @v hermon Hermon device
1927 */
1928 static void hermon_destroy_eq ( struct hermon *hermon ) {
1929 struct hermon_event_queue *hermon_eq = &hermon->eq;
1930 struct hermonprm_eqc eqctx;
1931 struct hermonprm_event_mask mask;
1932 int rc;
1933
1934 /* Unmap events from event queue */
1935 memset ( &mask, 0xff, sizeof ( mask ) );
1936 if ( ( rc = hermon_cmd_map_eq ( hermon,
1937 ( HERMON_UNMAP_EQ | hermon_eq->eqn ),
1938 &mask ) ) != 0 ) {
1939 DBGC ( hermon, "Hermon %p EQN %#lx FATAL MAP_EQ failed to "
1940 "unmap: %s\n", hermon, hermon_eq->eqn, strerror ( rc ) );
1941 /* Continue; HCA may die but system should survive */
1942 }
1943
1944 /* Take ownership back from hardware */
1945 if ( ( rc = hermon_cmd_hw2sw_eq ( hermon, hermon_eq->eqn,
1946 &eqctx ) ) != 0 ) {
1947 DBGC ( hermon, "Hermon %p EQN %#lx FATAL HW2SW_EQ failed: %s\n",
1948 hermon, hermon_eq->eqn, strerror ( rc ) );
1949 /* Leak memory and return; at least we avoid corruption */
1950 return;
1951 }
1952
1953 /* Free MTT entries */
1954 hermon_free_mtt ( hermon, &hermon_eq->mtt );
1955
1956 /* Free memory */
1957 free_dma ( hermon_eq->eqe, hermon_eq->eqe_size );
1958 memset ( hermon_eq, 0, sizeof ( *hermon_eq ) );
1959 }
1960
1961 /**
1962 * Handle port state event
1963 *
1964 * @v hermon Hermon device
1965 * @v eqe Port state change event queue entry
1966 */
1967 static void hermon_event_port_state_change ( struct hermon *hermon,
1968 					     union hermonprm_event_entry *eqe ) {
1969 unsigned int port;
1970 int link_up;
1971
1972 /* Get port and link status */
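	/* Sub-type bit 0x04 indicates that the port has become
	 * active; any other sub-type is treated as link-down.
	 */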
1973 port = ( MLX_GET ( &eqe->port_state_change, data.p ) - 1 );
1974 link_up = ( MLX_GET ( &eqe->generic, event_sub_type ) & 0x04 );
1975 DBGC ( hermon, "Hermon %p port %d link %s\n", hermon, ( port + 1 ),
1976 ( link_up ? "up" : "down" ) );
1977
1978 /* Sanity check */
1979 if ( port >= hermon->cap.num_ports ) {
1980 DBGC ( hermon, "Hermon %p port %d does not exist!\n",
1981 hermon, ( port + 1 ) );
1982 return;
1983 }
1984
1985 /* Notify device of port state change */
1986 hermon->port[port].type->state_change ( hermon, &hermon->port[port],
1987 link_up );
1988 }
1989
1990 /**
1991 * Poll event queue
1992 *
1993 * @v ibdev Infiniband device
1994 */
1995 static void hermon_poll_eq ( struct ib_device *ibdev ) {
1996 struct hermon *hermon = ib_get_drvdata ( ibdev );
1997 struct hermon_event_queue *hermon_eq = &hermon->eq;
1998 union hermonprm_event_entry *eqe;
1999 union hermonprm_doorbell_register db_reg;
2000 unsigned int eqe_idx_mask;
2001 unsigned int event_type;
2002
2003 /* No event is generated upon reaching INIT, so we must poll
2004 * separately for link state changes while we remain DOWN.
2005 */
2006 if ( ib_is_open ( ibdev ) &&
2007 ( ibdev->port_state == IB_PORT_STATE_DOWN ) ) {
2008 ib_smc_update ( ibdev, hermon_mad );
2009 }
2010
2011 /* Poll event queue */
2012 while ( 1 ) {
2013 /* Look for event entry */
2014 eqe_idx_mask = ( HERMON_NUM_EQES - 1 );
2015 eqe = &hermon_eq->eqe[hermon_eq->next_idx & eqe_idx_mask];
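		/* Same owner-bit parity scheme as in hermon_poll_cq()
		 * above.
		 */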
2016 if ( MLX_GET ( &eqe->generic, owner ) ^
2017 ( ( hermon_eq->next_idx & HERMON_NUM_EQES ) ? 1 : 0 ) ) {
2018 /* Entry still owned by hardware; end of poll */
2019 break;
2020 }
2021 DBGCP ( hermon, "Hermon %p EQN %#lx event:\n",
2022 hermon, hermon_eq->eqn );
2023 DBGCP_HDA ( hermon, virt_to_phys ( eqe ),
2024 eqe, sizeof ( *eqe ) );
2025
2026 /* Handle event */
2027 event_type = MLX_GET ( &eqe->generic, event_type );
2028 switch ( event_type ) {
2029 case HERMON_EV_PORT_STATE_CHANGE:
2030 hermon_event_port_state_change ( hermon, eqe );
2031 break;
2032 default:
2033 DBGC ( hermon, "Hermon %p EQN %#lx unrecognised event "
2034 "type %#x:\n",
2035 hermon, hermon_eq->eqn, event_type );
2036 DBGC_HDA ( hermon, virt_to_phys ( eqe ),
2037 eqe, sizeof ( *eqe ) );
2038 break;
2039 }
2040
2041 /* Update event queue's index */
2042 hermon_eq->next_idx++;
2043
2044 /* Ring doorbell */
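		/* Unlike the CQ, which updates a doorbell record in
		 * host memory, the EQ consumer index is written
		 * directly to a doorbell register within the UAR.
		 */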
2045 MLX_FILL_1 ( &db_reg.event, 0,
2046 ci, ( hermon_eq->next_idx & 0x00ffffffUL ) );
2047 writel ( db_reg.dword[0], hermon_eq->doorbell );
2048 }
2049 }
2050
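#if 0
/* A minimal, self-contained sketch (never compiled) of the owner-bit
 * scheme used by hermon_poll_cq() and hermon_poll_eq() above.  All
 * names here are illustrative only and are not part of the driver.
 */
#include <stdint.h>

#define EX_RING_SIZE 4			/* Must be a power of two */

struct ex_entry {
	uint8_t owner;			/* Toggled by hardware per pass */
};

/* Return non-zero if software may consume the entry at next_idx */
static int ex_sw_owned ( struct ex_entry *ring, unsigned long next_idx ) {
	unsigned int pass_parity = ( ( next_idx & EX_RING_SIZE ) ? 1 : 0 );
	struct ex_entry *entry = &ring[ next_idx & ( EX_RING_SIZE - 1 ) ];

	/* The entry belongs to software when its owner bit matches
	 * the parity of the current pass around the ring.
	 */
	return ( entry->owner == pass_parity );
}
#endif
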
2051 /***************************************************************************
2052 *
2053 * Firmware control
2054 *
2055 ***************************************************************************
2056 */
2057
2058 /**
2059 * Map virtual to physical address for firmware usage
2060 *
2061 * @v hermon Hermon device
2062 * @v map Mapping function
2063 * @v va Virtual address
2064 * @v pa Physical address
2065 * @v len Length of region
2066 * @ret rc Return status code
2067 */
2068 static int hermon_map_vpm ( struct hermon *hermon,
2069 int ( *map ) ( struct hermon *hermon,
2070 const struct hermonprm_virtual_physical_mapping* ),
2071 uint64_t va, physaddr_t pa, size_t len ) {
2072 struct hermonprm_virtual_physical_mapping mapping;
2073 physaddr_t start;
2074 physaddr_t low;
2075 physaddr_t high;
2076 physaddr_t end;
2077 size_t size;
2078 int rc;
2079
2080 /* Sanity checks */
2081 assert ( ( va & ( HERMON_PAGE_SIZE - 1 ) ) == 0 );
2082 assert ( ( pa & ( HERMON_PAGE_SIZE - 1 ) ) == 0 );
2083 assert ( ( len & ( HERMON_PAGE_SIZE - 1 ) ) == 0 );
2084
2085 /* Calculate starting points */
2086 start = pa;
2087 end = ( start + len );
2088 size = ( 1UL << ( fls ( start ^ end ) - 1 ) );
2089 low = high = ( end & ~( size - 1 ) );
2090 assert ( start < low );
2091 assert ( high <= end );
2092
2093 	/* These mappings generate huge volumes of uninteresting
2094 	 * debug data which, if left enabled, would render the debug
2095 	 * output unusable.
2096 	 */
2097 DBG_DISABLE ( DBGLVL_LOG | DBGLVL_EXTRA );
2098
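	/* Worked example (illustrative numbers only): for pa=0x3000
	 * and len=0x5000, start=0x3000, end=0x8000, size=0x8000 and
	 * low=high=0x8000.  The loop then maps 0x4000 bytes at 0x4000
	 * and 0x1000 bytes at 0x3000: every block is naturally aligned
	 * to its own size, and the number of mapping commands stays
	 * logarithmic in len.
	 */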
2099 /* Map blocks in descending order of size */
2100 while ( size >= HERMON_PAGE_SIZE ) {
2101
2102 /* Find the next candidate block */
2103 if ( ( low - size ) >= start ) {
2104 low -= size;
2105 pa = low;
2106 } else if ( ( high + size ) <= end ) {
2107 pa = high;
2108 high += size;
2109 } else {
2110 size >>= 1;
2111 continue;
2112 }
2113 assert ( ( va & ( size - 1 ) ) == 0 );
2114 assert ( ( pa & ( size - 1 ) ) == 0 );
2115
2116 /* Map this block */
2117 memset ( &mapping, 0, sizeof ( mapping ) );
2118 MLX_FILL_1 ( &mapping, 0, va_h, ( va >> 32 ) );
2119 MLX_FILL_1 ( &mapping, 1, va_l, ( va >> 12 ) );
2120 MLX_FILL_H ( &mapping, 2, pa_h, pa );
2121 MLX_FILL_2 ( &mapping, 3,
2122 log2size, ( ( fls ( size ) - 1 ) - 12 ),
2123 pa_l, ( pa >> 12 ) );
2124 if ( ( rc = map ( hermon, &mapping ) ) != 0 ) {
2125 DBG_ENABLE ( DBGLVL_LOG | DBGLVL_EXTRA );
2126 DBGC ( hermon, "Hermon %p could not map %08llx+%zx to "
2127 "%08lx: %s\n",
2128 hermon, va, size, pa, strerror ( rc ) );
2129 return rc;
2130 }
2131 va += size;
2132 }
2133 assert ( low == start );
2134 assert ( high == end );
2135
2136 DBG_ENABLE ( DBGLVL_LOG | DBGLVL_EXTRA );
2137 return 0;
2138 }
2139
2140 /**
2141 * Start firmware running
2142 *
2143 * @v hermon Hermon device
2144 * @ret rc Return status code
2145 */
2146 static int hermon_start_firmware ( struct hermon *hermon ) {
2147 struct hermonprm_query_fw fw;
2148 unsigned int fw_pages;
2149 size_t fw_len;
2150 physaddr_t fw_base;
2151 int rc;
2152
2153 /* Get firmware parameters */
2154 if ( ( rc = hermon_cmd_query_fw ( hermon, &fw ) ) != 0 ) {
2155 DBGC ( hermon, "Hermon %p could not query firmware: %s\n",
2156 hermon, strerror ( rc ) );
2157 goto err_query_fw;
2158 }
2159 DBGC ( hermon, "Hermon %p firmware version %d.%d.%d\n", hermon,
2160 MLX_GET ( &fw, fw_rev_major ), MLX_GET ( &fw, fw_rev_minor ),
2161 MLX_GET ( &fw, fw_rev_subminor ) );
2162 fw_pages = MLX_GET ( &fw, fw_pages );
2163 DBGC ( hermon, "Hermon %p requires %d pages (%d kB) for firmware\n",
2164 hermon, fw_pages, ( fw_pages * 4 ) );
2165
2166 /* Allocate firmware pages and map firmware area */
2167 fw_len = ( fw_pages * HERMON_PAGE_SIZE );
2168 if ( ! hermon->firmware_area ) {
2169 hermon->firmware_len = fw_len;
2170 hermon->firmware_area = umalloc ( hermon->firmware_len );
2171 if ( ! hermon->firmware_area ) {
2172 rc = -ENOMEM;
2173 goto err_alloc_fa;
2174 }
2175 } else {
2176 assert ( hermon->firmware_len == fw_len );
2177 }
2178 fw_base = user_to_phys ( hermon->firmware_area, 0 );
2179 DBGC ( hermon, "Hermon %p firmware area at physical [%08lx,%08lx)\n",
2180 hermon, fw_base, ( fw_base + fw_len ) );
2181 if ( ( rc = hermon_map_vpm ( hermon, hermon_cmd_map_fa,
2182 0, fw_base, fw_len ) ) != 0 ) {
2183 DBGC ( hermon, "Hermon %p could not map firmware: %s\n",
2184 hermon, strerror ( rc ) );
2185 goto err_map_fa;
2186 }
2187
2188 /* Start firmware */
2189 if ( ( rc = hermon_cmd_run_fw ( hermon ) ) != 0 ) {
2190 DBGC ( hermon, "Hermon %p could not run firmware: %s\n",
2191 hermon, strerror ( rc ) );
2192 goto err_run_fw;
2193 }
2194
2195 DBGC ( hermon, "Hermon %p firmware started\n", hermon );
2196 return 0;
2197
2198 err_run_fw:
2199 err_map_fa:
2200 hermon_cmd_unmap_fa ( hermon );
2201 err_alloc_fa:
2202 err_query_fw:
2203 return rc;
2204 }
2205
2206 /**
2207 * Stop firmware running
2208 *
2209 * @v hermon Hermon device
2210 */
2211 static void hermon_stop_firmware ( struct hermon *hermon ) {
2212 int rc;
2213
2214 if ( ( rc = hermon_cmd_unmap_fa ( hermon ) ) != 0 ) {
2215 DBGC ( hermon, "Hermon %p FATAL could not stop firmware: %s\n",
2216 hermon, strerror ( rc ) );
2217 /* Leak memory and return; at least we avoid corruption */
2218 hermon->firmware_area = UNULL;
2219 return;
2220 }
2221 }
2222
2223 /***************************************************************************
2224 *
2225 * InfiniHost Context Memory management
2226 *
2227 ***************************************************************************
2228 */
2229
2230 /**
2231 * Get device limits
2232 *
2233 * @v hermon Hermon device
2234 * @ret rc Return status code
2235 */
2236 static int hermon_get_cap ( struct hermon *hermon ) {
2237 struct hermonprm_query_dev_cap dev_cap;
2238 int rc;
2239
2240 if ( ( rc = hermon_cmd_query_dev_cap ( hermon, &dev_cap ) ) != 0 ) {
2241 DBGC ( hermon, "Hermon %p could not get device limits: %s\n",
2242 hermon, strerror ( rc ) );
2243 return rc;
2244 }
2245
2246 hermon->cap.cmpt_entry_size = MLX_GET ( &dev_cap, c_mpt_entry_sz );
2247 hermon->cap.reserved_qps =
2248 ( 1 << MLX_GET ( &dev_cap, log2_rsvd_qps ) );
2249 hermon->cap.qpc_entry_size = MLX_GET ( &dev_cap, qpc_entry_sz );
2250 hermon->cap.altc_entry_size = MLX_GET ( &dev_cap, altc_entry_sz );
2251 hermon->cap.auxc_entry_size = MLX_GET ( &dev_cap, aux_entry_sz );
2252 hermon->cap.reserved_srqs =
2253 ( 1 << MLX_GET ( &dev_cap, log2_rsvd_srqs ) );
2254 hermon->cap.srqc_entry_size = MLX_GET ( &dev_cap, srq_entry_sz );
2255 hermon->cap.reserved_cqs =
2256 ( 1 << MLX_GET ( &dev_cap, log2_rsvd_cqs ) );
2257 hermon->cap.cqc_entry_size = MLX_GET ( &dev_cap, cqc_entry_sz );
2258 hermon->cap.reserved_eqs = MLX_GET ( &dev_cap, num_rsvd_eqs );
2259 if ( hermon->cap.reserved_eqs == 0 ) {
2260 /* Backward compatibility */
2261 hermon->cap.reserved_eqs =
2262 ( 1 << MLX_GET ( &dev_cap, log2_rsvd_eqs ) );
2263 }
2264 hermon->cap.eqc_entry_size = MLX_GET ( &dev_cap, eqc_entry_sz );
2265 hermon->cap.reserved_mtts =
2266 ( 1 << MLX_GET ( &dev_cap, log2_rsvd_mtts ) );
2267 hermon->cap.mtt_entry_size = MLX_GET ( &dev_cap, mtt_entry_sz );
2268 hermon->cap.reserved_mrws =
2269 ( 1 << MLX_GET ( &dev_cap, log2_rsvd_mrws ) );
2270 hermon->cap.dmpt_entry_size = MLX_GET ( &dev_cap, d_mpt_entry_sz );
2271 hermon->cap.reserved_uars = MLX_GET ( &dev_cap, num_rsvd_uars );
2272 hermon->cap.num_ports = MLX_GET ( &dev_cap, num_ports );
2273 hermon->cap.dpdp = MLX_GET ( &dev_cap, dpdp );
2274
2275 /* Sanity check */
2276 if ( hermon->cap.num_ports > HERMON_MAX_PORTS ) {
2277 DBGC ( hermon, "Hermon %p has %d ports (only %d supported)\n",
2278 hermon, hermon->cap.num_ports, HERMON_MAX_PORTS );
2279 hermon->cap.num_ports = HERMON_MAX_PORTS;
2280 }
2281
2282 return 0;
2283 }
2284
2285 /**
2286 * Align ICM table
2287 *
2288 * @v icm_offset Current ICM offset
2289 * @v len ICM table length
2290 * @ret icm_offset ICM offset
2291 */
2292 static uint64_t icm_align ( uint64_t icm_offset, size_t len ) {
2293
2294 /* Round up to a multiple of the table size */
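	/* e.g. icm_align ( 0x5000, 0x4000 ) = 0x8000; table lengths
	 * are always powers of two (see the assertion below), and each
	 * table must start on a multiple of its own size.
	 */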
2295 assert ( len == ( 1UL << ( fls ( len ) - 1 ) ) );
2296 return ( ( icm_offset + len - 1 ) & ~( ( ( uint64_t ) len ) - 1 ) );
2297 }
2298
2299 /**
2300 * Map ICM (allocating if necessary)
2301 *
2302 * @v hermon Hermon device
2303 * @v init_hca INIT_HCA structure to fill in
2304 * @ret rc Return status code
2305 */
2306 static int hermon_map_icm ( struct hermon *hermon,
2307 struct hermonprm_init_hca *init_hca ) {
2308 struct hermonprm_scalar_parameter icm_size;
2309 struct hermonprm_scalar_parameter icm_aux_size;
2310 uint64_t icm_offset = 0;
2311 unsigned int log_num_qps, log_num_srqs, log_num_cqs, log_num_eqs;
2312 unsigned int log_num_mtts, log_num_mpts, log_num_mcs;
2313 size_t cmpt_max_len;
2314 size_t icm_len, icm_aux_len;
2315 size_t len;
2316 physaddr_t icm_phys;
2317 int i;
2318 int rc;
2319
2320 /*
2321 * Start by carving up the ICM virtual address space
2322 *
2323 */
2324
2325 /* Calculate number of each object type within ICM */
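	/* fls ( n - 1 ) is the usual idiom for ceil ( log2 ( n ) ), so
	 * each object count below is rounded up to a power of two; the
	 * "+ 1 - 1" for the MPT count keeps the idiom visible for
	 * ( reserved_mrws + 1 ) entries.
	 */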
2326 log_num_qps = fls ( hermon->cap.reserved_qps +
2327 HERMON_RSVD_SPECIAL_QPS + HERMON_MAX_QPS - 1 );
2328 log_num_srqs = fls ( hermon->cap.reserved_srqs - 1 );
2329 log_num_cqs = fls ( hermon->cap.reserved_cqs + HERMON_MAX_CQS - 1 );
2330 log_num_eqs = fls ( hermon->cap.reserved_eqs + HERMON_MAX_EQS - 1 );
2331 log_num_mtts = fls ( hermon->cap.reserved_mtts + HERMON_MAX_MTTS - 1 );
2332 log_num_mpts = fls ( hermon->cap.reserved_mrws + 1 - 1 );
2333 log_num_mcs = HERMON_LOG_MULTICAST_HASH_SIZE;
2334
2335 /* ICM starts with the cMPT tables, which are sparse */
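	/* Only the leading ( len ) bytes of each cMPT table are backed
	 * with physical memory; the virtual offset nevertheless
	 * advances by the full cmpt_max_len so that each table keeps
	 * its architected share of the ICM address space.
	 */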
2336 cmpt_max_len = ( HERMON_CMPT_MAX_ENTRIES *
2337 ( ( uint64_t ) hermon->cap.cmpt_entry_size ) );
2338 len = ( ( ( ( 1 << log_num_qps ) * hermon->cap.cmpt_entry_size ) +
2339 HERMON_PAGE_SIZE - 1 ) & ~( HERMON_PAGE_SIZE - 1 ) );
2340 hermon->icm_map[HERMON_ICM_QP_CMPT].offset = icm_offset;
2341 hermon->icm_map[HERMON_ICM_QP_CMPT].len = len;
2342 icm_offset += cmpt_max_len;
2343 len = ( ( ( ( 1 << log_num_srqs ) * hermon->cap.cmpt_entry_size ) +
2344 HERMON_PAGE_SIZE - 1 ) & ~( HERMON_PAGE_SIZE - 1 ) );
2345 hermon->icm_map[HERMON_ICM_SRQ_CMPT].offset = icm_offset;
2346 hermon->icm_map[HERMON_ICM_SRQ_CMPT].len = len;
2347 icm_offset += cmpt_max_len;
2348 len = ( ( ( ( 1 << log_num_cqs ) * hermon->cap.cmpt_entry_size ) +
2349 HERMON_PAGE_SIZE - 1 ) & ~( HERMON_PAGE_SIZE - 1 ) );
2350 hermon->icm_map[HERMON_ICM_CQ_CMPT].offset = icm_offset;
2351 hermon->icm_map[HERMON_ICM_CQ_CMPT].len = len;
2352 icm_offset += cmpt_max_len;
2353 len = ( ( ( ( 1 << log_num_eqs ) * hermon->cap.cmpt_entry_size ) +
2354 HERMON_PAGE_SIZE - 1 ) & ~( HERMON_PAGE_SIZE - 1 ) );
2355 hermon->icm_map[HERMON_ICM_EQ_CMPT].offset = icm_offset;
2356 hermon->icm_map[HERMON_ICM_EQ_CMPT].len = len;
2357 icm_offset += cmpt_max_len;
2358
2359 hermon->icm_map[HERMON_ICM_OTHER].offset = icm_offset;
2360
2361 /* Queue pair contexts */
2362 len = ( ( 1 << log_num_qps ) * hermon->cap.qpc_entry_size );
2363 icm_offset = icm_align ( icm_offset, len );
2364 MLX_FILL_1 ( init_hca, 12,
2365 qpc_eec_cqc_eqc_rdb_parameters.qpc_base_addr_h,
2366 ( icm_offset >> 32 ) );
2367 MLX_FILL_2 ( init_hca, 13,
2368 qpc_eec_cqc_eqc_rdb_parameters.qpc_base_addr_l,
2369 ( icm_offset >> 5 ),
2370 qpc_eec_cqc_eqc_rdb_parameters.log_num_of_qp,
2371 log_num_qps );
2372 DBGC ( hermon, "Hermon %p ICM QPC is %d x %#zx at [%08llx,%08llx)\n",
2373 hermon, ( 1 << log_num_qps ), hermon->cap.qpc_entry_size,
2374 icm_offset, ( icm_offset + len ) );
2375 icm_offset += len;
2376
2377 /* Extended alternate path contexts */
2378 len = ( ( 1 << log_num_qps ) * hermon->cap.altc_entry_size );
2379 icm_offset = icm_align ( icm_offset, len );
2380 MLX_FILL_1 ( init_hca, 24,
2381 qpc_eec_cqc_eqc_rdb_parameters.altc_base_addr_h,
2382 ( icm_offset >> 32 ) );
2383 MLX_FILL_1 ( init_hca, 25,
2384 qpc_eec_cqc_eqc_rdb_parameters.altc_base_addr_l,
2385 icm_offset );
2386 DBGC ( hermon, "Hermon %p ICM ALTC is %d x %#zx at [%08llx,%08llx)\n",
2387 hermon, ( 1 << log_num_qps ), hermon->cap.altc_entry_size,
2388 icm_offset, ( icm_offset + len ) );
2389 icm_offset += len;
2390
2391 /* Extended auxiliary contexts */
2392 len = ( ( 1 << log_num_qps ) * hermon->cap.auxc_entry_size );
2393 icm_offset = icm_align ( icm_offset, len );
2394 MLX_FILL_1 ( init_hca, 28,
2395 qpc_eec_cqc_eqc_rdb_parameters.auxc_base_addr_h,
2396 ( icm_offset >> 32 ) );
2397 MLX_FILL_1 ( init_hca, 29,
2398 qpc_eec_cqc_eqc_rdb_parameters.auxc_base_addr_l,
2399 icm_offset );
2400 DBGC ( hermon, "Hermon %p ICM AUXC is %d x %#zx at [%08llx,%08llx)\n",
2401 hermon, ( 1 << log_num_qps ), hermon->cap.auxc_entry_size,
2402 icm_offset, ( icm_offset + len ) );
2403 icm_offset += len;
2404
2405 /* Shared receive queue contexts */
2406 len = ( ( 1 << log_num_srqs ) * hermon->cap.srqc_entry_size );
2407 icm_offset = icm_align ( icm_offset, len );
2408 MLX_FILL_1 ( init_hca, 18,
2409 qpc_eec_cqc_eqc_rdb_parameters.srqc_base_addr_h,
2410 ( icm_offset >> 32 ) );
2411 MLX_FILL_2 ( init_hca, 19,
2412 qpc_eec_cqc_eqc_rdb_parameters.srqc_base_addr_l,
2413 ( icm_offset >> 5 ),
2414 qpc_eec_cqc_eqc_rdb_parameters.log_num_of_srq,
2415 log_num_srqs );
2416 DBGC ( hermon, "Hermon %p ICM SRQC is %d x %#zx at [%08llx,%08llx)\n",
2417 hermon, ( 1 << log_num_srqs ), hermon->cap.srqc_entry_size,
2418 icm_offset, ( icm_offset + len ) );
2419 icm_offset += len;
2420
2421 /* Completion queue contexts */
2422 len = ( ( 1 << log_num_cqs ) * hermon->cap.cqc_entry_size );
2423 icm_offset = icm_align ( icm_offset, len );
2424 MLX_FILL_1 ( init_hca, 20,
2425 qpc_eec_cqc_eqc_rdb_parameters.cqc_base_addr_h,
2426 ( icm_offset >> 32 ) );
2427 MLX_FILL_2 ( init_hca, 21,
2428 qpc_eec_cqc_eqc_rdb_parameters.cqc_base_addr_l,
2429 ( icm_offset >> 5 ),
2430 qpc_eec_cqc_eqc_rdb_parameters.log_num_of_cq,
2431 log_num_cqs );
2432 DBGC ( hermon, "Hermon %p ICM CQC is %d x %#zx at [%08llx,%08llx)\n",
2433 hermon, ( 1 << log_num_cqs ), hermon->cap.cqc_entry_size,
2434 icm_offset, ( icm_offset + len ) );
2435 icm_offset += len;
2436
2437 /* Event queue contexts */
2438 len = ( ( 1 << log_num_eqs ) * hermon->cap.eqc_entry_size );
2439 icm_offset = icm_align ( icm_offset, len );
2440 MLX_FILL_1 ( init_hca, 32,
2441 qpc_eec_cqc_eqc_rdb_parameters.eqc_base_addr_h,
2442 ( icm_offset >> 32 ) );
2443 MLX_FILL_2 ( init_hca, 33,
2444 qpc_eec_cqc_eqc_rdb_parameters.eqc_base_addr_l,
2445 ( icm_offset >> 5 ),
2446 qpc_eec_cqc_eqc_rdb_parameters.log_num_of_eq,
2447 log_num_eqs );
2448 DBGC ( hermon, "Hermon %p ICM EQC is %d x %#zx at [%08llx,%08llx)\n",
2449 hermon, ( 1 << log_num_eqs ), hermon->cap.eqc_entry_size,
2450 icm_offset, ( icm_offset + len ) );
2451 icm_offset += len;
2452
2453 /* Memory translation table */
2454 len = ( ( 1 << log_num_mtts ) * hermon->cap.mtt_entry_size );
2455 icm_offset = icm_align ( icm_offset, len );
2456 MLX_FILL_1 ( init_hca, 64,
2457 tpt_parameters.mtt_base_addr_h, ( icm_offset >> 32 ) );
2458 MLX_FILL_1 ( init_hca, 65,
2459 tpt_parameters.mtt_base_addr_l, icm_offset );
2460 DBGC ( hermon, "Hermon %p ICM MTT is %d x %#zx at [%08llx,%08llx)\n",
2461 hermon, ( 1 << log_num_mtts ), hermon->cap.mtt_entry_size,
2462 icm_offset, ( icm_offset + len ) );
2463 icm_offset += len;
2464
2465 /* Memory protection table */
2466 len = ( ( 1 << log_num_mpts ) * hermon->cap.dmpt_entry_size );
2467 icm_offset = icm_align ( icm_offset, len );
2468 MLX_FILL_1 ( init_hca, 60,
2469 tpt_parameters.dmpt_base_adr_h, ( icm_offset >> 32 ) );
2470 MLX_FILL_1 ( init_hca, 61,
2471 tpt_parameters.dmpt_base_adr_l, icm_offset );
2472 MLX_FILL_1 ( init_hca, 62,
2473 tpt_parameters.log_dmpt_sz, log_num_mpts );
2474 DBGC ( hermon, "Hermon %p ICM DMPT is %d x %#zx at [%08llx,%08llx)\n",
2475 hermon, ( 1 << log_num_mpts ), hermon->cap.dmpt_entry_size,
2476 icm_offset, ( icm_offset + len ) );
2477 icm_offset += len;
2478
2479 /* Multicast table */
2480 len = ( ( 1 << log_num_mcs ) * sizeof ( struct hermonprm_mcg_entry ) );
2481 icm_offset = icm_align ( icm_offset, len );
2482 MLX_FILL_1 ( init_hca, 48,
2483 multicast_parameters.mc_base_addr_h,
2484 ( icm_offset >> 32 ) );
2485 MLX_FILL_1 ( init_hca, 49,
2486 multicast_parameters.mc_base_addr_l, icm_offset );
2487 MLX_FILL_1 ( init_hca, 52,
2488 multicast_parameters.log_mc_table_entry_sz,
2489 fls ( sizeof ( struct hermonprm_mcg_entry ) - 1 ) );
2490 MLX_FILL_1 ( init_hca, 53,
2491 multicast_parameters.log_mc_table_hash_sz, log_num_mcs );
2492 MLX_FILL_1 ( init_hca, 54,
2493 multicast_parameters.log_mc_table_sz, log_num_mcs );
2494 DBGC ( hermon, "Hermon %p ICM MC is %d x %#zx at [%08llx,%08llx)\n",
2495 hermon, ( 1 << log_num_mcs ),
2496 sizeof ( struct hermonprm_mcg_entry ),
2497 icm_offset, ( icm_offset + len ) );
2498 icm_offset += len;
2499
2500
2501 hermon->icm_map[HERMON_ICM_OTHER].len =
2502 ( icm_offset - hermon->icm_map[HERMON_ICM_OTHER].offset );
2503
2504 /*
2505 * Allocate and map physical memory for (portions of) ICM
2506 *
2507 * Map is:
2508 * ICM AUX area (aligned to its own size)
2509 * cMPT areas
2510 * Other areas
2511 */
2512
2513 /* Calculate physical memory required for ICM */
2514 icm_len = 0;
2515 for ( i = 0 ; i < HERMON_ICM_NUM_REGIONS ; i++ ) {
2516 icm_len += hermon->icm_map[i].len;
2517 }
2518
2519 /* Get ICM auxiliary area size */
2520 memset ( &icm_size, 0, sizeof ( icm_size ) );
2521 MLX_FILL_1 ( &icm_size, 0, value_hi, ( icm_offset >> 32 ) );
2522 MLX_FILL_1 ( &icm_size, 1, value, icm_offset );
2523 if ( ( rc = hermon_cmd_set_icm_size ( hermon, &icm_size,
2524 &icm_aux_size ) ) != 0 ) {
2525 DBGC ( hermon, "Hermon %p could not set ICM size: %s\n",
2526 hermon, strerror ( rc ) );
2527 goto err_set_icm_size;
2528 }
2529 icm_aux_len = ( MLX_GET ( &icm_aux_size, value ) * HERMON_PAGE_SIZE );
2530
2531 /* Allocate ICM data and auxiliary area */
2532 DBGC ( hermon, "Hermon %p requires %zd kB ICM and %zd kB AUX ICM\n",
2533 hermon, ( icm_len / 1024 ), ( icm_aux_len / 1024 ) );
2534 if ( ! hermon->icm ) {
2535 hermon->icm_len = icm_len;
2536 hermon->icm_aux_len = icm_aux_len;
2537 hermon->icm = umalloc ( hermon->icm_aux_len + hermon->icm_len );
2538 if ( ! hermon->icm ) {
2539 rc = -ENOMEM;
2540 goto err_alloc;
2541 }
2542 } else {
2543 assert ( hermon->icm_len == icm_len );
2544 assert ( hermon->icm_aux_len == icm_aux_len );
2545 }
2546 icm_phys = user_to_phys ( hermon->icm, 0 );
2547
2548 /* Map ICM auxiliary area */
2549 DBGC ( hermon, "Hermon %p mapping ICM AUX => %08lx\n",
2550 hermon, icm_phys );
2551 if ( ( rc = hermon_map_vpm ( hermon, hermon_cmd_map_icm_aux,
2552 0, icm_phys, icm_aux_len ) ) != 0 ) {
2553 DBGC ( hermon, "Hermon %p could not map AUX ICM: %s\n",
2554 hermon, strerror ( rc ) );
2555 goto err_map_icm_aux;
2556 }
2557 icm_phys += icm_aux_len;
2558
2559 /* MAP ICM area */
2560 for ( i = 0 ; i < HERMON_ICM_NUM_REGIONS ; i++ ) {
2561 DBGC ( hermon, "Hermon %p mapping ICM %llx+%zx => %08lx\n",
2562 hermon, hermon->icm_map[i].offset,
2563 hermon->icm_map[i].len, icm_phys );
2564 if ( ( rc = hermon_map_vpm ( hermon, hermon_cmd_map_icm,
2565 hermon->icm_map[i].offset,
2566 icm_phys,
2567 					     hermon->icm_map[i].len ) ) != 0 ) {
2568 DBGC ( hermon, "Hermon %p could not map ICM: %s\n",
2569 hermon, strerror ( rc ) );
2570 goto err_map_icm;
2571 }
2572 icm_phys += hermon->icm_map[i].len;
2573 }
2574
2575 return 0;
2576
2577 err_map_icm:
2578 assert ( i == 0 ); /* We don't handle partial failure at present */
2579 err_map_icm_aux:
2580 hermon_cmd_unmap_icm_aux ( hermon );
2581 err_alloc:
2582 err_set_icm_size:
2583 return rc;
2584 }
2585
2586 /**
2587 * Unmap ICM
2588 *
2589 * @v hermon Hermon device
2590 */
2591 static void hermon_unmap_icm ( struct hermon *hermon ) {
2592 struct hermonprm_scalar_parameter unmap_icm;
2593 int i;
2594
2595 for ( i = ( HERMON_ICM_NUM_REGIONS - 1 ) ; i >= 0 ; i-- ) {
2596 memset ( &unmap_icm, 0, sizeof ( unmap_icm ) );
2597 MLX_FILL_1 ( &unmap_icm, 0, value_hi,
2598 ( hermon->icm_map[i].offset >> 32 ) );
2599 MLX_FILL_1 ( &unmap_icm, 1, value,
2600 hermon->icm_map[i].offset );
2601 hermon_cmd_unmap_icm ( hermon,
2602 ( 1 << fls ( ( hermon->icm_map[i].len /
2603 						      HERMON_PAGE_SIZE ) - 1 ) ),
2604 &unmap_icm );
2605 }
2606 hermon_cmd_unmap_icm_aux ( hermon );
2607 }
2608
2609 /***************************************************************************
2610 *
2611 * Initialisation and teardown
2612 *
2613 ***************************************************************************
2614 */
2615
2616 /**
2617 * Reset device
2618 *
2619 * @v hermon Hermon device
2620 */
2621 static void hermon_reset ( struct hermon *hermon ) {
2622 struct pci_device *pci = hermon->pci;
2623 struct pci_config_backup backup;
2624 static const uint8_t backup_exclude[] =
2625 PCI_CONFIG_BACKUP_EXCLUDE ( 0x58, 0x5c );
2626
2627 /* Perform device reset and preserve PCI configuration */
2628 pci_backup ( pci, &backup, backup_exclude );
2629 writel ( HERMON_RESET_MAGIC,
2630 ( hermon->config + HERMON_RESET_OFFSET ) );
2631 mdelay ( HERMON_RESET_WAIT_TIME_MS );
2632 pci_restore ( pci, &backup, backup_exclude );
2633
2634 /* Reset command interface toggle */
2635 hermon->toggle = 0;
2636 }
2637
2638 /**
2639 * Set up memory protection table
2640 *
2641 * @v hermon Hermon device
2642 * @ret rc Return status code
2643 */
2644 static int hermon_setup_mpt ( struct hermon *hermon ) {
2645 struct hermonprm_mpt mpt;
2646 uint32_t key;
2647 int rc;
2648
2649 /* Derive key */
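	/* A memory key combines a 24-bit MPT index with an 8-bit
	 * variable key.  The rotate-left-by-eight below converts the
	 * index-plus-prefix value programmed into the MPT into the
	 * L_Key layout used in work queue entries (this reading of the
	 * layout is inferred from the rotation, not stated here by any
	 * hardware documentation).
	 */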
2650 key = ( hermon->cap.reserved_mrws | HERMON_MKEY_PREFIX );
2651 hermon->lkey = ( ( key << 8 ) | ( key >> 24 ) );
2652
2653 /* Initialise memory protection table */
2654 memset ( &mpt, 0, sizeof ( mpt ) );
2655 MLX_FILL_7 ( &mpt, 0,
2656 atomic, 1,
2657 rw, 1,
2658 rr, 1,
2659 lw, 1,
2660 lr, 1,
2661 pa, 1,
2662 r_w, 1 );
2663 MLX_FILL_1 ( &mpt, 2, mem_key, key );
2664 MLX_FILL_1 ( &mpt, 3,
2665 pd, HERMON_GLOBAL_PD );
2666 MLX_FILL_1 ( &mpt, 10, len64, 1 );
2667 if ( ( rc = hermon_cmd_sw2hw_mpt ( hermon,
2668 hermon->cap.reserved_mrws,
2669 &mpt ) ) != 0 ) {
2670 DBGC ( hermon, "Hermon %p could not set up MPT: %s\n",
2671 hermon, strerror ( rc ) );
2672 return rc;
2673 }
2674
2675 return 0;
2676 }
2677
2678 /**
2679 * Configure special queue pairs
2680 *
2681 * @v hermon Hermon device
2682 * @ret rc Return status code
2683 */
2684 static int hermon_configure_special_qps ( struct hermon *hermon ) {
2685 int rc;
2686
2687 /* Special QP block must be aligned on its own size */
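	/* ( x + n - 1 ) & ~( n - 1 ) rounds x up to the next multiple
	 * of n, relying on HERMON_NUM_SPECIAL_QPS being a power of
	 * two.
	 */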
2688 hermon->special_qpn_base = ( ( hermon->cap.reserved_qps +
2689 HERMON_NUM_SPECIAL_QPS - 1 )
2690 & ~( HERMON_NUM_SPECIAL_QPS - 1 ) );
2691 hermon->qpn_base = ( hermon->special_qpn_base +
2692 HERMON_NUM_SPECIAL_QPS );
2693 DBGC ( hermon, "Hermon %p special QPs at [%lx,%lx]\n", hermon,
2694 hermon->special_qpn_base, ( hermon->qpn_base - 1 ) );
2695
2696 /* Issue command to configure special QPs */
2697 if ( ( rc = hermon_cmd_conf_special_qp ( hermon, 0x00,
2698 hermon->special_qpn_base ) ) != 0 ) {
2699 DBGC ( hermon, "Hermon %p could not configure special QPs: "
2700 "%s\n", hermon, strerror ( rc ) );
2701 return rc;
2702 }
2703
2704 return 0;
2705 }
2706
2707 /**
2708 * Start Hermon device
2709 *
2710 * @v hermon Hermon device
2711 * @v running Firmware is already running
2712 * @ret rc Return status code
2713 */
2714 static int hermon_start ( struct hermon *hermon, int running ) {
2715 struct hermonprm_init_hca init_hca;
2716 unsigned int i;
2717 int rc;
2718
2719 /* Start firmware if not already running */
2720 if ( ! running ) {
2721 if ( ( rc = hermon_start_firmware ( hermon ) ) != 0 )
2722 goto err_start_firmware;
2723 }
2724
2725 /* Allocate and map ICM */
2726 memset ( &init_hca, 0, sizeof ( init_hca ) );
2727 if ( ( rc = hermon_map_icm ( hermon, &init_hca ) ) != 0 )
2728 goto err_map_icm;
2729
2730 /* Initialise HCA */
2731 MLX_FILL_1 ( &init_hca, 0, version, 0x02 /* "Must be 0x02" */ );
2732 MLX_FILL_1 ( &init_hca, 5, udp, 1 );
2733 MLX_FILL_1 ( &init_hca, 74, uar_parameters.log_max_uars, 8 );
2734 if ( ( rc = hermon_cmd_init_hca ( hermon, &init_hca ) ) != 0 ) {
2735 DBGC ( hermon, "Hermon %p could not initialise HCA: %s\n",
2736 hermon, strerror ( rc ) );
2737 goto err_init_hca;
2738 }
2739
2740 /* Set up memory protection */
2741 if ( ( rc = hermon_setup_mpt ( hermon ) ) != 0 )
2742 goto err_setup_mpt;
2743 for ( i = 0 ; i < hermon->cap.num_ports ; i++ )
2744 hermon->port[i].ibdev->rdma_key = hermon->lkey;
2745
2746 /* Set up event queue */
2747 if ( ( rc = hermon_create_eq ( hermon ) ) != 0 )
2748 goto err_create_eq;
2749
2750 /* Configure special QPs */
2751 if ( ( rc = hermon_configure_special_qps ( hermon ) ) != 0 )
2752 goto err_conf_special_qps;
2753
2754 return 0;
2755
2756 err_conf_special_qps:
2757 hermon_destroy_eq ( hermon );
2758 err_create_eq:
2759 err_setup_mpt:
2760 hermon_cmd_close_hca ( hermon );
2761 err_init_hca:
2762 hermon_unmap_icm ( hermon );
2763 err_map_icm:
2764 hermon_stop_firmware ( hermon );
2765 err_start_firmware:
2766 return rc;
2767 }
2768
2769 /**
2770 * Stop Hermon device
2771 *
2772 * @v hermon Hermon device
2773 */
2774 static void hermon_stop ( struct hermon *hermon ) {
2775 hermon_destroy_eq ( hermon );
2776 hermon_cmd_close_hca ( hermon );
2777 hermon_unmap_icm ( hermon );
2778 hermon_stop_firmware ( hermon );
2779 hermon_reset ( hermon );
2780 }
2781
2782 /**
2783 * Open Hermon device
2784 *
2785 * @v hermon Hermon device
2786 * @ret rc Return status code
2787 */
2788 static int hermon_open ( struct hermon *hermon ) {
2789 int rc;
2790
2791 /* Start device if applicable */
2792 if ( hermon->open_count == 0 ) {
2793 if ( ( rc = hermon_start ( hermon, 0 ) ) != 0 )
2794 return rc;
2795 }
2796
2797 /* Increment open counter */
2798 hermon->open_count++;
2799
2800 return 0;
2801 }
2802
2803 /**
2804 * Close Hermon device
2805 *
2806 * @v hermon Hermon device
2807 */
2808 static void hermon_close ( struct hermon *hermon ) {
2809
2810 /* Decrement open counter */
2811 assert ( hermon->open_count != 0 );
2812 hermon->open_count--;
2813
2814 /* Stop device if applicable */
2815 if ( hermon->open_count == 0 )
2816 hermon_stop ( hermon );
2817 }
2818
2819 /***************************************************************************
2820 *
2821 * Infiniband link-layer operations
2822 *
2823 ***************************************************************************
2824 */
2825
2826 /**
2827 * Initialise Infiniband link
2828 *
2829 * @v ibdev Infiniband device
2830 * @ret rc Return status code
2831 */
2832 static int hermon_ib_open ( struct ib_device *ibdev ) {
2833 struct hermon *hermon = ib_get_drvdata ( ibdev );
2834 union hermonprm_set_port set_port;
2835 int rc;
2836
2837 /* Open hardware */
2838 if ( ( rc = hermon_open ( hermon ) ) != 0 )
2839 goto err_open;
2840
2841 /* Set port parameters */
2842 memset ( &set_port, 0, sizeof ( set_port ) );
2843 MLX_FILL_8 ( &set_port.ib, 0,
2844 mmc, 1,
2845 mvc, 1,
2846 mp, 1,
2847 mg, 1,
2848 mtu_cap, IB_MTU_2048,
2849 vl_cap, IB_VL_0,
2850 rcm, 1,
2851 lss, 1 );
2852 MLX_FILL_2 ( &set_port.ib, 10,
2853 max_pkey, 1,
2854 max_gid, 1 );
2855 MLX_FILL_1 ( &set_port.ib, 28,
2856 link_speed_supported, 1 );
2857 if ( ( rc = hermon_cmd_set_port ( hermon, 0, ibdev->port,
2858 &set_port ) ) != 0 ) {
2859 DBGC ( hermon, "Hermon %p port %d could not set port: %s\n",
2860 hermon, ibdev->port, strerror ( rc ) );
2861 goto err_set_port;
2862 }
2863
2864 /* Initialise port */
2865 if ( ( rc = hermon_cmd_init_port ( hermon, ibdev->port ) ) != 0 ) {
2866 DBGC ( hermon, "Hermon %p port %d could not initialise port: "
2867 "%s\n", hermon, ibdev->port, strerror ( rc ) );
2868 goto err_init_port;
2869 }
2870
2871 /* Update MAD parameters */
2872 ib_smc_update ( ibdev, hermon_mad );
2873
2874 return 0;
2875
2876 err_init_port:
2877 err_set_port:
2878 hermon_close ( hermon );
2879 err_open:
2880 return rc;
2881 }
2882
2883 /**
2884 * Close Infiniband link
2885 *
2886 * @v ibdev Infiniband device
2887 */
2888 static void hermon_ib_close ( struct ib_device *ibdev ) {
2889 struct hermon *hermon = ib_get_drvdata ( ibdev );
2890 int rc;
2891
2892 /* Close port */
2893 if ( ( rc = hermon_cmd_close_port ( hermon, ibdev->port ) ) != 0 ) {
2894 DBGC ( hermon, "Hermon %p port %d could not close port: %s\n",
2895 hermon, ibdev->port, strerror ( rc ) );
2896 /* Nothing we can do about this */
2897 }
2898
2899 /* Close hardware */
2900 hermon_close ( hermon );
2901 }
2902
2903 /**
2904 * Inform embedded subnet management agent of a received MAD
2905 *
2906 * @v ibdev Infiniband device
2907 * @v mad MAD
2908 * @ret rc Return status code
2909 */
2910 static int hermon_inform_sma ( struct ib_device *ibdev,
2911 union ib_mad *mad ) {
2912 int rc;
2913
2914 /* Send the MAD to the embedded SMA */
2915 if ( ( rc = hermon_mad ( ibdev, mad ) ) != 0 )
2916 return rc;
2917
2918 /* Update parameters held in software */
2919 ib_smc_update ( ibdev, hermon_mad );
2920
2921 return 0;
2922 }
2923
2924 /***************************************************************************
2925 *
2926 * Multicast group operations
2927 *
2928 ***************************************************************************
2929 */
2930
2931 /**
2932 * Attach to multicast group
2933 *
2934 * @v ibdev Infiniband device
2935 * @v qp Queue pair
2936 * @v gid Multicast GID
2937 * @ret rc Return status code
2938 */
2939 static int hermon_mcast_attach ( struct ib_device *ibdev,
2940 struct ib_queue_pair *qp,
2941 union ib_gid *gid ) {
2942 struct hermon *hermon = ib_get_drvdata ( ibdev );
2943 struct hermonprm_mgm_hash hash;
2944 struct hermonprm_mcg_entry mcg;
2945 unsigned int index;
2946 int rc;
2947
2948 /* Generate hash table index */
2949 if ( ( rc = hermon_cmd_mgid_hash ( hermon, gid, &hash ) ) != 0 ) {
2950 DBGC ( hermon, "Hermon %p could not hash GID: %s\n",
2951 hermon, strerror ( rc ) );
2952 return rc;
2953 }
2954 index = MLX_GET ( &hash, hash );
2955
2956 /* Check for existing hash table entry */
2957 if ( ( rc = hermon_cmd_read_mcg ( hermon, index, &mcg ) ) != 0 ) {
2958 DBGC ( hermon, "Hermon %p could not read MCG %#x: %s\n",
2959 hermon, index, strerror ( rc ) );
2960 return rc;
2961 }
2962 if ( MLX_GET ( &mcg, hdr.members_count ) != 0 ) {
2963 /* FIXME: this implementation allows only a single QP
2964 * per multicast group, and doesn't handle hash
2965 * collisions. Sufficient for IPoIB but may need to
2966 * be extended in future.
2967 */
2968 DBGC ( hermon, "Hermon %p MGID index %#x already in use\n",
2969 hermon, index );
2970 return -EBUSY;
2971 }
2972
2973 /* Update hash table entry */
2974 MLX_FILL_1 ( &mcg, 1, hdr.members_count, 1 );
2975 MLX_FILL_1 ( &mcg, 8, qp[0].qpn, qp->qpn );
2976 memcpy ( &mcg.u.dwords[4], gid, sizeof ( *gid ) );
2977 if ( ( rc = hermon_cmd_write_mcg ( hermon, index, &mcg ) ) != 0 ) {
2978 DBGC ( hermon, "Hermon %p could not write MCG %#x: %s\n",
2979 hermon, index, strerror ( rc ) );
2980 return rc;
2981 }
2982
2983 return 0;
2984 }
2985
2986 /**
2987 * Detach from multicast group
2988 *
2989 * @v ibdev Infiniband device
2990 * @v qp Queue pair
2991 * @v gid Multicast GID
2992 */
2993 static void hermon_mcast_detach ( struct ib_device *ibdev,
2994 struct ib_queue_pair *qp __unused,
2995 union ib_gid *gid ) {
2996 struct hermon *hermon = ib_get_drvdata ( ibdev );
2997 struct hermonprm_mgm_hash hash;
2998 struct hermonprm_mcg_entry mcg;
2999 unsigned int index;
3000 int rc;
3001
3002 /* Generate hash table index */
3003 if ( ( rc = hermon_cmd_mgid_hash ( hermon, gid, &hash ) ) != 0 ) {
3004 DBGC ( hermon, "Hermon %p could not hash GID: %s\n",
3005 hermon, strerror ( rc ) );
3006 return;
3007 }
3008 index = MLX_GET ( &hash, hash );
3009
3010 /* Clear hash table entry */
3011 memset ( &mcg, 0, sizeof ( mcg ) );
3012 if ( ( rc = hermon_cmd_write_mcg ( hermon, index, &mcg ) ) != 0 ) {
3013 DBGC ( hermon, "Hermon %p could not write MCG %#x: %s\n",
3014 hermon, index, strerror ( rc ) );
3015 return;
3016 }
3017 }
3018
3019 /** Hermon Infiniband operations */
3020 static struct ib_device_operations hermon_ib_operations = {
3021 .create_cq = hermon_create_cq,
3022 .destroy_cq = hermon_destroy_cq,
3023 .create_qp = hermon_create_qp,
3024 .modify_qp = hermon_modify_qp,
3025 .destroy_qp = hermon_destroy_qp,
3026 .post_send = hermon_post_send,
3027 .post_recv = hermon_post_recv,
3028 .poll_cq = hermon_poll_cq,
3029 .poll_eq = hermon_poll_eq,
3030 .open = hermon_ib_open,
3031 .close = hermon_ib_close,
3032 .mcast_attach = hermon_mcast_attach,
3033 .mcast_detach = hermon_mcast_detach,
3034 .set_port_info = hermon_inform_sma,