[golan] Add Connect-IB, ConnectX-4 and ConnectX-4 Lx (Infiniband) support
[ipxe.git] / src / drivers / infiniband / golan.c
1 /*
2 * Copyright (C) 2013-2015 Mellanox Technologies Ltd.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License as
6 * published by the Free Software Foundation; either version 2 of the
7 * License, or any later version.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
17 * 02110-1301, USA.
18 */
19
20 FILE_LICENCE ( GPL2_OR_LATER );
21
22 #include <errno.h>
23 #include <strings.h>
24 #include <byteswap.h>
25 #include <ipxe/malloc.h>
26 #include <ipxe/umalloc.h>
27 #include <ipxe/infiniband.h>
28 #include <ipxe/ib_smc.h>
29 #include <ipxe/iobuf.h>
30 #include <ipxe/netdevice.h>
31 #include <ipxe/ethernet.h>
32 #include <ipxe/if_ether.h>
33 #include <ipxe/in.h>
34 #include <ipxe/ipoib.h>
35 #include "flexboot_nodnic.h"
36 #include "nodnic_shomron_prm.h"
37 #include "golan.h"
38 #include "mlx_utils/include/public/mlx_bail.h"
39 #include "mlx_utils/mlx_lib/mlx_link_speed/mlx_link_speed.h"
40 #include "mlx_utils/mlx_lib/mlx_nvconfig/mlx_nvconfig.h"
41 #include "mlx_utils/include/public/mlx_pci_gw.h"
42 #include "mlx_nodnic/include/mlx_port.h"
43
44 /******************************************************************************/
45 /************* Very simple memory management for umalloced pages **************/
46 /******* Temporary solution until full memory management is implemented *******/
47 /******************************************************************************/
48 #define GOLAN_PAGES 20
49 struct golan_page {
50 struct list_head list;
51 userptr_t addr;
52 };
53
54 static void golan_free_pages ( struct list_head *head ) {
55 struct golan_page *page, *tmp;
56 list_for_each_entry_safe ( page, tmp, head, list ) {
57 list_del ( &page->list );
58 ufree ( page->addr );
59 free ( page );
60 }
61 }
62
63 static int golan_init_pages ( struct list_head *head ) {
64 struct golan_page *new_entry;
65 int rc, i;
66
67 if ( !head ) {
68 rc = -EINVAL;
69 goto err_golan_init_pages_bad_param;
70 }
71
72 INIT_LIST_HEAD ( head );
73
74 for ( i = 0; i < GOLAN_PAGES; i++ ) {
75 new_entry = zalloc ( sizeof ( *new_entry ) );
76 if ( new_entry == NULL ) {
77 rc = -ENOMEM;
78 goto err_golan_init_pages_alloc_page;
79 }
80 new_entry->addr = umalloc ( GOLAN_PAGE_SIZE );
81 if ( new_entry->addr == UNULL ) {
82 free ( new_entry );
83 rc = -ENOMEM;
84 goto err_golan_init_pages_alloc_page;
85 }
86 list_add ( &new_entry->list, head );
87 }
88
89 return 0;
90
91 err_golan_init_pages_alloc_page:
92 golan_free_pages ( head );
93 err_golan_init_pages_bad_param:
94 return rc;
95 }
96
97 static userptr_t golan_get_page ( struct list_head *head ) {
98 struct golan_page *page;
99 userptr_t addr;
100
101 if ( list_empty ( head ) )
102 return UNULL;
103
104 page = list_first_entry ( head, struct golan_page, list );
105 list_del ( &page->list );
106 addr = page->addr;
107 free ( page );
108 return addr;
109 }
110
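/*
 * Illustrative lifecycle of the page pool above ( a sketch only, not part
 * of the driver, hence compiled out; the function name is hypothetical ):
 * the pool is filled once and pages are handed out until it runs dry.
 */
#if 0
static int golan_page_pool_example ( void ) {
	struct list_head pages;
	userptr_t page;
	int rc;

	if ( ( rc = golan_init_pages ( &pages ) ) != 0 )
		return rc;
	page = golan_get_page ( &pages );	/* UNULL when the pool is empty */
	if ( page != UNULL )
		ufree ( page );			/* caller owns returned pages */
	golan_free_pages ( &pages );		/* release whatever is left */
	return 0;
}
#endif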
111 /******************************************************************************/
112
113 const char *golan_qp_state_as_string[] = {
114 "RESET",
115 "INIT",
116 "RTR",
117 "RTS",
118 "SQD",
119 "SQE",
120 "ERR"
121 };
122
123 inline int golan_check_rc_and_cmd_status ( struct golan_cmd_layout *cmd, int rc ) {
124 struct golan_outbox_hdr *out_hdr = ( struct golan_outbox_hdr * ) ( cmd->out );
125 if ( rc == -EBUSY ) {
126 DBG ( "HCA is busy (rc = -EBUSY)\n" );
127 return rc;
128 } else if ( out_hdr->status ) {
129 DBG("%s status = 0x%x - syndrome = 0x%x\n", __FUNCTION__,
130 out_hdr->status, be32_to_cpu(out_hdr->syndrome));
131 return out_hdr->status;
132 }
133 return 0;
134 }
135
136 #define GOLAN_CHECK_RC_AND_CMD_STATUS(_label) \
137 do { \
138 if ( ( rc = golan_check_rc_and_cmd_status ( cmd, rc ) ) ) \
139 goto _label; \
140 } while (0)
141
142 #define GOLAN_PRINT_RC_AND_CMD_STATUS golan_check_rc_and_cmd_status ( cmd, rc )
143
144
145 struct mbox {
146 union {
147 struct golan_cmd_prot_block mblock;
148 u8 data[MAILBOX_STRIDE];
149 __be64 qdata[MAILBOX_STRIDE >> 3];
150 };
151 };
152
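/* Note added for clarity: fls() numbers bits from 1, so this is
 * floor(log2(mem)) - e.g. ilog2(1) == 0, ilog2(64) == 6, ilog2(100) == 6. */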
153 static inline uint32_t ilog2(uint32_t mem)
154 {
155 return ( fls ( mem ) - 1 );
156 }
157
158 #define CTRL_SIG_SZ (sizeof(mailbox->mblock) - sizeof(mailbox->mblock.bdata) - 2)
159
160 static inline u8 xor8_buf(void *buf, int len)
161 {
162 u8 sum = 0;
163 int i;
164 u8 *ptr = buf;
165
166 for (i = 0; i < len; ++i)
167 sum ^= ptr[i];
168
169 return sum;
170 }
171
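/* Note added for clarity: control signatures are stored as the bitwise NOT
 * of the XOR of the bytes they cover ( see golan_calc_sig() below ), so
 * XOR-ing an entire signed region, signature included, yields
 * x ^ ~x == 0xff; verify_block_sig() checks exactly that invariant. */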
172 static inline int verify_block_sig(struct golan_cmd_prot_block *block)
173 {
174 if (xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 1) != 0xff)
175 return -EINVAL;
176
177 if (xor8_buf(block, sizeof(*block)) != 0xff)
178 return -EINVAL;
179 return 0;
180 }
181
182 static inline const char *cmd_status_str(u8 status)
183 {
184 switch (status) {
185 case 0x0: return "OK";
186 case 0x1: return "internal error";
187 case 0x2: return "bad operation";
188 case 0x3: return "bad parameter";
189 case 0x4: return "bad system state";
190 case 0x5: return "bad resource";
191 case 0x6: return "resource busy";
192 case 0x8: return "limits exceeded";
193 case 0x9: return "bad resource state";
194 case 0xa: return "bad index";
195 case 0xf: return "no resources";
196 case 0x50: return "bad input length";
197 case 0x51: return "bad output length";
198 case 0x10: return "bad QP state";
199 case 0x30: return "bad packet (discarded)";
200 case 0x40: return "bad size too many outstanding CQEs";
201 case 0xff: return "Command Timed Out";
202 default: return "unknown status";
203 }
204 }
205
206 static inline uint16_t fw_rev_maj(struct golan *golan)
207 {
208 return be32_to_cpu(readl(&golan->iseg->fw_rev)) & 0xffff;
209 }
210
211 static inline u16 fw_rev_min(struct golan *golan)
212 {
213 return be32_to_cpu(readl(&golan->iseg->fw_rev)) >> 16;
214 }
215
216 static inline u16 fw_rev_sub(struct golan *golan)
217 {
218 return be32_to_cpu(readl(&golan->iseg->cmdif_rev_fw_sub)) & 0xffff;
219 }
220
221 static inline u16 cmdif_rev(struct golan *golan)
222 {
223 return be32_to_cpu(readl(&golan->iseg->cmdif_rev_fw_sub)) >> 16;
224 }
225
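/* Note added for clarity: per the accessors above, each 32-bit init-segment
 * register packs two 16-bit fields, read as big-endian:
 * fw_rev = { minor[31:16], major[15:0] } and
 * cmdif_rev_fw_sub = { cmdif[31:16], subminor[15:0] }. */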
226
227 static inline struct golan_cmd_layout *get_cmd( struct golan *golan, int idx )
228 {
229 return golan->cmd.addr + (idx << golan->cmd.log_stride);
230 }
231
232 static inline void golan_calc_sig(struct golan *golan, uint32_t cmd_idx,
233 uint32_t inbox_idx, uint32_t outbox_idx)
234 {
235 struct golan_cmd_layout *cmd = get_cmd(golan, cmd_idx);
236 struct mbox *mailbox = NULL;
237
238 if (inbox_idx != NO_MBOX) {
239 mailbox = GET_INBOX(golan, inbox_idx);
240 mailbox->mblock.token = cmd->token;
241 mailbox->mblock.ctrl_sig = ~xor8_buf(mailbox->mblock.rsvd0,
242 CTRL_SIG_SZ);
243 }
244 if (outbox_idx != NO_MBOX) {
245 mailbox = GET_OUTBOX(golan, outbox_idx);
246 mailbox->mblock.token = cmd->token;
247 mailbox->mblock.ctrl_sig = ~xor8_buf(mailbox->mblock.rsvd0,
248 CTRL_SIG_SZ);
249 }
250 cmd->sig = ~xor8_buf(cmd, sizeof(*cmd));
251 }
252
253 /**
254 * Check the Golan FW version and command interface revision
255 */
256 static int fw_ver_and_cmdif ( struct golan *golan ) {
257 DBGC (golan ,"\n[%x:%x]rev maj.min.submin = %x.%x.%x cmdif = %x\n",
258 golan->iseg->fw_rev,
259 golan->iseg->cmdif_rev_fw_sub,
260 fw_rev_maj ( golan ), fw_rev_min ( golan ),
261 fw_rev_sub ( golan ), cmdif_rev ( golan));
262
263 if (cmdif_rev ( golan) != PXE_CMDIF_REF) {
264 DBGC (golan ,"CMDIF %d not supported; driver supports %d\n",
265 cmdif_rev ( golan ), PXE_CMDIF_REF);
266 return 1;
267 }
268 return 0;
269 }
270
271 static inline void show_out_status(uint32_t *out)
272 {
273 DBG("%x\n", be32_to_cpu(out[0]));
274 DBG("%x\n", be32_to_cpu(out[1]));
275 DBG("%x\n", be32_to_cpu(out[2]));
276 DBG("%x\n", be32_to_cpu(out[3]));
277 }
278 /**
279 * Check if CMD has finished.
280 */
281 static inline uint32_t is_command_finished( struct golan *golan, int idx)
282 {
283 wmb();
284 return !(get_cmd( golan , idx )->status_own & CMD_OWNER_HW);
285 }
286
287 /**
288 * Wait for Golan command completion
289 *
290 * @v golan Golan device
291 * @ret rc Return status code
292 */
293 static inline int golan_cmd_wait(struct golan *golan, int idx, const char *command)
294 {
295 unsigned int wait;
296 int rc = -EBUSY;
297
298 for ( wait = GOLAN_HCR_MAX_WAIT_MS ; wait ; --wait ) {
299 if (is_command_finished(golan, idx)) {
300 rc = CMD_STATUS(golan, idx);
301 rmb();
302 break;
303 } else {
304 mdelay ( 1 );
305 }
306 }
307 if (rc) {
308 DBGC (golan ,"[%s]RC is %s[%x]\n", command, cmd_status_str(rc), rc);
309 }
310
311 golan->cmd_bm &= ~(1 << idx);
312 return rc;
313 }
314
315 /**
316 * Notify the HW that commands are ready
317 */
318 static inline void send_command(struct golan *golan)
319 {
320 wmb(); //Make sure the command is visible in "memory".
321 writel(cpu_to_be32(golan->cmd_bm) , &golan->iseg->cmd_dbell);
322 }
323
324 static inline int send_command_and_wait(struct golan *golan, uint32_t cmd_idx,
325 uint32_t inbox_idx, uint32_t outbox_idx, const char *command)
326 {
327 golan_calc_sig(golan, cmd_idx, inbox_idx, outbox_idx);
328 send_command(golan);
329 return golan_cmd_wait(golan, cmd_idx, command);
330 }
331
332 /**
333 * Prepare a FW command:
334 * the given command index must be valid;
335 * writes the command parameters into the command slot.
336 */
337 static inline struct golan_cmd_layout *write_cmd(struct golan *golan, int idx,
338 uint16_t opcode, uint16_t opmod,
339 uint16_t inbox_idx,
340 uint16_t outbox_idx, uint16_t inlen,
341 uint16_t outlen)
342 {
343 struct golan_cmd_layout *cmd = get_cmd(golan , idx);
344 struct golan_inbox_hdr *hdr = (struct golan_inbox_hdr *)cmd->in;
345 static uint8_t token;
346
347 memset(cmd, 0, sizeof(*cmd));
348
349 cmd->type = GOLAN_PCI_CMD_XPORT;
350 cmd->status_own = CMD_OWNER_HW;
351 cmd->outlen = cpu_to_be32(outlen);
352 cmd->inlen = cpu_to_be32(inlen);
353 hdr->opcode = cpu_to_be16(opcode);
354 hdr->opmod = cpu_to_be16(opmod);
355
356 if (inbox_idx != NO_MBOX) {
357 memset(GET_INBOX(golan, inbox_idx), 0, MAILBOX_SIZE);
358 cmd->in_ptr = VIRT_2_BE64_BUS(GET_INBOX(golan, inbox_idx));
359 cmd->token = ++token;
360 }
361 if (outbox_idx != NO_MBOX) {
362 memset(GET_OUTBOX(golan, outbox_idx), 0, MAILBOX_SIZE);
363 cmd->out_ptr = VIRT_2_BE64_BUS(GET_OUTBOX(golan, outbox_idx));
364 }
365
366 golan->cmd_bm |= 1 << idx;
367
368 assert ( cmd != NULL );
369 return cmd;
370 }
371
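/*
 * The canonical FW command round-trip used throughout this file ( a
 * compiled-out sketch for orientation; the function name is illustrative ):
 * build the command with write_cmd(), ring the doorbell and poll the
 * ownership bit with send_command_and_wait(), then fold the FW status
 * into the return code.
 */
#if 0
static int golan_fw_command_example ( struct golan *golan ) {
	struct golan_cmd_layout *cmd;
	int rc;

	cmd = write_cmd ( golan, DEF_CMD_IDX, GOLAN_CMD_OP_ENABLE_HCA, 0x0,
			  NO_MBOX, NO_MBOX,
			  sizeof ( struct golan_enable_hca_mbox_in ),
			  sizeof ( struct golan_enable_hca_mbox_out ) );
	rc = send_command_and_wait ( golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX,
				     __FUNCTION__ );
	return golan_check_rc_and_cmd_status ( cmd, rc );
}
#endif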
372 static inline int golan_core_enable_hca(struct golan *golan)
373 {
374 struct golan_cmd_layout *cmd;
375 int rc = 0;
376
377 DBGC(golan, "%s\n", __FUNCTION__);
378
379 cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_ENABLE_HCA, 0x0,
380 NO_MBOX, NO_MBOX,
381 sizeof(struct golan_enable_hca_mbox_in),
382 sizeof(struct golan_enable_hca_mbox_out));
383
384 rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__);
385 GOLAN_PRINT_RC_AND_CMD_STATUS;
386 return rc;
387 }
388
389 static inline void golan_disable_hca(struct golan *golan)
390 {
391 struct golan_cmd_layout *cmd;
392 int rc;
393
394 cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_DISABLE_HCA, 0x0,
395 NO_MBOX, NO_MBOX,
396 sizeof(struct golan_disable_hca_mbox_in),
397 sizeof(struct golan_disable_hca_mbox_out));
398 rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__);
399 GOLAN_PRINT_RC_AND_CMD_STATUS;
400 }
401
402 static inline int golan_set_hca_cap(struct golan *golan)
403 {
404 struct golan_cmd_layout *cmd;
405 int rc;
406
407 DBGC(golan, "%s\n", __FUNCTION__);
408
409 cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_SET_HCA_CAP, 0x0,
410 GEN_MBOX, NO_MBOX,
411 sizeof(struct golan_cmd_set_hca_cap_mbox_in),
412 sizeof(struct golan_cmd_set_hca_cap_mbox_out));
413
414 golan->caps.flags &= ~GOLAN_DEV_CAP_FLAG_CMDIF_CSUM;
415 DBGC( golan , "%s caps.uar_sz = %d\n", __FUNCTION__, golan->caps.uar_sz);
416 DBGC( golan , "%s caps.log_pg_sz = %d\n", __FUNCTION__, golan->caps.log_pg_sz);
417 DBGC( golan , "%s caps.uar_page_sz = %d\n", __FUNCTION__, be32_to_cpu(golan->caps.uar_page_sz));
418 golan->caps.uar_page_sz = 0;
419
420
421 memcpy(((struct golan_hca_cap *)GET_INBOX(golan, GEN_MBOX)),
422 &(golan->caps),
423 sizeof(struct golan_hca_cap));
424
425 // if the command failed, we should reset the caps in golan->caps
426 rc = send_command_and_wait(golan, DEF_CMD_IDX, GEN_MBOX, NO_MBOX, __FUNCTION__);
427 GOLAN_PRINT_RC_AND_CMD_STATUS;
428 return rc;
429 }
430
431 static inline int golan_qry_hca_cap(struct golan *golan)
432 {
433 struct golan_cmd_layout *cmd;
434 int rc = 0;
435
436 cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_QUERY_HCA_CAP, 0x1,
437 NO_MBOX, GEN_MBOX,
438 sizeof(struct golan_cmd_query_hca_cap_mbox_in),
439 sizeof(struct golan_cmd_query_hca_cap_mbox_out));
440
441 rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, GEN_MBOX, __FUNCTION__);
442 GOLAN_CHECK_RC_AND_CMD_STATUS( err_query_hca_cap );
443
444 memcpy(&(golan->caps),
445 ((struct golan_hca_cap *)GET_OUTBOX(golan, GEN_MBOX)),
446 sizeof(struct golan_hca_cap));
447 err_query_hca_cap:
448 return rc;
449 }
450
451 static inline int golan_take_pages ( struct golan *golan, uint32_t pages, __be16 func_id ) {
452 uint32_t out_num_entries = 0;
453 int size_ibox = sizeof(struct golan_manage_pages_inbox);
454 int size_obox = sizeof(struct golan_manage_pages_outbox);
455 int rc = 0;
456
457 DBGC(golan, "%s\n", __FUNCTION__);
458
459 while ( pages > 0 ) {
460 uint32_t pas_num = min(pages, MAX_PASE_MBOX);
461 unsigned i;
462 struct golan_cmd_layout *cmd;
463 struct golan_manage_pages_inbox *in;
464 struct golan_manage_pages_outbox_data *out;
465
466 size_ibox += (pas_num * GOLAN_PAS_SIZE);
467 size_obox += (pas_num * GOLAN_PAS_SIZE);
468
469 cmd = write_cmd(golan, MEM_CMD_IDX, GOLAN_CMD_OP_MANAGE_PAGES, GOLAN_PAGES_TAKE,
470 MEM_MBOX, MEM_MBOX,
471 size_ibox,
472 size_obox);
473
474 in = (struct golan_manage_pages_inbox *)cmd->in; /* Warning: we can't use the last 2 fields */
475
476 in->func_id = func_id; /* Already BE */
477 in->num_entries = cpu_to_be32(pas_num);
478
479 if ( ( rc = send_command_and_wait(golan, MEM_CMD_IDX, MEM_MBOX, MEM_MBOX, __FUNCTION__) ) == 0 ) {
480 out = (struct golan_manage_pages_outbox_data *)GET_OUTBOX(golan, MEM_MBOX);
481 out_num_entries = be32_to_cpu(((struct golan_manage_pages_outbox *)(cmd->out))->num_entries);
482 for (i = 0; i < out_num_entries; ++i) {
483 ufree(BE64_BUS_2_USR(out->pas[i]));
484 }
485 } else {
486 if ( rc == -EBUSY ) {
487 DBGC (golan ,"HCA is busy (rc = -EBUSY)\n" );
488 } else {
489 DBGC (golan ,"%s: rc =0x%x[%s]<%x> syn 0x%x[0x%x] for %d pages\n",
490 __FUNCTION__, rc, cmd_status_str(rc),
491 CMD_SYND(golan, MEM_CMD_IDX),
492 get_cmd( golan , MEM_CMD_IDX )->status_own,
493 be32_to_cpu(CMD_SYND(golan, MEM_CMD_IDX)), pas_num);
494 }
495 return rc;
496 }
497
498 pages -= out_num_entries;
499 }
500 DBGC( golan , "%s Pages handled\n", __FUNCTION__);
501 return 0;
502 }
503
504 static inline int golan_provide_pages ( struct golan *golan , uint32_t pages, __be16 func_id ) {
505 struct mbox *mailbox;
506 int size_ibox = sizeof(struct golan_manage_pages_inbox);
507 int size_obox = sizeof(struct golan_manage_pages_outbox);
508 int rc = 0;
509
510 DBGC(golan, "%s\n", __FUNCTION__);
511
512 while ( pages > 0 ) {
513 uint32_t pas_num = min(pages, MAX_PASE_MBOX);
514 unsigned i, j;
515 struct golan_cmd_layout *cmd;
516 struct golan_manage_pages_inbox *in;
517 userptr_t addr = 0;
518
519 mailbox = GET_INBOX(golan, MEM_MBOX);
520 size_ibox += (pas_num * GOLAN_PAS_SIZE);
521 size_obox += (pas_num * GOLAN_PAS_SIZE);
522
523 cmd = write_cmd(golan, MEM_CMD_IDX, GOLAN_CMD_OP_MANAGE_PAGES, GOLAN_PAGES_GIVE,
524 MEM_MBOX, MEM_MBOX,
525 size_ibox,
526 size_obox);
527
528 in = (struct golan_manage_pages_inbox *)cmd->in; /* Warning: we can't use the last 2 fields */
529
530 in->func_id = func_id; /* Already BE */
531 in->num_entries = cpu_to_be32(pas_num);
532
533 for ( i = 0 , j = MANAGE_PAGES_PSA_OFFSET; i < pas_num; ++i ,++j ) {
534 if (!(addr = umalloc(GOLAN_PAGE_SIZE))) {
535 rc = -ENOMEM;
536 DBGC (golan ,"Couldn't allocate page\n");
537 goto malloc_dma_failed;
538 }
539 if (GOLAN_PAGE_MASK & user_to_phys(addr, 0)) {
540 DBGC (golan ,"Addr not page aligned [%lx %lx]\n", user_to_phys(addr, 0), addr);
541 }
542 mailbox->mblock.data[j] = USR_2_BE64_BUS(addr);
543 }
544
545 if ( ( rc = send_command_and_wait(golan, MEM_CMD_IDX, MEM_MBOX, MEM_MBOX, __FUNCTION__) ) == 0 ) {
546 pages -= pas_num;
547 golan->total_dma_pages += pas_num;
548 } else {
549 if ( rc == -EBUSY ) {
550 DBGC (golan ,"HCA is busy (rc = -EBUSY)\n" );
551 } else {
552 DBGC (golan ,"%s: rc =0x%x[%s]<%x> syn 0x%x[0x%x] for %d pages\n",
553 __FUNCTION__, rc, cmd_status_str(rc),
554 CMD_SYND(golan, MEM_CMD_IDX),
555 get_cmd( golan , MEM_CMD_IDX )->status_own,
556 be32_to_cpu(CMD_SYND(golan, MEM_CMD_IDX)), pas_num);
557 }
558 ufree ( addr );
559 goto err_send_command;
560 }
561 }
562 DBGC( golan , "%s Pages handled\n", __FUNCTION__);
563 return 0;
564
565 err_send_command:
566 malloc_dma_failed:
567 /* Go over In box and free pages */
568 /* Send Error to FW */
569 /* What is next - Disable HCA? */
570 DBGC (golan ,"%s Failed (rc = 0x%x)\n", __FUNCTION__, rc);
571 return rc;
572 }
573
574 static inline int golan_handle_pages(struct golan *golan,
575 enum golan_qry_pages_mode qry,
576 enum golan_manage_pages_mode mode)
577 {
578 struct golan_cmd_layout *cmd;
579
580 int rc = 0;
581 int32_t pages;
582 uint16_t total_pages;
583 __be16 func_id;
584
585 DBGC(golan, "%s\n", __FUNCTION__);
586
587 cmd = write_cmd(golan, MEM_CMD_IDX, GOLAN_CMD_OP_QUERY_PAGES, qry,
588 NO_MBOX, NO_MBOX,
589 sizeof(struct golan_query_pages_inbox),
590 sizeof(struct golan_query_pages_outbox));
591
592 rc = send_command_and_wait(golan, MEM_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__);
593 GOLAN_CHECK_RC_AND_CMD_STATUS( err_handle_pages_query );
594
595 pages = be32_to_cpu(QRY_PAGES_OUT(golan, MEM_CMD_IDX)->num_pages);
596
597 DBGC( golan , "%s pages needed: %d\n", __FUNCTION__, pages);
598
599 func_id = QRY_PAGES_OUT(golan, MEM_CMD_IDX)->func_id;
600
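/* Note: QUERY_PAGES reports a signed count; a negative value means pages
 * are to be reclaimed, so only the magnitude is used below. */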
601 total_pages = (( pages >= 0 ) ? pages : ( pages * ( -1 ) ));
602
603 if ( mode == GOLAN_PAGES_GIVE ) {
604 rc = golan_provide_pages(golan, total_pages, func_id);
605 } else {
606 rc = golan_take_pages(golan, golan->total_dma_pages, func_id);
607 golan->total_dma_pages = 0;
608 }
609
610 if ( rc ) {
611 DBGC (golan , "Failed to %s pages (rc = %d) - DMA pages allocated = %d\n",
612 ( ( mode == GOLAN_PAGES_GIVE ) ? "give" : "take" ), rc , golan->total_dma_pages );
613 return rc;
614 }
615
616 return 0;
617
618 err_handle_pages_query:
619 DBGC (golan ,"%s Query pages failed (rc = 0x%x)\n", __FUNCTION__, rc);
620 return rc;
621 }
622
623 static inline int golan_set_access_reg ( struct golan *golan __attribute__ (( unused )), uint32_t reg __attribute__ (( unused )))
624 {
625 #if 0
626 write_cmd(golan, _CMD_IDX, GOLAN_CMD_OP_QUERY_PAGES, 0x0,
627 NO_MBOX, NO_MBOX,
628 sizeof(struct golan_reg_host_endianess),
629 sizeof(struct golan_reg_host_endianess));
630 in->arg = cpu_to_be32(arg);
631 in->register_id = cpu_to_be16(reg_num);
632 #endif
633 DBGC (golan ," %s Not implemented yet\n", __FUNCTION__);
634 return 0;
635 }
636
637 static inline void golan_cmd_uninit ( struct golan *golan )
638 {
639 free_dma(golan->mboxes.outbox, GOLAN_PAGE_SIZE);
640 free_dma(golan->mboxes.inbox, GOLAN_PAGE_SIZE);
641 free_dma(golan->cmd.addr, GOLAN_PAGE_SIZE);
642 }
643
644 /**
645 * Initialise Golan Command Q parameters
646 * -- Allocate a 4KB page for the Command Q
647 * -- Read the stride and log num commands available
648 * -- Write the address to cmdq_phy_addr in iseg
649 * @v golan Golan device
650 */
651 static inline int golan_cmd_init ( struct golan *golan )
652 {
653 int rc = 0;
654 uint32_t addr_l_sz;
655
656 if (!(golan->cmd.addr = malloc_dma(GOLAN_PAGE_SIZE , GOLAN_PAGE_SIZE))) {
657 rc = -ENOMEM;
658 goto malloc_dma_failed;
659 }
660 if (!(golan->mboxes.inbox = malloc_dma(GOLAN_PAGE_SIZE , GOLAN_PAGE_SIZE))) {
661 rc = -ENOMEM;
662 goto malloc_dma_inbox_failed;
663 }
664 if (!(golan->mboxes.outbox = malloc_dma(GOLAN_PAGE_SIZE , GOLAN_PAGE_SIZE))) {
665 rc = -ENOMEM;
666 goto malloc_dma_outbox_failed;
667 }
668 addr_l_sz = be32_to_cpu(readl(&golan->iseg->cmdq_addr_l_sz));
669
670 golan->cmd.log_stride = addr_l_sz & 0xf;
671 golan->cmd.size = 1 << (( addr_l_sz >> 4 ) & 0xf);
672
673 addr_l_sz = virt_to_bus(golan->cmd.addr);
674 writel(0 /* cpu_to_be32(golan->cmd.addr) >> 32 */, &golan->iseg->cmdq_addr_h);
675 writel(cpu_to_be32(addr_l_sz), &golan->iseg->cmdq_addr_l_sz);
676 wmb(); //Make sure the addr is visible in "memory".
677
678 addr_l_sz = be32_to_cpu(readl(&golan->iseg->cmdq_addr_l_sz));
679
680 DBGC( golan , "%s Command interface was initialized\n", __FUNCTION__);
681 return 0;
682
683 malloc_dma_outbox_failed:
684 free_dma(golan->mboxes.inbox, GOLAN_PAGE_SIZE);
685 malloc_dma_inbox_failed:
686 free_dma(golan->cmd.addr, GOLAN_PAGE_SIZE);
687 malloc_dma_failed:
688 DBGC (golan ,"%s Failed to initialize command interface (rc = 0x%x)\n",
689 __FUNCTION__, rc);
690 return rc;
691 }
692
693 static inline int golan_hca_init(struct golan *golan)
694 {
695 struct golan_cmd_layout *cmd;
696 int rc = 0;
697
698 DBGC(golan, "%s\n", __FUNCTION__);
699
700 cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_INIT_HCA, 0x0,
701 NO_MBOX, NO_MBOX,
702 sizeof(struct golan_cmd_init_hca_mbox_in),
703 sizeof(struct golan_cmd_init_hca_mbox_out));
704
705 rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__);
706 GOLAN_PRINT_RC_AND_CMD_STATUS;
707 return rc;
708 }
709
710 static inline void golan_teardown_hca(struct golan *golan, enum golan_teardown op_mod)
711 {
712 struct golan_cmd_layout *cmd;
713 int rc;
714
715 DBGC (golan, "%s in\n", __FUNCTION__);
716
717 cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_TEARDOWN_HCA, op_mod,
718 NO_MBOX, NO_MBOX,
719 sizeof(struct golan_cmd_teardown_hca_mbox_in),
720 sizeof(struct golan_cmd_teardown_hca_mbox_out));
721
722 rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__);
723 GOLAN_PRINT_RC_AND_CMD_STATUS;
724
725 DBGC (golan, "%s HCA teardown completed\n", __FUNCTION__);
726 }
727
728 static inline int golan_alloc_uar(struct golan *golan)
729 {
730 struct golan_uar *uar = &golan->uar;
731 struct golan_cmd_layout *cmd;
732 struct golan_alloc_uar_mbox_out *out;
733 int rc;
734
735 cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_ALLOC_UAR, 0x0,
736 NO_MBOX, NO_MBOX,
737 sizeof(struct golan_alloc_uar_mbox_in),
738 sizeof(struct golan_alloc_uar_mbox_out));
739
740 rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__);
741 GOLAN_CHECK_RC_AND_CMD_STATUS( err_alloc_uar_cmd );
742 out = (struct golan_alloc_uar_mbox_out *) ( cmd->out );
743
744 uar->index = be32_to_cpu(out->uarn) & 0xffffff;
745
746 uar->phys = (pci_bar_start(golan->pci, GOLAN_HCA_BAR) + (uar->index << GOLAN_PAGE_SHIFT));
747 uar->virt = (void *)(ioremap(uar->phys, GOLAN_PAGE_SIZE));
748
749 DBGC( golan , "%s: UAR allocated with index 0x%x\n", __FUNCTION__, uar->index);
750 return 0;
751
752 err_alloc_uar_cmd:
753 DBGC (golan ,"%s [%d] out\n", __FUNCTION__, rc);
754 return rc;
755 }
756
757 static void golan_dealloc_uar(struct golan *golan)
758 {
759 struct golan_cmd_layout *cmd;
760 uint32_t uar_index = golan->uar.index;
761 int rc;
762
763 DBGC (golan, "%s in\n", __FUNCTION__);
764
765 cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_DEALLOC_UAR, 0x0,
766 NO_MBOX, NO_MBOX,
767 sizeof(struct golan_free_uar_mbox_in),
768 sizeof(struct golan_free_uar_mbox_out));
769
770 ((struct golan_free_uar_mbox_in *)(cmd->in))->uarn = cpu_to_be32(uar_index);
771 rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__);
772 GOLAN_PRINT_RC_AND_CMD_STATUS;
773 golan->uar.index = 0;
774
775 DBGC (golan, "%s UAR (0x%x) was destroyed\n", __FUNCTION__, uar_index);
776 }
777
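/* Update the EQ consumer index and optionally re-arm the EQ. As the
 * pointer arithmetic below implies, the doorbell is a pair of 32-bit
 * registers: a write to the first ( offset 0 ) re-arms the EQ, while a
 * write to the second ( 8 bytes in ) only updates the consumer index. */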
778 static void golan_eq_update_ci(struct golan_event_queue *eq, int arm)
779 {
780 __be32 *addr = eq->doorbell + (arm ? 0 : 2);
781 u32 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);
782 writel(cpu_to_be32(val) , addr);
783 /* We still want ordering, just not swabbing, so add a barrier */
784 wmb();
785 }
786
787 static int golan_create_eq(struct golan *golan)
788 {
789 struct golan_event_queue *eq = &golan->eq;
790 struct golan_create_eq_mbox_in_data *in;
791 struct golan_cmd_layout *cmd;
792 struct golan_create_eq_mbox_out *out;
793 int rc, i;
794 userptr_t addr;
795
796 eq->cons_index = 0;
797 eq->size = GOLAN_NUM_EQES * sizeof(eq->eqes[0]);
798 addr = golan_get_page ( &golan->pages );
799 if (!addr) {
800 rc = -ENOMEM;
801 goto err_create_eq_eqe_alloc;
802 }
803 eq->eqes = (struct golan_eqe *)user_to_virt(addr, 0);
804
805 /* Set EQEs ownership bit to HW ownership */
806 for (i = 0; i < GOLAN_NUM_EQES; ++i) {
807 eq->eqes[i].owner = GOLAN_EQE_HW_OWNERSHIP;
808 }
809
810 cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_CREATE_EQ, 0x0,
811 GEN_MBOX, NO_MBOX,
812 sizeof(struct golan_create_eq_mbox_in) + GOLAN_PAS_SIZE,
813 sizeof(struct golan_create_eq_mbox_out));
814
815 in = (struct golan_create_eq_mbox_in_data *)GET_INBOX(golan, GEN_MBOX);
816
817 /* Fill the physical address of the page */
818 in->pas[0] = USR_2_BE64_BUS(addr);
819 in->ctx.log_sz_usr_page = cpu_to_be32((ilog2(GOLAN_NUM_EQES)) << 24 | golan->uar.index);
820 DBGC( golan , "UAR idx %x (BE %x)\n", golan->uar.index, in->ctx.log_sz_usr_page);
821 in->events_mask = cpu_to_be64(1 << GOLAN_EVENT_TYPE_PORT_CHANGE);
822
823 rc = send_command_and_wait(golan, DEF_CMD_IDX, GEN_MBOX, NO_MBOX, __FUNCTION__);
824 GOLAN_CHECK_RC_AND_CMD_STATUS( err_create_eq_cmd );
825 out = (struct golan_create_eq_mbox_out *)cmd->out;
826
827 eq->eqn = out->eq_number;
828 eq->doorbell = ((void *)golan->uar.virt) + GOLAN_EQ_DOORBELL_OFFSET;
829
830 /* EQs are created in ARMED state */
831 golan_eq_update_ci(eq, GOLAN_EQ_UNARMED);
832
833 DBGC( golan , "%s: Event queue created (EQN = 0x%x)\n", __FUNCTION__, eq->eqn);
834 return 0;
835
836 err_create_eq_cmd:
837 ufree(virt_to_user(golan->eq.eqes));
838 err_create_eq_eqe_alloc:
839 DBGC (golan ,"%s [%d] out\n", __FUNCTION__, rc);
840 return rc;
841 }
842
843 static void golan_destory_eq(struct golan *golan)
844 {
845 struct golan_cmd_layout *cmd;
846 uint8_t eqn = golan->eq.eqn;
847 int rc;
848
849 DBGC (golan, "%s in\n", __FUNCTION__);
850
851 cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_DESTROY_EQ, 0x0,
852 NO_MBOX, NO_MBOX,
853 sizeof(struct golan_destroy_eq_mbox_in),
854 sizeof(struct golan_destroy_eq_mbox_out));
855
856 ((struct golan_destroy_eq_mbox_in *)(cmd->in))->eqn = eqn;
857 rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__);
858 GOLAN_PRINT_RC_AND_CMD_STATUS;
859
860 ufree(virt_to_user(golan->eq.eqes));
861 golan->eq.eqn = 0;
862
863 DBGC( golan, "%s Event queue (0x%x) was destroyed\n", __FUNCTION__, eqn);
864 }
865
866 static int golan_alloc_pd(struct golan *golan)
867 {
868 struct golan_cmd_layout *cmd;
869 struct golan_alloc_pd_mbox_out *out;
870 int rc;
871
872 cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_ALLOC_PD, 0x0,
873 NO_MBOX, NO_MBOX,
874 sizeof(struct golan_alloc_pd_mbox_in),
875 sizeof(struct golan_alloc_pd_mbox_out));
876
877 rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__);
878 GOLAN_CHECK_RC_AND_CMD_STATUS( err_alloc_pd_cmd );
879 out = (struct golan_alloc_pd_mbox_out *) ( cmd->out );
880
881 golan->pdn = (be32_to_cpu(out->pdn) & 0xffffff);
882 DBGC( golan , "%s: Protection domain created (PDN = 0x%x)\n", __FUNCTION__,
883 golan->pdn);
884 return 0;
885
886 err_alloc_pd_cmd:
887 DBGC (golan ,"%s [%d] out\n", __FUNCTION__, rc);
888 return rc;
889 }
890
891 static void golan_dealloc_pd(struct golan *golan)
892 {
893 struct golan_cmd_layout *cmd;
894 uint32_t pdn = golan->pdn;
895 int rc;
896
897 DBGC (golan,"%s in\n", __FUNCTION__);
898
899 cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_DEALLOC_PD, 0x0,
900 NO_MBOX, NO_MBOX,
901 sizeof(struct golan_alloc_pd_mbox_in),
902 sizeof(struct golan_alloc_pd_mbox_out));
903
904 ((struct golan_dealloc_pd_mbox_in *)(cmd->in))->pdn = cpu_to_be32(pdn);
905 rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__);
906 GOLAN_PRINT_RC_AND_CMD_STATUS;
907 golan->pdn = 0;
908
909 DBGC (golan ,"%s Protection domain (0x%x) was destroyed\n", __FUNCTION__, pdn);
910 }
911
912 static int golan_create_mkey(struct golan *golan)
913 {
914 struct golan_create_mkey_mbox_in_data *in;
915 struct golan_cmd_layout *cmd;
916 struct golan_create_mkey_mbox_out *out;
917 int rc;
918
919 cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_CREATE_MKEY, 0x0,
920 GEN_MBOX, NO_MBOX,
921 sizeof(struct golan_create_mkey_mbox_in),
922 sizeof(struct golan_create_mkey_mbox_out));
923
924 in = (struct golan_create_mkey_mbox_in_data *)GET_INBOX(golan, GEN_MBOX);
925
926 in->seg.flags = GOLAN_IB_ACCESS_LOCAL_WRITE | GOLAN_IB_ACCESS_LOCAL_READ;
927 in->seg.flags_pd = cpu_to_be32(golan->pdn | GOLAN_MKEY_LEN64);
928 in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << GOLAN_CREATE_MKEY_SEG_QPN_BIT);
929
930 rc = send_command_and_wait(golan, DEF_CMD_IDX, GEN_MBOX, NO_MBOX, __FUNCTION__);
931 GOLAN_CHECK_RC_AND_CMD_STATUS( err_create_mkey_cmd );
932 out = (struct golan_create_mkey_mbox_out *) ( cmd->out );
933
934 golan->mkey = ((be32_to_cpu(out->mkey) & 0xffffff) << 8);
935 DBGC( golan , "%s: Got DMA Key for local access read/write (MKEY = 0x%x)\n",
936 __FUNCTION__, golan->mkey);
937 return 0;
938 err_create_mkey_cmd:
939 DBGC (golan ,"%s [%d] out\n", __FUNCTION__, rc);
940 return rc;
941 }
942
943 static void golan_destroy_mkey(struct golan *golan)
944 {
945 struct golan_cmd_layout *cmd;
946 u32 mkey = golan->mkey;
947 int rc;
948
949 cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_DESTROY_MKEY, 0x0,
950 NO_MBOX, NO_MBOX,
951 sizeof(struct golan_destroy_mkey_mbox_in),
952 sizeof(struct golan_destroy_mkey_mbox_out));
953 ((struct golan_destroy_mkey_mbox_in *)(cmd->in))->mkey = cpu_to_be32(mkey >> 8);
954 rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__);
955 GOLAN_PRINT_RC_AND_CMD_STATUS;
956 golan->mkey = 0;
957
958 DBGC( golan , "%s DMA Key (0x%x) for local access write was destroyed\n"
959 , __FUNCTION__, mkey);
960 }
961
962
963 /**
964 * Initialise Golan PCI parameters
965 *
966 * @v golan Golan device
967 */
968 static inline void golan_pci_init(struct golan *golan)
969 {
970 struct pci_device *pci = golan->pci;
971
972 /* Fix up PCI device */
973 adjust_pci_device ( pci );
974
975 /* Get HCA BAR */
976 golan->iseg = ioremap ( pci_bar_start ( pci, GOLAN_HCA_BAR),
977 GOLAN_PCI_CONFIG_BAR_SIZE );
978 }
979
980 static inline struct golan *golan_alloc ( void )
981 {
982 void *golan = zalloc(sizeof(struct golan));
983 if ( !golan )
984 goto err_zalloc;
985
986 return golan;
987
988 err_zalloc:
989 return NULL;
990 }
991
992 /**
993 * Create completion queue
994 *
995 * @v ibdev Infiniband device
996 * @v cq Completion queue
997 * @ret rc Return status code
998 */
999 static int golan_create_cq(struct ib_device *ibdev,
1000 struct ib_completion_queue *cq)
1001 {
1002 struct golan *golan = ib_get_drvdata(ibdev);
1003 struct golan_completion_queue *golan_cq;
1004 struct golan_cmd_layout *cmd;
1005 struct golan_create_cq_mbox_in_data *in;
1006 struct golan_create_cq_mbox_out *out;
1007 int rc;
1008 unsigned int i;
1009 userptr_t addr;
1010
1011 golan_cq = zalloc(sizeof(*golan_cq));
1012 if (!golan_cq) {
1013 rc = -ENOMEM;
1014 goto err_create_cq;
1015 }
1016 golan_cq->size = sizeof(golan_cq->cqes[0]) * cq->num_cqes;
1017 golan_cq->doorbell_record = malloc_dma(GOLAN_CQ_DB_RECORD_SIZE,
1018 GOLAN_CQ_DB_RECORD_SIZE);
1019 if (!golan_cq->doorbell_record) {
1020 rc = -ENOMEM;
1021 goto err_create_cq_db_alloc;
1022 }
1023
1024 addr = golan_get_page ( &golan->pages );
1025 if (!addr) {
1026 rc = -ENOMEM;
1027 goto err_create_cq_cqe_alloc;
1028 }
1029 golan_cq->cqes = (struct golan_cqe64 *)user_to_virt(addr, 0);
1030
1031 /* Set CQEs ownership bit to HW ownership */
1032 for (i = 0; i < cq->num_cqes; ++i) {
1033 golan_cq->cqes[i].op_own = ((GOLAN_CQE_OPCODE_NOT_VALID <<
1034 GOLAN_CQE_OPCODE_BIT) |
1035 GOLAN_CQE_HW_OWNERSHIP);
1036 }
1037
1038 cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_CREATE_CQ, 0x0,
1039 GEN_MBOX, NO_MBOX,
1040 sizeof(struct golan_create_cq_mbox_in) + GOLAN_PAS_SIZE,
1041 sizeof(struct golan_create_cq_mbox_out));
1042
1043 in = (struct golan_create_cq_mbox_in_data *)GET_INBOX(golan, GEN_MBOX);
1044
1045 /* Fill the physical address of the page */
1046 in->pas[0] = USR_2_BE64_BUS(addr);
1047 in->ctx.cqe_sz_flags = GOLAN_CQE_SIZE_64 << 5;
1048 in->ctx.log_sz_usr_page = cpu_to_be32(((ilog2(cq->num_cqes)) << 24) | golan->uar.index);
1049 in->ctx.c_eqn = cpu_to_be16(golan->eq.eqn);
1050 in->ctx.db_record_addr = VIRT_2_BE64_BUS(golan_cq->doorbell_record);
1051
1052 rc = send_command_and_wait(golan, DEF_CMD_IDX, GEN_MBOX, NO_MBOX, __FUNCTION__);
1053 GOLAN_CHECK_RC_AND_CMD_STATUS( err_create_cq_cmd );
1054 out = (struct golan_create_cq_mbox_out *) ( cmd->out );
1055
1056 cq->cqn = (be32_to_cpu(out->cqn) & 0xffffff);
1057
1058 ib_cq_set_drvdata(cq, golan_cq);
1059
1060 DBGC( golan , "%s CQ created successfully (CQN = 0x%lx)\n", __FUNCTION__, cq->cqn);
1061 return 0;
1062
1063 err_create_cq_cmd:
1064 ufree(virt_to_user(golan_cq->cqes));
1065 err_create_cq_cqe_alloc:
1066 free_dma(golan_cq->doorbell_record, GOLAN_CQ_DB_RECORD_SIZE);
1067 err_create_cq_db_alloc:
1068 free ( golan_cq );
1069 err_create_cq:
1070 DBGC (golan ,"%s out rc = 0x%x\n", __FUNCTION__, rc);
1071 return rc;
1072 }
1073
1074 /**
1075 * Destroy completion queue
1076 *
1077 * @v ibdev Infiniband device
1078 * @v cq Completion queue
1079 */
1080 static void golan_destroy_cq(struct ib_device *ibdev,
1081 struct ib_completion_queue *cq)
1082 {
1083 struct golan *golan = ib_get_drvdata(ibdev);
1084 struct golan_completion_queue *golan_cq = ib_cq_get_drvdata(cq);
1085 struct golan_cmd_layout *cmd;
1086 uint32_t cqn = cq->cqn;
1087 int rc;
1088
1089 DBGC (golan, "%s in\n", __FUNCTION__);
1090
1091 cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_DESTROY_CQ, 0x0,
1092 NO_MBOX, NO_MBOX,
1093 sizeof(struct golan_destroy_cq_mbox_in),
1094 sizeof(struct golan_destroy_cq_mbox_out));
1095 ((struct golan_destroy_cq_mbox_in *)(cmd->in))->cqn = cpu_to_be32(cqn);
1096 rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__);
1097 GOLAN_PRINT_RC_AND_CMD_STATUS;
1098 cq->cqn = 0;
1099
1100 ib_cq_set_drvdata(cq, NULL);
1101 ufree(virt_to_user(golan_cq->cqes));
1102 free_dma(golan_cq->doorbell_record, GOLAN_CQ_DB_RECORD_SIZE);
1103 free(golan_cq);
1104
1105 DBGC (golan, "%s CQ number 0x%x was destroyed\n", __FUNCTION__, cqn);
1106 }
1107
1108 static void golan_cq_clean(struct ib_completion_queue *cq)
1109 {
1110 ib_poll_cq(cq->ibdev, cq);
1111 }
1112
1113 static int golan_qp_type_to_st(enum ib_queue_pair_type type)
1114 {
1115 int qpt = type;
1116
1117 switch (qpt) {
1118 case IB_QPT_RC:
1119 return GOLAN_QP_ST_RC;
1120 case IB_QPT_UD:
1121 return GOLAN_QP_ST_UD;
1122 case IB_QPT_SMI:
1123 return GOLAN_QP_ST_QP0;
1124 case IB_QPT_GSI:
1125 return GOLAN_QP_ST_QP1;
1126 case IB_QPT_ETH:
1127 default:
1128 return -EINVAL;
1129 }
1130 }
1131 #if 0
1132 static int golan_is_special_qp(enum ib_queue_pair_type type)
1133 {
1134 return (type == IB_QPT_GSI || type == IB_QPT_SMI);
1135 }
1136 #endif
1137 static int golan_create_qp_aux(struct ib_device *ibdev,
1138 struct ib_queue_pair *qp,
1139 int *qpn)
1140 {
1141 struct golan *golan = ib_get_drvdata(ibdev);
1142 struct golan_queue_pair *golan_qp;
1143 struct golan_create_qp_mbox_in_data *in;
1144 struct golan_cmd_layout *cmd;
1145 struct golan_wqe_data_seg *data;
1146 struct golan_create_qp_mbox_out *out;
1147 userptr_t addr;
1148 uint32_t wqe_size_in_bytes;
1149 uint32_t max_qp_size_in_wqes;
1150 unsigned int i;
1151 int rc;
1152
1153 golan_qp = zalloc(sizeof(*golan_qp));
1154 if (!golan_qp) {
1155 rc = -ENOMEM;
1156 goto err_create_qp;
1157 }
1158
1159 if ( ( qp->type == IB_QPT_SMI ) || ( qp->type == IB_QPT_GSI ) ||
1160 ( qp->type == IB_QPT_UD ) ) {
1161 golan_qp->rq.grh_size = ( qp->recv.num_wqes *
1162 sizeof ( golan_qp->rq.grh[0] ));
1163 }
1164
1165 /* Calculate receive queue size */
1166 golan_qp->rq.size = qp->recv.num_wqes * GOLAN_RECV_WQE_SIZE;
1167 if (GOLAN_RECV_WQE_SIZE > be16_to_cpu(golan->caps.max_wqe_sz_rq)) {
1168 DBGC (golan ,"%s receive wqe size [%zd] > max wqe size [%d]\n", __FUNCTION__,
1169 GOLAN_RECV_WQE_SIZE, be16_to_cpu(golan->caps.max_wqe_sz_rq));
1170 rc = -EINVAL;
1171 goto err_create_qp_rq_size;
1172 }
1173
1174 wqe_size_in_bytes = sizeof(golan_qp->sq.wqes[0]);
1175 /* Calculate send queue size */
1176 if (wqe_size_in_bytes > be16_to_cpu(golan->caps.max_wqe_sz_sq)) {
1177 DBGC (golan ,"%s send WQE size [%d] > max WQE size [%d]\n", __FUNCTION__,
1178 wqe_size_in_bytes,
1179 be16_to_cpu(golan->caps.max_wqe_sz_sq));
1180 rc = -EINVAL;
1181 goto err_create_qp_sq_wqe_size;
1182 }
1183 golan_qp->sq.size = (qp->send.num_wqes * wqe_size_in_bytes);
1184 max_qp_size_in_wqes = (1 << ((uint32_t)(golan->caps.log_max_qp_sz)));
1185 if (qp->send.num_wqes > max_qp_size_in_wqes) {
1186 DBGC (golan ,"%s send wq size [%d] > max wq size [%d]\n", __FUNCTION__,
1187 golan_qp->sq.size, max_qp_size_in_wqes);
1188 rc = -EINVAL;
1189 goto err_create_qp_sq_size;
1190 }
1191
1192 golan_qp->size = golan_qp->sq.size + golan_qp->rq.size;
1193
1194 /* allocate dma memory for WQEs (1 page is enough) - should change it */
1195 addr = golan_get_page ( &golan->pages );
1196 if (!addr) {
1197 rc = -ENOMEM;
1198 goto err_create_qp_wqe_alloc;
1199 }
1200 golan_qp->wqes = user_to_virt(addr, 0);
1201 golan_qp->rq.wqes = golan_qp->wqes;
1202 golan_qp->sq.wqes = golan_qp->wqes + golan_qp->rq.size;//(union golan_send_wqe *)&
1203 //(((struct golan_recv_wqe_ud *)(golan_qp->wqes))[qp->recv.num_wqes]);
1204
1205 if ( golan_qp->rq.grh_size ) {
1206 golan_qp->rq.grh = ( golan_qp->wqes +
1207 golan_qp->sq.size +
1208 golan_qp->rq.size );
1209 }
1210
1211 /* Invalidate all WQEs */
1212 data = &golan_qp->rq.wqes[0].data[0];
1213 for ( i = 0 ; i < ( golan_qp->rq.size / sizeof ( *data ) ); i++ ){
1214 data->lkey = cpu_to_be32 ( GOLAN_INVALID_LKEY );
1215 data++;
1216 }
1217
1218 golan_qp->doorbell_record = malloc_dma(sizeof(struct golan_qp_db),
1219 sizeof(struct golan_qp_db));
1220 if (!golan_qp->doorbell_record) {
1221 rc = -ENOMEM;
1222 goto err_create_qp_db_alloc;
1223 }
1224 memset(golan_qp->doorbell_record, 0, sizeof(struct golan_qp_db));
1225
1226 cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_CREATE_QP, 0x0,
1227 GEN_MBOX, NO_MBOX,
1228 sizeof(struct golan_create_qp_mbox_in) + GOLAN_PAS_SIZE,
1229 sizeof(struct golan_create_qp_mbox_out));
1230
1231 in = (struct golan_create_qp_mbox_in_data *)GET_INBOX(golan, GEN_MBOX);
1232
1233 /* Fill the physical address of the page */
1234 in->pas[0] = USR_2_BE64_BUS(addr);
1235 in->ctx.qp_counter_set_usr_page = cpu_to_be32(golan->uar.index);
1236
1237 in->ctx.flags_pd = cpu_to_be32(golan->pdn);
1238 in->ctx.flags = cpu_to_be32((golan_qp_type_to_st(qp->type)
1239 << GOLAN_QP_CTX_ST_BIT) |
1240 (GOLAN_QP_PM_MIGRATED <<
1241 GOLAN_QP_CTX_PM_STATE_BIT));
1242 // cgs set to 0, initially.
1243 // atomic mode
1244 in->ctx.rq_size_stride = ((ilog2(qp->recv.num_wqes) <<
1245 GOLAN_QP_CTX_RQ_SIZE_BIT) |
1246 (sizeof(golan_qp->rq.wqes[0]) / GOLAN_RECV_WQE_SIZE));
1247 in->ctx.sq_crq_size = cpu_to_be16(ilog2(golan_qp->sq.size / GOLAN_SEND_WQE_BB_SIZE)
1248 << GOLAN_QP_CTX_SQ_SIZE_BIT);
1249 in->ctx.cqn_send = cpu_to_be32(qp->send.cq->cqn);
1250 in->ctx.cqn_recv = cpu_to_be32(qp->recv.cq->cqn);
1251 in->ctx.db_rec_addr = VIRT_2_BE64_BUS(golan_qp->doorbell_record);
1252
1253 rc = send_command_and_wait(golan, DEF_CMD_IDX, GEN_MBOX, NO_MBOX, __FUNCTION__);
1254 GOLAN_CHECK_RC_AND_CMD_STATUS( err_create_qp_cmd );
1255 out = (struct golan_create_qp_mbox_out *)cmd->out;
1256
1257 *qpn = (be32_to_cpu(out->qpn) & 0xffffff);
1258 /*
1259 * Hardware wants QPN written in big-endian order (after
1260 * shifting) for send doorbell. Precompute this value to save
1261 * a little bit when posting sends.
1262 */
1263 golan_qp->doorbell_qpn = cpu_to_be32(*qpn << 8);
1264 golan_qp->state = GOLAN_IB_QPS_RESET;
1265
1266 ib_qp_set_drvdata(qp, golan_qp);
1267
1268 return 0;
1269
1270 err_create_qp_cmd:
1271 free_dma(golan_qp->doorbell_record, sizeof(struct golan_qp_db));
1272 err_create_qp_db_alloc:
1273 ufree((userptr_t)golan_qp->wqes);
1274 err_create_qp_wqe_alloc:
1275 err_create_qp_sq_size:
1276 err_create_qp_sq_wqe_size:
1277 err_create_qp_rq_size:
1278 free ( golan_qp );
1279 err_create_qp:
1280 return rc;
1281 }
1282
1283 /**
1284 * Create queue pair
1285 *
1286 * @v ibdev Infiniband device
1287 * @v qp Queue pair
1288 * @ret rc Return status code
1289 */
1290 static int golan_create_qp(struct ib_device *ibdev,
1291 struct ib_queue_pair *qp)
1292 {
1293 int rc, qpn = -1;
1294
1295 switch (qp->type) {
1296 case IB_QPT_UD:
1297 case IB_QPT_SMI:
1298 case IB_QPT_GSI:
1299 rc = golan_create_qp_aux(ibdev, qp, &qpn);
1300 if (rc) {
1301 DBG ( "%s Failed to create QP (rc = 0x%x)\n", __FUNCTION__, rc);
1302 return rc;
1303 }
1304 qp->qpn = qpn;
1305
1306 break;
1307 case IB_QPT_ETH:
1308 case IB_QPT_RC:
1309 default:
1310 DBG ( "%s unsupported QP type (0x%x)\n", __FUNCTION__, qp->type);
1311 return -EINVAL;
1312 }
1313
1314 return 0;
1315 }
1316
1317 static int golan_modify_qp_rst_to_init(struct ib_device *ibdev,
1318 struct ib_queue_pair *qp,
1319 struct golan_modify_qp_mbox_in_data *in)
1320 {
1321 int rc = 0;
1322
1323 in->ctx.qkey = cpu_to_be32((uint32_t)(qp->qkey));
1324
1325 in->ctx.pri_path.port = ibdev->port;
1326 in->ctx.flags |= cpu_to_be32(GOLAN_QP_PM_MIGRATED << GOLAN_QP_CTX_PM_STATE_BIT);
1327 in->ctx.pri_path.pkey_index = 0; /* default index */
1328 /* QK is 0 */
1329 /* QP cntr set 0 */
1330 return rc;
1331 }
1332
1333 static int golan_modify_qp_init_to_rtr(struct ib_device *ibdev __unused,
1334 struct ib_queue_pair *qp __unused,
1335 struct golan_modify_qp_mbox_in_data *in)
1336 {
1337 int rc = 0;
1338
1339 in->optparam = 0;
1340 return rc;
1341 }
1342
1343 static int golan_modify_qp_rtr_to_rts(struct ib_device *ibdev __unused,
1344 struct ib_queue_pair *qp __unused,
1345 struct golan_modify_qp_mbox_in_data *in)
1346 {
1347 int rc = 0;
1348
1349 in->optparam = 0;
1350 /* In the good flow, the PSN is 0 */
1351 return rc;
1352 }
1353
1354 static int golan_modify_qp_to_rst(struct ib_device *ibdev,
1355 struct ib_queue_pair *qp)
1356 {
1357 struct golan *golan = ib_get_drvdata(ibdev);
1358 struct golan_queue_pair *golan_qp = ib_qp_get_drvdata(qp);
1359 struct golan_cmd_layout *cmd;
1360 int rc;
1361
1362 cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_2RST_QP, 0x0,
1363 NO_MBOX, NO_MBOX,
1364 sizeof(struct golan_modify_qp_mbox_in),
1365 sizeof(struct golan_modify_qp_mbox_out));
1366 ((struct golan_modify_qp_mbox_in *)(cmd->in))->qpn = cpu_to_be32(qp->qpn);
1367 rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__);
1368 GOLAN_CHECK_RC_AND_CMD_STATUS( err_modify_qp_2rst_cmd );
1369
1370 golan_qp->state = GOLAN_IB_QPS_RESET;
1371 DBGC( golan , "%s QP number 0x%lx was modified to RESET\n",
1372 __FUNCTION__, qp->qpn);
1373
1374 return 0;
1375
1376 err_modify_qp_2rst_cmd:
1377 DBGC (golan ,"%s Failed to modify QP number 0x%lx (rc = 0x%x)\n",
1378 __FUNCTION__, qp->qpn, rc);
1379 return rc;
1380 }
1381
1382 static int (*golan_modify_qp_methods[])(struct ib_device *ibdev,
1383 struct ib_queue_pair *qp,
1384 struct golan_modify_qp_mbox_in_data *in) = {
1385
1386 [GOLAN_IB_QPS_RESET] = golan_modify_qp_rst_to_init,
1387 [GOLAN_IB_QPS_INIT] = golan_modify_qp_init_to_rtr,
1388 [GOLAN_IB_QPS_RTR] = golan_modify_qp_rtr_to_rts
1389 };
1390
1391 static int golan_modify_qp(struct ib_device *ibdev,
1392 struct ib_queue_pair *qp)
1393 {
1394 struct golan *golan = ib_get_drvdata(ibdev);
1395 struct golan_queue_pair *golan_qp = ib_qp_get_drvdata(qp);
1396 struct golan_modify_qp_mbox_in_data *in;
1397 struct golan_cmd_layout *cmd;
1398 enum golan_ib_qp_state prev_state;
1399 int rc;
1400 int modify_cmd[] = {GOLAN_CMD_OP_RST2INIT_QP,
1401 GOLAN_CMD_OP_INIT2RTR_QP,
1402 GOLAN_CMD_OP_RTR2RTS_QP};
1403
1404 while (golan_qp->state < GOLAN_IB_QPS_RTS) {
1405 prev_state = golan_qp->state;
1406 cmd = write_cmd(golan, DEF_CMD_IDX, modify_cmd[golan_qp->state], 0x0,
1407 GEN_MBOX, NO_MBOX,
1408 sizeof(struct golan_modify_qp_mbox_in),
1409 sizeof(struct golan_modify_qp_mbox_out));
1410
1411 in = (struct golan_modify_qp_mbox_in_data *)GET_INBOX(golan, GEN_MBOX);
1412 ((struct golan_modify_qp_mbox_in *)(cmd->in))->qpn = cpu_to_be32(qp->qpn);
1413 rc = golan_modify_qp_methods[golan_qp->state](ibdev, qp, in);
1414 if (rc) {
1415 goto err_modify_qp_fill_inbox;
1416 }
1417 // in->ctx.qp_counter_set_usr_page = cpu_to_be32(golan->uar.index);
1418 rc = send_command_and_wait(golan, DEF_CMD_IDX, GEN_MBOX, NO_MBOX, __FUNCTION__);
1419 GOLAN_CHECK_RC_AND_CMD_STATUS( err_modify_qp_cmd );
1420
1421 ++(golan_qp->state);
1422
1423 DBGC( golan , "%s QP number 0x%lx was modified from %s to %s\n",
1424 __FUNCTION__, qp->qpn, golan_qp_state_as_string[prev_state],
1425 golan_qp_state_as_string[golan_qp->state]);
1426 }
1427
1428 DBGC( golan , "%s QP number 0x%lx is ready to receive/send packets.\n",
1429 __FUNCTION__, qp->qpn);
1430 return 0;
1431
1432 err_modify_qp_cmd:
1433 err_modify_qp_fill_inbox:
1434 DBGC (golan ,"%s Failed to modify QP number 0x%lx (rc = 0x%x)\n",
1435 __FUNCTION__, qp->qpn, rc);
1436 return rc;
1437 }
1438
1439 /**
1440 * Destroy queue pair
1441 *
1442 * @v ibdev Infiniband device
1443 * @v qp Queue pair
1444 */
1445 static void golan_destroy_qp(struct ib_device *ibdev,
1446 struct ib_queue_pair *qp)
1447 {
1448 struct golan *golan = ib_get_drvdata(ibdev);
1449 struct golan_queue_pair *golan_qp = ib_qp_get_drvdata(qp);
1450 struct golan_cmd_layout *cmd;
1451 unsigned long qpn = qp->qpn;
1452 int rc;
1453
1454 DBGC (golan, "%s in\n", __FUNCTION__);
1455
1456 if (golan_qp->state != GOLAN_IB_QPS_RESET) {
1457 if (golan_modify_qp_to_rst(ibdev, qp)) {
1458 DBGC (golan ,"%s Failed to modify QP 0x%lx to RESET\n", __FUNCTION__,
1459 qp->qpn);
1460 }
1461 }
1462
1463 if (qp->recv.cq) {
1464 golan_cq_clean(qp->recv.cq);
1465 }
1466 if (qp->send.cq && (qp->send.cq != qp->recv.cq)) {
1467 golan_cq_clean(qp->send.cq);
1468 }
1469
1470 cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_DESTROY_QP, 0x0,
1471 NO_MBOX, NO_MBOX,
1472 sizeof(struct golan_destroy_qp_mbox_in),
1473 sizeof(struct golan_destroy_qp_mbox_out));
1474 ((struct golan_destroy_qp_mbox_in *)(cmd->in))->qpn = cpu_to_be32(qpn);
1475 rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__);
1476 GOLAN_PRINT_RC_AND_CMD_STATUS;
1477 qp->qpn = 0;
1478
1479 ib_qp_set_drvdata(qp, NULL);
1480 free_dma(golan_qp->doorbell_record, sizeof(struct golan_qp_db));
1481 ufree((userptr_t)golan_qp->wqes);
1482 free(golan_qp);
1483
1484 DBGC( golan ,"%s QP 0x%lx was destroyed\n", __FUNCTION__, qpn);
1485 }
1486
1487 /**
1488 * Calculate transmission rate
1489 *
1490 * @v rate Infiniband rate
1491 * @ret golan_rate Golan rate
1492 */
1493 static unsigned int golan_rate(enum ib_rate rate) {
1494 return (((rate >= IB_RATE_2_5) && (rate <= IB_RATE_120)) ? (rate + 5) : 0);
1495 }
1496
1497 /**
1498 * Post send work queue entry
1499 *
1500 * @v ibdev Infiniband device
1501 * @v qp Queue pair
1502 * @v av Address vector
1503 * @v iobuf I/O buffer
1504 * @ret rc Return status code
1505 */
1506 static int golan_post_send(struct ib_device *ibdev,
1507 struct ib_queue_pair *qp,
1508 struct ib_address_vector *av,
1509 struct io_buffer *iobuf)
1510 {
1511 struct golan *golan = ib_get_drvdata(ibdev);
1512 struct golan_queue_pair *golan_qp = ib_qp_get_drvdata(qp);
1513 struct golan_send_wqe_ud *wqe = NULL;
1514 struct golan_av *datagram = NULL;
1515 unsigned long wqe_idx_mask;
1516 unsigned long wqe_idx;
1517 struct golan_wqe_data_seg *data = NULL;
1518 struct golan_wqe_ctrl_seg *ctrl = NULL;
1519 // static uint8_t toggle = 0;
1520
1521
1522 wqe_idx_mask = (qp->send.num_wqes - 1);
1523 wqe_idx = (qp->send.next_idx & wqe_idx_mask);
1524 if (qp->send.iobufs[wqe_idx]) {
1525 DBGC (golan ,"%s Send queue of QPN 0x%lx is full\n", __FUNCTION__, qp->qpn);
1526 return -ENOMEM;
1527 }
1528
1529 qp->send.iobufs[wqe_idx] = iobuf;
1530
1531 // change to this
1532 //wqe_size_in_octa_words = golan_qp->sq.wqe_size_in_wqebb >> 4;
1533
1534 wqe = &golan_qp->sq.wqes[wqe_idx].ud;
1535
1536 //CHECK HW OWNERSHIP BIT ???
1537
1538 memset(wqe, 0, sizeof(*wqe));
1539
1540 ctrl = &wqe->ctrl;
1541 ctrl->opmod_idx_opcode = cpu_to_be32(GOLAN_SEND_OPCODE |
1542 ((u32)(golan_qp->sq.next_idx) <<
1543 GOLAN_WQE_CTRL_WQE_IDX_BIT));
1544 ctrl->qpn_ds = cpu_to_be32(GOLAN_SEND_UD_WQE_SIZE >> 4) |
1545 golan_qp->doorbell_qpn;
1546 ctrl->fm_ce_se = 0x8; // ce = 0b10 (request a CQE), fm = 0, se = 0
1547 data = &wqe->data;
1548 data->byte_count = cpu_to_be32(iob_len(iobuf));
1549 data->lkey = cpu_to_be32(golan->mkey);
1550 data->addr = VIRT_2_BE64_BUS(iobuf->data);
1551
1552 datagram = &wqe->datagram;
1553 datagram->key.qkey.qkey = cpu_to_be32(av->qkey);
1554 datagram->dqp_dct = cpu_to_be32((1 << 31) | av->qpn);
1555 datagram->stat_rate_sl = ((golan_rate(av->rate) << 4) | av->sl);
1556 datagram->fl_mlid = (ibdev->lid & 0x007f); /* take only the 7 low bits of the LID */
1557 datagram->rlid = cpu_to_be16(av->lid);
1558 datagram->grh_gid_fl = cpu_to_be32(av->gid_present << 30);
1559 memcpy(datagram->rgid, av->gid.bytes, 16 /* sizeof(datagram->rgid) */);
1560
1561 /*
1562 * Make sure that descriptors are written before
1563 * updating doorbell record and ringing the doorbell
1564 */
1565 ++(qp->send.next_idx);
1566 golan_qp->sq.next_idx = (golan_qp->sq.next_idx + GOLAN_WQEBBS_PER_SEND_UD_WQE);
1567 golan_qp->doorbell_record->send_db = cpu_to_be16(golan_qp->sq.next_idx);
1568 wmb();
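/* Ring the send doorbell: the first 8 bytes of the control segment are
 * written to the UAR doorbell register at offset 0x800. */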
1569 writeq(*((__be64 *)ctrl), golan->uar.virt + 0x800);// +
1570 // ((toggle++ & 0x1) ? 0x100 : 0x0));
1571 return 0;
1572 }
1573
1574 /**
1575 * Post receive work queue entry
1576 *
1577 * @v ibdev Infiniband device
1578 * @v qp Queue pair
1579 * @v iobuf I/O buffer
1580 * @ret rc Return status code
1581 */
1582 static int golan_post_recv(struct ib_device *ibdev,
1583 struct ib_queue_pair *qp,
1584 struct io_buffer *iobuf)
1585 {
1586 struct golan *golan = ib_get_drvdata(ibdev);
1587 struct golan_queue_pair *golan_qp = ib_qp_get_drvdata(qp);
1588 struct ib_work_queue *wq = &qp->recv;
1589 struct golan_recv_wqe_ud *wqe;
1590 struct ib_global_route_header *grh;
1591 struct golan_wqe_data_seg *data;
1592 unsigned int wqe_idx_mask;
1593
1594 /* Allocate work queue entry */
1595 wqe_idx_mask = (wq->num_wqes - 1);
1596 if (wq->iobufs[wq->next_idx & wqe_idx_mask]) {
1597 DBGC (golan ,"%s Receive queue of QPN 0x%lx is full\n", __FUNCTION__, qp->qpn);
1598 return -ENOMEM;
1599 }
1600
1601 wq->iobufs[wq->next_idx & wqe_idx_mask] = iobuf;
1602 wqe = & golan_qp->rq.wqes[wq->next_idx & wqe_idx_mask];
1603
1604 memset(wqe, 0, sizeof(*wqe));
1605 data = &wqe->data[0];
1606 if ( golan_qp->rq.grh ) {
1607 grh = &golan_qp->rq.grh[wq->next_idx & wqe_idx_mask];
1608 data->byte_count = cpu_to_be32 ( sizeof ( *grh ) );
1609 data->lkey = cpu_to_be32 ( golan->mkey );
1610 data->addr = VIRT_2_BE64_BUS ( grh );
1611 data++;
1612 }
1613
1614 data->byte_count = cpu_to_be32(iob_tailroom(iobuf));
1615 data->lkey = cpu_to_be32(golan->mkey);
1616 data->addr = VIRT_2_BE64_BUS(iobuf->data);
1617
1618 ++wq->next_idx;
1619
1620 /*
1621 * Make sure that descriptors are written before
1622 * updating doorbell record and ringing the doorbell
1623 */
1624 wmb();
1625 golan_qp->doorbell_record->recv_db = cpu_to_be16(qp->recv.next_idx & 0xffff);
1626
1627 return 0;
1628 }
1629
1630 static int golan_query_vport_context ( struct ib_device *ibdev ) {
1631 struct golan *golan = ib_get_drvdata ( ibdev );
1632 struct golan_cmd_layout *cmd;
1633 struct golan_query_hca_vport_context_data *context_data;
1634 int rc;
1635
1636 cmd = write_cmd ( golan, DEF_CMD_IDX, GOLAN_CMD_OP_QUERY_HCA_VPORT_CONTEXT,
1637 0x0, GEN_MBOX, GEN_MBOX,
1638 sizeof(struct golan_query_hca_vport_context_inbox),
1639 sizeof(struct golan_query_hca_vport_context_outbox) );
1640
1641 ((struct golan_query_hca_vport_context_inbox *)(cmd->in))->port_num = (u8)ibdev->port;
1642
1643 rc = send_command_and_wait ( golan, DEF_CMD_IDX, GEN_MBOX, GEN_MBOX, __FUNCTION__ );
1644 GOLAN_CHECK_RC_AND_CMD_STATUS( err_query_vport_context_cmd );
1645
1646 context_data = (struct golan_query_hca_vport_context_data *)( GET_OUTBOX ( golan, GEN_MBOX ) );
1647
1648 ibdev->node_guid.dwords[0] = context_data->node_guid[0];
1649 ibdev->node_guid.dwords[1] = context_data->node_guid[1];
1650 ibdev->lid = be16_to_cpu( context_data->lid );
1651 ibdev->sm_lid = be16_to_cpu( context_data->sm_lid );
1652 ibdev->sm_sl = context_data->sm_sl;
1653 ibdev->port_state = context_data->port_state;
1654
1655 return 0;
1656 err_query_vport_context_cmd:
1657 DBGC (golan ,"%s [%d] out\n", __FUNCTION__, rc);
1658 return rc;
1659 }
1660
1661
1662 static int golan_query_vport_gid ( struct ib_device *ibdev ) {
1663 struct golan *golan = ib_get_drvdata( ibdev );
1664 struct golan_cmd_layout *cmd;
1665 union ib_gid *ib_gid;
1666 int rc;
1667
1668 cmd = write_cmd( golan, DEF_CMD_IDX, GOLAN_CMD_OP_QUERY_HCA_VPORT_GID,
1669 0x0, GEN_MBOX, GEN_MBOX,
1670 sizeof(struct golan_query_hca_vport_gid_inbox),
1671 sizeof(struct golan_query_hca_vport_gid_outbox) );
1672
1673 ((struct golan_query_hca_vport_gid_inbox *)(cmd->in))->port_num = (u8)ibdev->port;
1674 ((struct golan_query_hca_vport_gid_inbox *)(cmd->in))->gid_index = 0;
1675 rc = send_command_and_wait ( golan, DEF_CMD_IDX, GEN_MBOX, GEN_MBOX, __FUNCTION__ );
1676 GOLAN_CHECK_RC_AND_CMD_STATUS( err_query_vport_gid_cmd );
1677
1678 ib_gid = (union ib_gid *)( GET_OUTBOX ( golan, GEN_MBOX ) );
1679
1680 memcpy ( &ibdev->gid, ib_gid, sizeof(ibdev->gid) );
1681
1682 return 0;
1683 err_query_vport_gid_cmd:
1684 DBGC ( golan, "%s [%d] out\n", __FUNCTION__, rc);
1685 return rc;
1686 }
1687
1688 static int golan_query_vport_pkey ( struct ib_device *ibdev ) {
1689 struct golan *golan = ib_get_drvdata ( ibdev );
1690 struct golan_cmd_layout *cmd;
1691 //struct golan_query_hca_vport_pkey_data *pkey_table;
1692 int pkey_table_size_in_entries = (1 << (7 + golan->caps.pkey_table_size));
1693 int rc;
1694
1695 cmd = write_cmd ( golan, DEF_CMD_IDX, GOLAN_CMD_OP_QUERY_HCA_VPORT_PKEY,
1696 0x0, GEN_MBOX, GEN_MBOX,
1697 sizeof(struct golan_query_hca_vport_pkey_inbox),
1698 sizeof(struct golan_outbox_hdr) + 8 +
1699 sizeof(struct golan_query_hca_vport_pkey_data) * pkey_table_size_in_entries );
1700
1701 ((struct golan_query_hca_vport_pkey_inbox *)(cmd->in))->port_num = (u8)ibdev->port;
1702 ((struct golan_query_hca_vport_pkey_inbox *)(cmd->in))->pkey_index = 0xffff;
1703 rc = send_command_and_wait ( golan, DEF_CMD_IDX, GEN_MBOX, GEN_MBOX, __FUNCTION__ );
1704 GOLAN_CHECK_RC_AND_CMD_STATUS( err_query_vport_pkey_cmd );
1705
1706 //pkey_table = (struct golan_query_hca_vport_pkey_data *)( GET_OUTBOX ( golan, GEN_MBOX ) );
1707
1708 return 0;
1709 err_query_vport_pkey_cmd:
1710 DBGC (golan ,"%s [%d] out\n", __FUNCTION__, rc);
1711 return rc;
1712 }
1713
1714 static int golan_get_ib_info ( struct ib_device *ibdev ) {
1715 int rc;
1716
1717 rc = golan_query_vport_context ( ibdev );
1718 if ( rc != 0 ) {
1719 DBG ( "golan_get_ib_info: golan_query_vport_context Failed (rc = %d)\n",rc );
1720 goto err_query_vport_context;
1721 }
1722
1723 rc = golan_query_vport_gid ( ibdev );
1724 if ( rc != 0 ) {
1725 DBG ( "golan_get_ib_info: golan_query_vport_gid Failed (rc = %d)\n",rc );
1726 goto err_query_vport_gid;
1727 }
1728
1729 rc = golan_query_vport_pkey ( ibdev );
1730 if ( rc != 0 ) {
1731 DBG ( "golan_get_ib_info: golan_query_vport_pkey Failed (rc = %d)\n",rc );
1732 goto err_query_vport_pkey;
1733 }
1734 return rc;
1735 err_query_vport_pkey:
1736 err_query_vport_gid:
1737 err_query_vport_context:
1738 DBG ( "%s [%d] out\n", __FUNCTION__, rc);
1739 return rc;
1740 }
1741
1742 static int golan_complete(struct ib_device *ibdev,
1743 struct ib_completion_queue *cq,
1744 struct golan_cqe64 *cqe64)
1745 {
1746 struct golan *golan = ib_get_drvdata(ibdev);
1747 struct ib_work_queue *wq;
1748 struct golan_queue_pair *golan_qp;
1749 struct ib_queue_pair *qp;
1750 struct io_buffer *iobuf = NULL;
1751 struct ib_address_vector recv_dest;
1752 struct ib_address_vector recv_source;
1753 struct ib_global_route_header *grh;
1754 struct golan_err_cqe *err_cqe64;
1755 int gid_present, idx;
1756 u16 wqe_ctr;
1757 uint8_t opcode;
1758 static int error_state;
1759 uint32_t qpn = be32_to_cpu(cqe64->sop_drop_qpn) & 0xffffff;
1760 int is_send = 0;
1761 size_t len;
1762
1763 opcode = cqe64->op_own >> GOLAN_CQE_OPCODE_BIT;
1764 DBGC2( golan , "%s completion with opcode 0x%x\n", __FUNCTION__, opcode);
1765
1766 is_send = ( (opcode == GOLAN_CQE_REQ) || (opcode == GOLAN_CQE_REQ_ERR) );
1771 if (opcode == GOLAN_CQE_REQ_ERR || opcode == GOLAN_CQE_RESP_ERR) {
1772 int i;
1773 err_cqe64 = (struct golan_err_cqe *)cqe64;
1774 if (!error_state++) {
1775 DBGC (golan ,"\n");
1776 for ( i = 0 ; i < 16 ; i += 2 ) {
1777 DBGC (golan ,"%x %x\n",
1778 be32_to_cpu(((uint32_t *)(err_cqe64))[i]),
1779 be32_to_cpu(((uint32_t *)(err_cqe64))[i + 1]));
1780 }
1781 DBGC (golan ,"CQE with error: Syndrome(0x%x), VendorSynd(0x%x), HW_SYN(0x%x)\n",
1782 err_cqe64->syndrome, err_cqe64->vendor_err_synd,
1783 err_cqe64->hw_syndrom);
1784 }
1785 }
1786 /* Identify work queue */
1787 wq = ib_find_wq(cq, qpn, is_send);
1788 if (!wq) {
1789 DBGC (golan ,"%s unknown %s QPN 0x%x in CQN 0x%lx\n",
1790 __FUNCTION__, (is_send ? "send" : "recv"), qpn, cq->cqn);
1791 return -EINVAL;
1792 }
1793
1794 qp = wq->qp;
1795 golan_qp = ib_qp_get_drvdata ( qp );
1796
1797 wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
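	/* For sends, the CQE reports the counter in WQE basic-block
	 * units: mask it to the ring size, then divide by the number of
	 * WQEBBs per UD send WQE to recover the iobuf index.  Receive
	 * WQEs occupy a single entry each. */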
1798 if (is_send) {
1799 wqe_ctr &= ((GOLAN_WQEBBS_PER_SEND_UD_WQE * wq->num_wqes) - 1);
1800 idx = wqe_ctr / GOLAN_WQEBBS_PER_SEND_UD_WQE;
1801 } else {
1802 idx = wqe_ctr & (wq->num_wqes - 1);
1803 }
1804
1805 iobuf = wq->iobufs[idx];
1806 if (!iobuf) {
1807 DBGC (golan ,"%s IO Buffer 0x%x not found in QPN 0x%x\n",
1808 __FUNCTION__, idx, qpn);
1809 return -EINVAL;
1810 }
1811 wq->iobufs[idx] = NULL;
1812
1813 if (is_send) {
1814 ib_complete_send(ibdev, qp, iobuf, (opcode == GOLAN_CQE_REQ_ERR));
1815 } else {
1816 len = be32_to_cpu(cqe64->byte_cnt);
1817 memset(&recv_dest, 0, sizeof(recv_dest));
1818 recv_dest.qpn = qpn;
1819 /* Construct address vector */
1820 memset(&recv_source, 0, sizeof(recv_source));
1821 switch (qp->type) {
1822 case IB_QPT_SMI:
1823 case IB_QPT_GSI:
1824 case IB_QPT_UD:
1825 /* Locate corresponding GRH */
1826 assert ( golan_qp->rq.grh != NULL );
1827 grh = &golan_qp->rq.grh[ idx ];
1828
1829 recv_source.qpn = be32_to_cpu(cqe64->flags_rqpn) & 0xffffff;
1830 recv_source.lid = be16_to_cpu(cqe64->slid);
1831 recv_source.sl = (be32_to_cpu(cqe64->flags_rqpn) >> 24) & 0xf;
1832 gid_present = (be32_to_cpu(cqe64->flags_rqpn) >> 28) & 3;
1833 if (!gid_present) {
1834 recv_dest.gid_present = recv_source.gid_present = 0;
1835 } else {
1836 recv_dest.gid_present = recv_source.gid_present = 1;
1838 memcpy(&recv_source.gid, &grh->sgid, sizeof(recv_source.gid));
1839 memcpy(&recv_dest.gid, &grh->dgid, sizeof(recv_dest.gid));
1840 /* When gid_present is 0x3 the GRH is located in the
1841 * upper 64 bytes of a 128-byte CQE, which is currently
1842 * not supported */
1845 }
1846 len -= sizeof ( *grh );
1847 break;
1848 case IB_QPT_RC:
1849 case IB_QPT_ETH:
1850 default:
1851 DBGC (golan ,"%s Unsupported QP type (0x%x)\n", __FUNCTION__, qp->type);
1852 return -EINVAL;
1853 }
1854 assert(len <= iob_tailroom(iobuf));
1855 iob_put(iobuf, len);
1856 ib_complete_recv(ibdev, qp, &recv_dest, &recv_source, iobuf, (opcode == GOLAN_CQE_RESP_ERR));
1857 }
1858 return 0;
1859 }
1860
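/**
 * Check whether a CQE is still owned by hardware
 *
 * The owner bit toggles on every wrap of the ring, so software owns
 * the entry only when the bit matches the wrap-count parity of
 * cq->next_idx (e.g. with 64 CQEs, bit 6 of next_idx).
 *
 * @v cq		Completion queue
 * @v cqe64		Completion queue entry
 * @ret owned		Non-zero if the CQE is still owned by hardware
 */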
1861 static int golan_is_hw_ownership(struct ib_completion_queue *cq,
1862 struct golan_cqe64 *cqe64)
1863 {
1864 return ((cqe64->op_own & GOLAN_CQE_OWNER_MASK) !=
1865 ((cq->next_idx >> ilog2(cq->num_cqes)) & 1));
1866 }
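
/**
 * Poll completion queue
 *
 * @v ibdev		Infiniband device
 * @v cq		Completion queue
 */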
1867 static void golan_poll_cq(struct ib_device *ibdev,
1868 struct ib_completion_queue *cq)
1869 {
1870 unsigned int i;
1871 int rc = 0;
1872 unsigned int cqe_idx_mask;
1873 struct golan_cqe64 *cqe64;
1874 struct golan_completion_queue *golan_cq = ib_cq_get_drvdata(cq);
1875 struct golan *golan = ib_get_drvdata(ibdev);
1876
1877 for (i = 0; i < cq->num_cqes; ++i) {
1878 /* Look for completion entry */
1879 cqe_idx_mask = (cq->num_cqes - 1);
1880 cqe64 = &golan_cq->cqes[cq->next_idx & cqe_idx_mask];
1881 /* temporary valid only for 64 byte CQE */
1882 if (golan_is_hw_ownership(cq, cqe64) ||
1883 ((cqe64->op_own >> GOLAN_CQE_OPCODE_BIT) ==
1884 GOLAN_CQE_OPCODE_NOT_VALID)) {
1885 break; /* HW ownership */
1886 }
1887
1888 DBGC2( golan , "%s CQN 0x%lx [%ld]\n", __FUNCTION__, cq->cqn, cq->next_idx);
1889 /*
1890 * Make sure we read CQ entry contents after we've checked the
1891 * ownership bit. (PRM - 6.5.3.2)
1892 */
1893 rmb();
1894 rc = golan_complete(ibdev, cq, cqe64);
1895 if (rc != 0) {
1896 DBGC (golan ,"%s CQN 0x%lx failed to complete\n", __FUNCTION__, cq->cqn);
1897 }
1898
1899 /* Update completion queue's index */
1900 cq->next_idx++;
1901
1902 /* Update doorbell record */
1903 *(golan_cq->doorbell_record) = cpu_to_be32(cq->next_idx & 0xffffff);
1904 }
1905 }
1906
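/**
 * Get a printable name for an event queue entry type
 *
 * @v type		EQE type
 * @ret str		Type name
 */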
1907 static const char *golan_eqe_type_str(u8 type)
1908 {
1909 switch (type) {
1910 case GOLAN_EVENT_TYPE_COMP:
1911 return "GOLAN_EVENT_TYPE_COMP";
1912 case GOLAN_EVENT_TYPE_PATH_MIG:
1913 return "GOLAN_EVENT_TYPE_PATH_MIG";
1914 case GOLAN_EVENT_TYPE_COMM_EST:
1915 return "GOLAN_EVENT_TYPE_COMM_EST";
1916 case GOLAN_EVENT_TYPE_SQ_DRAINED:
1917 return "GOLAN_EVENT_TYPE_SQ_DRAINED";
1918 case GOLAN_EVENT_TYPE_SRQ_LAST_WQE:
1919 return "GOLAN_EVENT_TYPE_SRQ_LAST_WQE";
1920 case GOLAN_EVENT_TYPE_SRQ_RQ_LIMIT:
1921 return "GOLAN_EVENT_TYPE_SRQ_RQ_LIMIT";
1922 case GOLAN_EVENT_TYPE_CQ_ERROR:
1923 return "GOLAN_EVENT_TYPE_CQ_ERROR";
1924 case GOLAN_EVENT_TYPE_WQ_CATAS_ERROR:
1925 return "GOLAN_EVENT_TYPE_WQ_CATAS_ERROR";
1926 case GOLAN_EVENT_TYPE_PATH_MIG_FAILED:
1927 return "GOLAN_EVENT_TYPE_PATH_MIG_FAILED";
1928 case GOLAN_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
1929 return "GOLAN_EVENT_TYPE_WQ_INVAL_REQ_ERROR";
1930 case GOLAN_EVENT_TYPE_WQ_ACCESS_ERROR:
1931 return "GOLAN_EVENT_TYPE_WQ_ACCESS_ERROR";
1932 case GOLAN_EVENT_TYPE_SRQ_CATAS_ERROR:
1933 return "GOLAN_EVENT_TYPE_SRQ_CATAS_ERROR";
1934 case GOLAN_EVENT_TYPE_INTERNAL_ERROR:
1935 return "GOLAN_EVENT_TYPE_INTERNAL_ERROR";
1936 case GOLAN_EVENT_TYPE_PORT_CHANGE:
1937 return "GOLAN_EVENT_TYPE_PORT_CHANGE";
1938 case GOLAN_EVENT_TYPE_GPIO_EVENT:
1939 return "GOLAN_EVENT_TYPE_GPIO_EVENT";
1940 case GOLAN_EVENT_TYPE_REMOTE_CONFIG:
1941 return "GOLAN_EVENT_TYPE_REMOTE_CONFIG";
1942 case GOLAN_EVENT_TYPE_DB_BF_CONGESTION:
1943 return "GOLAN_EVENT_TYPE_DB_BF_CONGESTION";
1944 case GOLAN_EVENT_TYPE_STALL_EVENT:
1945 return "GOLAN_EVENT_TYPE_STALL_EVENT";
1946 case GOLAN_EVENT_TYPE_CMD:
1947 return "GOLAN_EVENT_TYPE_CMD";
1948 case GOLAN_EVENT_TYPE_PAGE_REQUEST:
1949 return "GOLAN_EVENT_TYPE_PAGE_REQUEST";
1950 default:
1951 return "Unrecognized event";
1952 }
1953 }
1954
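/**
 * Get a printable name for a port change event subtype
 *
 * @v subtype		Port change EQE subtype
 * @ret str		Subtype name
 */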
1955 static const char *golan_eqe_port_subtype_str(u8 subtype)
1956 {
1957 switch (subtype) {
1958 case GOLAN_PORT_CHANGE_SUBTYPE_DOWN:
1959 return "GOLAN_PORT_CHANGE_SUBTYPE_DOWN";
1960 case GOLAN_PORT_CHANGE_SUBTYPE_ACTIVE:
1961 return "GOLAN_PORT_CHANGE_SUBTYPE_ACTIVE";
1962 case GOLAN_PORT_CHANGE_SUBTYPE_INITIALIZED:
1963 return "GOLAN_PORT_CHANGE_SUBTYPE_INITIALIZED";
1964 case GOLAN_PORT_CHANGE_SUBTYPE_LID:
1965 return "GOLAN_PORT_CHANGE_SUBTYPE_LID";
1966 case GOLAN_PORT_CHANGE_SUBTYPE_PKEY:
1967 return "GOLAN_PORT_CHANGE_SUBTYPE_PKEY";
1968 case GOLAN_PORT_CHANGE_SUBTYPE_GUID:
1969 return "GOLAN_PORT_CHANGE_SUBTYPE_GUID";
1970 case GOLAN_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
1971 return "GOLAN_PORT_CHANGE_SUBTYPE_CLIENT_REREG";
1972 default:
1973 return "Unrecognized event";
1974 }
1975 }
1976
1977 /**
1978 * Update Infiniband parameters using Commands
1979 *
1980 * @v ibdev Infiniband device
1981 * @ret rc Return status code
1982 */
1983 static int golan_ib_update ( struct ib_device *ibdev ) {
1984 int rc;
1985
1986 /* Get IB parameters */
1987 if ( ( rc = golan_get_ib_info ( ibdev ) ) != 0 )
1988 return rc;
1989
1990 /* Notify Infiniband core of potential link state change */
1991 ib_link_state_changed ( ibdev );
1992
1993 return 0;
1994 }
1995
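/**
 * Handle a port state change event
 *
 * Refreshes the Infiniband parameters on port activation or client
 * re-registration; all recognized subtypes are logged.
 *
 * @v golan		Golan device
 * @v eqe		Port change event queue entry
 */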
1996 static inline void golan_handle_port_event(struct golan *golan, struct golan_eqe *eqe)
1997 {
1998 struct ib_device *ibdev;
1999 u8 port;
2000
2001 port = (eqe->data.port.port >> 4) & 0xf;
2002 ibdev = golan->ports[port - 1].ibdev;
2003
2004 if ( ! ib_is_open ( ibdev ) )
2005 return;
2006
2007 switch (eqe->sub_type) {
2008 case GOLAN_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
2009 case GOLAN_PORT_CHANGE_SUBTYPE_ACTIVE:
2010 golan_ib_update ( ibdev );
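			/* Fall through: the state change is also logged below */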
2011 case GOLAN_PORT_CHANGE_SUBTYPE_DOWN:
2012 case GOLAN_PORT_CHANGE_SUBTYPE_LID:
2013 case GOLAN_PORT_CHANGE_SUBTYPE_PKEY:
2014 case GOLAN_PORT_CHANGE_SUBTYPE_GUID:
2015 case GOLAN_PORT_CHANGE_SUBTYPE_INITIALIZED:
2016 DBGC( golan , "%s event %s(%d) (sub event %s(%d)) arrived on port %d\n",
2017 __FUNCTION__, golan_eqe_type_str(eqe->type), eqe->type,
2018 golan_eqe_port_subtype_str(eqe->sub_type),
2019 eqe->sub_type, port);
2020 break;
2021 default:
2022 DBGC (golan ,"%s Port event with unrecognized subtype: port %d, sub_type %d\n",
2023 __FUNCTION__, port, eqe->sub_type);
2024 }
2025 }
2026
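/**
 * Get the next software-owned event queue entry
 *
 * Uses the same owner-bit parity scheme as the completion queue.
 *
 * @v eq		Event queue
 * @ret eqe		Event queue entry, or NULL if none is pending
 */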
2027 static struct golan_eqe *golan_next_eqe_sw(struct golan_event_queue *eq)
2028 {
2029 uint32_t entry = (eq->cons_index & (GOLAN_NUM_EQES - 1));
2030 struct golan_eqe *eqe = &(eq->eqes[entry]);
2031 return ((eqe->owner != ((eq->cons_index >> ilog2(GOLAN_NUM_EQES)) & 1)) ? NULL : eqe);
2032 }
2033
2034
2035 /**
2036 * Poll event queue
2037 *
2038 * @v ibdev Infiniband device
2039 */
2040 static void golan_poll_eq(struct ib_device *ibdev)
2041 {
2042 struct golan *golan = ib_get_drvdata(ibdev);
2043 struct golan_event_queue *eq = &(golan->eq);
2044 struct golan_eqe *eqe;
2045 u32 cqn;
2046 int counter = 0;
2047
2048 while ((eqe = golan_next_eqe_sw(eq)) && (counter < GOLAN_NUM_EQES)) {
2049 /*
2050 * Make sure we read EQ entry contents after we've
2051 * checked the ownership bit.
2052 */
2053 rmb();
2054
2055 DBGC( golan , "%s eqn %d, eqe type %s\n", __FUNCTION__, eq->eqn,
2056 golan_eqe_type_str(eqe->type));
2057 switch (eqe->type) {
2058 case GOLAN_EVENT_TYPE_COMP:
2059 /* We don't need to handle completion events since we
2060 * poll all the CQs after polling the EQ */
2061 break;
2062 case GOLAN_EVENT_TYPE_PATH_MIG:
2063 case GOLAN_EVENT_TYPE_COMM_EST:
2064 case GOLAN_EVENT_TYPE_SQ_DRAINED:
2065 case GOLAN_EVENT_TYPE_SRQ_LAST_WQE:
2066 case GOLAN_EVENT_TYPE_WQ_CATAS_ERROR:
2067 case GOLAN_EVENT_TYPE_PATH_MIG_FAILED:
2068 case GOLAN_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
2069 case GOLAN_EVENT_TYPE_WQ_ACCESS_ERROR:
2070 case GOLAN_EVENT_TYPE_SRQ_RQ_LIMIT:
2071 case GOLAN_EVENT_TYPE_SRQ_CATAS_ERROR:
2072 DBGC( golan , "%s event %s(%d) arrived\n", __FUNCTION__,
2073 golan_eqe_type_str(eqe->type), eqe->type);
2074 break;
2075 case GOLAN_EVENT_TYPE_CMD:
2076 // golan_cmd_comp_handler(be32_to_cpu(eqe->data.cmd.vector));
2077 break;
2078 case GOLAN_EVENT_TYPE_PORT_CHANGE:
2079 golan_handle_port_event(golan, eqe);
2080 break;
2081 case GOLAN_EVENT_TYPE_CQ_ERROR:
2082 cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
2083 DBGC (golan ,"CQ error on CQN 0x%x, syndrom 0x%x\n",
2084 cqn, eqe->data.cq_err.syndrome);
2085 // mlx5_cq_event(dev, cqn, eqe->type);
2086 break;
2087 case GOLAN_EVENT_TYPE_PAGE_REQUEST:
2088 {
2089 /* We should check whether this event arrives while we
2090 * are waiting for a command */
2091 u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
2092 s16 npages = be16_to_cpu(eqe->data.req_pages.num_pages);
2093
2094 DBGC (golan ,"%s page request for func 0x%x, napges %d\n",
2095 __FUNCTION__, func_id, npages);
2096 golan_provide_pages(golan, npages, func_id);
2097 }
2098 break;
2099 default:
2100 DBGC (golan ,"%s Unhandled event 0x%x on EQ 0x%x\n", __FUNCTION__,
2101 eqe->type, eq->eqn);
2102 break;
2103 }
2104
2105 ++eq->cons_index;
2106 golan_eq_update_ci(eq, GOLAN_EQ_UNARMED);
2107 ++counter;
2108 }
2109 }
2110
2111 /**
2112 * Attach to multicast group
2113 *
2114 * @v ibdev Infiniband device
2115 * @v qp Queue pair
2116 * @v gid Multicast GID
2117 * @ret rc Return status code
2118 */
2119 static int golan_mcast_attach(struct ib_device *ibdev,
2120 struct ib_queue_pair *qp,
2121 union ib_gid *gid)
2122 {
2123 struct golan *golan = ib_get_drvdata(ibdev);
2124 struct golan_cmd_layout *cmd;
2125 int rc;
2126
2127 if ( qp == NULL ) {
2128 DBGC( golan, "%s: Invalid pointer, could not attach QPN to MCG\n",
2129 __FUNCTION__ );
2130 return -EFAULT;
2131 }
2132
2133 cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_ATTACH_TO_MCG, 0x0,
2134 GEN_MBOX, NO_MBOX,
2135 sizeof(struct golan_attach_mcg_mbox_in),
2136 sizeof(struct golan_attach_mcg_mbox_out));
2137 ((struct golan_attach_mcg_mbox_in *)(cmd->in))->qpn = cpu_to_be32(qp->qpn);
2138
2139 memcpy(GET_INBOX(golan, GEN_MBOX), gid, sizeof(*gid));
2140
2141 rc = send_command_and_wait(golan, DEF_CMD_IDX, GEN_MBOX, NO_MBOX, __FUNCTION__);
2142 GOLAN_CHECK_RC_AND_CMD_STATUS( err_attach_to_mcg_cmd );
2143
2144 DBGC( golan , "%s: QPN 0x%lx was attached to MCG\n", __FUNCTION__, qp->qpn);
2145 return 0;
2146 err_attach_to_mcg_cmd:
2147 DBGC (golan ,"%s [%d] out\n", __FUNCTION__, rc);
2148 return rc;
2149 }
2150
2151 /**
2152 * Detach from multicast group
2153 *
2154 * @v ibdev Infiniband device
2155 * @v qp Queue pair
2156 * @v gid Multicast GID
2158 */
2159 static void golan_mcast_detach(struct ib_device *ibdev,
2160 struct ib_queue_pair *qp,
2161 union ib_gid *gid)
2162 {
2163 struct golan *golan = ib_get_drvdata(ibdev);
2164 struct golan_cmd_layout *cmd;
2165 int rc;
2166
2167 cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_DETACH_FROM_MCG, 0x0,
2168 GEN_MBOX, NO_MBOX,
2169 sizeof(struct golan_detach_mcg_mbox_in),
2170 sizeof(struct golan_detach_mcg_mbox_out));
2171 ((struct golan_detach_mcg_mbox_in *)(cmd->in))->qpn = cpu_to_be32(qp->qpn);
2172
2173 memcpy(GET_INBOX(golan, GEN_MBOX), gid, sizeof(*gid));
2174
2175 rc = send_command_and_wait(golan, DEF_CMD_IDX, GEN_MBOX, NO_MBOX, __FUNCTION__);
2176 GOLAN_PRINT_RC_AND_CMD_STATUS;
2177
2178 DBGC( golan , "%s: QPN 0x%lx was detached from MCG\n", __FUNCTION__, qp->qpn);
2179 }
2180
2181 /**
2182 * Inform embedded subnet management agent of a received MAD
2183 *
2184 * @v ibdev Infiniband device
2185 * @v mad MAD
2186 * @ret rc Return status code
2187 */
2188 static int golan_inform_sma(struct ib_device *ibdev,
2189 union ib_mad *mad)
2190 {
2191 if (!ibdev || !mad) {
2192 return 1;
2193 }
2194
2195 return 0;
2196 }
2197
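/**
 * Register an Infiniband device and record its IPoIB network device
 *
 * @v port		Golan port
 * @ret rc		Return status code
 */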
2198 static int golan_register_ibdev(struct golan_port *port)
2199 {
2200 struct ib_device *ibdev = port->ibdev;
2201 int rc;
2202
2203 golan_get_ib_info ( ibdev );
2204 /* Register Infiniband device */
2205 if ((rc = register_ibdev(ibdev)) != 0) {
2206 DBG ( "%s port %d could not register IB device: (rc = %d)\n",
2207 __FUNCTION__, ibdev->port, rc);
2208 return rc;
2209 }
2210
2211 port->netdev = ipoib_netdev( ibdev );
2212
2213 return 0;
2214 }
2215
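/**
 * Bring down the HCA, releasing the resources acquired by
 * golan_bring_up() in reverse order
 *
 * @v golan		Golan device
 */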
2216 static inline void golan_bring_down(struct golan *golan)
2217 {
2219 DBGC(golan, "%s: start\n", __FUNCTION__);
2220
2221 if ( ! ( golan->flags & GOLAN_OPEN ) ) {
2222 DBGC(golan, "%s: end (already closed)\n", __FUNCTION__);
2223 return;
2224 }
2225
2226 golan_destroy_mkey(golan);
2227 golan_dealloc_pd(golan);
2228 golan_destory_eq(golan);
2229 golan_dealloc_uar(golan);
2230 golan_teardown_hca(golan, GOLAN_TEARDOWN_GRACEFUL);
2231 golan_handle_pages(golan, GOLAN_REG_PAGES , GOLAN_PAGES_TAKE);
2232 golan_disable_hca(golan);
2233 golan_cmd_uninit(golan);
2234 golan->flags &= ~GOLAN_OPEN;
2235 DBGC(golan, "%s: end\n", __FUNCTION__);
2236 }
2237
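/**
 * Force the Infiniband link speed to SDR on all ports, using the
 * mlx_utils PCI gateway access method
 *
 * @v golan		Golan device
 * @ret status		Return status code
 */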
2238 static int golan_set_link_speed ( struct golan *golan ) {
2239 mlx_utils utils;
2240 mlx_status status;
2241 int i = 0;
2242
2243 memset ( &utils, 0, sizeof ( utils ) );
2244
2245 status = mlx_utils_init ( &utils, golan->pci );
2246 MLX_CHECK_STATUS ( golan->pci, status, utils_init_err, "mlx_utils_init failed" );
2247
2248 status = mlx_pci_gw_init ( &utils );
2249 MLX_CHECK_STATUS ( golan->pci, status, pci_gw_init_err, "mlx_pci_gw_init failed" );
2250
2251 for ( i = 0; i < golan->caps.num_ports; ++i ) {
2252 status = mlx_set_link_speed( &utils, i + 1, LINK_SPEED_IB, LINK_SPEED_SDR );
2253 MLX_CHECK_STATUS ( golan->pci, status, set_link_speed_err, "mlx_set_link_speed failed" );
2254 }
2255
2256 set_link_speed_err:
2257 mlx_pci_gw_teardown( &utils );
2258 pci_gw_init_err:
2259 utils_init_err:
2260 return status;
2261 }
2262
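/**
 * Bring up the HCA: initialise the command interface, enable the HCA,
 * provide boot and init pages, and allocate the UAR, EQ, PD and MKEY
 *
 * @v golan		Golan device
 * @ret rc		Return status code
 */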
2263 static inline int golan_bring_up(struct golan *golan)
2264 {
2265 int rc = 0;
2266 DBGC(golan, "%s\n", __FUNCTION__);
2267
2268 if (golan->flags & GOLAN_OPEN)
2269 return 0;
2270
2271 if (( rc = golan_cmd_init(golan) ))
2272 goto out;
2273
2274 if (( rc = golan_core_enable_hca(golan) ))
2275 goto cmd_uninit;
2276
2277 /* Query for need for boot pages */
2278 if (( rc = golan_handle_pages(golan, GOLAN_BOOT_PAGES, GOLAN_PAGES_GIVE) ))
2279 goto disable;
2280
2281 if (( rc = golan_qry_hca_cap(golan) ))
2282 goto pages;
2283
2284 if (( rc = golan_set_hca_cap(golan) ))
2285 goto pages;
2286
2287 if (( rc = golan_handle_pages(golan, GOLAN_INIT_PAGES, GOLAN_PAGES_GIVE) ))
2288 goto pages;
2289
2290 if (( rc = golan_set_link_speed ( golan ) ))
2291 goto pages_teardown;
2292
2294 if (( rc = golan_hca_init(golan) ))
2295 goto pages_2;
2296
2297 if (( rc = golan_alloc_uar(golan) ))
2298 goto teardown;
2299
2300 if (( rc = golan_create_eq(golan) ))
2301 goto de_uar;
2302
2303 if (( rc = golan_alloc_pd(golan) ))
2304 goto de_eq;
2305
2306 if (( rc = golan_create_mkey(golan) ))
2307 goto de_pd;
2308
2309 golan->flags |= GOLAN_OPEN;
2310 return 0;
2311
2313 de_pd:
2314 golan_dealloc_pd(golan);
2315 de_eq:
2316 golan_destory_eq(golan);
2317 de_uar:
2318 golan_dealloc_uar(golan);
2319 teardown:
2320 golan_teardown_hca(golan, GOLAN_TEARDOWN_GRACEFUL);
2321 pages_2:
2322 pages_teardown:
2323 golan_handle_pages(golan, GOLAN_INIT_PAGES, GOLAN_PAGES_TAKE);
2324 pages:
2325 golan_handle_pages(golan, GOLAN_BOOT_PAGES, GOLAN_PAGES_TAKE);
2326 disable:
2327 golan_disable_hca(golan);
2328 cmd_uninit:
2329 golan_cmd_uninit(golan);
2330 out:
2331 return rc;
2332 }
2333
2334 /**
2335 * Close Infiniband link
2336 *
2337 * @v ibdev Infiniband device
2338 */
2339 static void golan_ib_close ( struct ib_device *ibdev __unused ) {}
2340
2341 /**
2342 * Initialise Infiniband link
2343 *
2344 * @v ibdev Infiniband device
2345 * @ret rc Return status code
2346 */
2347 static int golan_ib_open ( struct ib_device *ibdev ) {
2348 DBG ( "%s start\n", __FUNCTION__ );
2349
2350 if ( ! ibdev )
2351 return -EINVAL;
2352
2353 golan_ib_update ( ibdev );
2354
2355 DBG ( "%s end\n", __FUNCTION__ );
2356 return 0;
2357 }
2358
2359 /** Golan Infiniband operations */
2360 static struct ib_device_operations golan_ib_operations = {
2361 .create_cq = golan_create_cq,
2362 .destroy_cq = golan_destroy_cq,
2363 .create_qp = golan_create_qp,
2364 .modify_qp = golan_modify_qp,
2365 .destroy_qp = golan_destroy_qp,
2366 .post_send = golan_post_send,
2367 .post_recv = golan_post_recv,
2368 .poll_cq = golan_poll_cq,
2369 .poll_eq = golan_poll_eq,
2370 .open = golan_ib_open,
2371 .close = golan_ib_close,
2372 .mcast_attach = golan_mcast_attach,
2373 .mcast_detach = golan_mcast_detach,
2374 .set_port_info = golan_inform_sma,
2375 .set_pkey_table = golan_inform_sma,
2376 };
2377
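/**
 * Probe PCI device using the normal (command interface) driver
 *
 * @v pci		PCI device
 * @ret rc		Return status code
 */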
2378 static int golan_probe_normal ( struct pci_device *pci ) {
2379 struct golan *golan;
2380 struct ib_device *ibdev;
2381 struct golan_port *port;
2382 int i;
2383 int rc = 0;
2384
2385 golan = golan_alloc();
2386 if ( !golan ) {
2387 rc = -ENOMEM;
2388 goto err_golan_alloc;
2389 }
2390
2391 if ( golan_init_pages( &golan->pages ) ) {
2392 rc = -ENOMEM;
2393 goto err_golan_golan_init_pages;
2394 }
2395
2396 /* Setup PCI bus and HCA BAR */
2397 pci_set_drvdata( pci, golan );
2398 golan->pci = pci;
2399 golan_pci_init( golan );
2400 /* config command queues */
2401 if ( fw_ver_and_cmdif( golan ) ) {
2402 rc = -1;
2403 goto err_fw_ver_cmdif;
2404 }
2405
2406 if ( golan_bring_up( golan ) ) {
2407 DBGC (golan ,"golan bringup failed\n");
2408 rc = -1;
2409 goto err_golan_bringup;
2410 }
2411
2412 /* Allocate Infiniband devices */
2413 for (i = 0; i < golan->caps.num_ports; ++i) {
2414 ibdev = alloc_ibdev( 0 );
2415 if ( !ibdev ) {
2416 rc = -ENOMEM;
2417 goto err_golan_probe_alloc_ibdev;
2418 }
2419 golan->ports[i].ibdev = ibdev;
2420 golan->ports[i].vep_number = 0;
2421 ibdev->op = &golan_ib_operations;
2422 ibdev->dev = &pci->dev;
2423 ibdev->port = (GOLAN_PORT_BASE + i);
2424 ib_set_drvdata( ibdev, golan );
2425 }
2426
2427 /* Register devices */
2428 for ( i = 0; i < golan->caps.num_ports; ++i ) {
2429 port = &golan->ports[i];
2430 if ((rc = golan_register_ibdev ( port ) ) != 0 )
2431 goto err_golan_probe_register_ibdev;
2432 }
2433
2434 return 0;
2435
2436 i = golan->caps.num_ports;
2437 err_golan_probe_register_ibdev:
2438 for ( i-- ; ( signed int ) i >= 0 ; i-- )
2439 unregister_ibdev ( golan->ports[i].ibdev );
2440
2441 i = golan->caps.num_ports;
2442 err_golan_probe_alloc_ibdev:
2443 for ( i-- ; ( signed int ) i >= 0 ; i-- )
2444 ibdev_put ( golan->ports[i].ibdev );
2445
2446 golan_bring_down ( golan );
2447 err_golan_bringup:
2448 err_fw_ver_cmdif:
2449 golan_free_pages( &golan->pages );
2450 err_golan_golan_init_pages:
2451 free ( golan );
2452 err_golan_alloc:
2453 DBGC (golan ,"%s rc = %d\n", __FUNCTION__, rc);
2454 return rc;
2455 }
2456
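/**
 * Remove PCI device probed by the normal driver
 *
 * @v pci		PCI device
 */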
2457 static void golan_remove_normal ( struct pci_device *pci ) {
2458 struct golan *golan = pci_get_drvdata(pci);
2459 struct golan_port *port;
2460 int i;
2461
2462 DBGC(golan, "%s\n", __FUNCTION__);
2463
2464 for ( i = ( golan->caps.num_ports - 1 ) ; i >= 0 ; i-- ) {
2465 port = &golan->ports[i];
2466 unregister_ibdev ( port->ibdev );
2467 }
2468 for ( i = ( golan->caps.num_ports - 1 ) ; i >= 0 ; i-- ) {
2469 netdev_nullify ( golan->ports[i].netdev );
2470 netdev_put ( golan->ports[i].netdev );
2471 }
2472 for ( i = ( golan->caps.num_ports - 1 ) ; i >= 0 ; i-- ) {
2473 ibdev_put ( golan->ports[i].ibdev );
2474 }
2475
2476 golan_bring_down(golan);
2477
2478 golan_free_pages( &golan->pages );
2479 free(golan);
2480 }
2481
2482 /***************************************************************************
2483 * NODNIC operations
2484 **************************************************************************/
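/**
 * Fill an Ethernet send work queue entry
 *
 * The first SHOMRON_INLINE_HEADERS_SIZE bytes of the frame are placed
 * inline in the WQE; the remainder is described by a single
 * scatter/gather data segment.
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @v av		Address vector (unused)
 * @v iobuf		I/O buffer
 * @v wqbb		Send WQE basic block to fill
 * @v wqe_index		WQE index
 * @ret status		Return status code
 */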
2485 static mlx_status shomron_fill_eth_send_wqe ( struct ib_device *ibdev,
2486 struct ib_queue_pair *qp, struct ib_address_vector *av __unused,
2487 struct io_buffer *iobuf, struct nodnic_send_wqbb *wqbb,
2488 unsigned long wqe_index ) {
2489 mlx_status status = MLX_SUCCESS;
2490 struct flexboot_nodnic *flexboot_nodnic = ib_get_drvdata ( ibdev );
2491 struct shomron_nodnic_eth_send_wqe *eth_wqe = NULL;
2492 struct flexboot_nodnic_port *port = &flexboot_nodnic->port[ibdev->port - 1];
2493 struct flexboot_nodnic_queue_pair *flexboot_nodnic_qp =
2494 ib_qp_get_drvdata ( qp );
2495 nodnic_qp *nodnic_qp = flexboot_nodnic_qp->nodnic_queue_pair;
2496 struct nodnic_send_ring *send_ring = &nodnic_qp->send;
2497 mlx_uint32 qpn = 0;
2498
2499 eth_wqe = (struct shomron_nodnic_eth_send_wqe *)wqbb;
2500 memset ( eth_wqe, 0, sizeof ( *eth_wqe ) );
2502
2503 status = nodnic_port_get_qpn(&port->port_priv, &send_ring->nodnic_ring,
2504 &qpn);
2505 if ( status != MLX_SUCCESS ) {
2506 DBG("nodnic_port_get_qpn failed\n");
2507 goto err;
2508 }
2509
2510 #define SHOMRON_GENERATE_CQE 0x3
2511 #define SHOMRON_INLINE_HEADERS_SIZE 18
2512 #define SHOMRON_INLINE_HEADERS_OFFSET 32
2513 MLX_FILL_2 ( &eth_wqe->ctrl, 0, opcode, FLEXBOOT_NODNIC_OPCODE_SEND,
2514 wqe_index, wqe_index & 0xFFFF);
2515 MLX_FILL_2 ( &eth_wqe->ctrl, 1, ds, 0x4 , qpn, qpn );
2516 MLX_FILL_1 ( &eth_wqe->ctrl, 2,
2517 ce, SHOMRON_GENERATE_CQE /* generate completion */
2518 );
2519 MLX_FILL_2 ( &eth_wqe->ctrl, 7,
2520 inline_headers1,
2521 cpu_to_be16(*(mlx_uint16 *)iobuf->data),
2522 inline_headers_size, SHOMRON_INLINE_HEADERS_SIZE
2523 );
2524 memcpy((void *)&eth_wqe->ctrl + SHOMRON_INLINE_HEADERS_OFFSET,
2525 iobuf->data + 2, SHOMRON_INLINE_HEADERS_SIZE - 2);
2526 iob_pull(iobuf, SHOMRON_INLINE_HEADERS_SIZE);
2527 MLX_FILL_1 ( &eth_wqe->data[0], 0,
2528 byte_count, iob_len ( iobuf ) );
2529 MLX_FILL_1 ( &eth_wqe->data[0], 1, l_key,
2530 flexboot_nodnic->device_priv.lkey );
2531 MLX_FILL_H ( &eth_wqe->data[0], 2,
2532 local_address_h, virt_to_bus ( iobuf->data ) );
2533 MLX_FILL_1 ( &eth_wqe->data[0], 3,
2534 local_address_l, virt_to_bus ( iobuf->data ) );
2535 err:
2536 return status;
2537 }
2538
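/**
 * Parse a completion queue entry into generic completion data
 *
 * @v cqe		Completion queue entry
 * @v cqe_data		Completion data to fill in
 * @ret status		Return status code
 */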
2539 static mlx_status shomron_fill_completion( void *cqe, struct cqe_data *cqe_data ) {
2540 union shomronprm_completion_entry *cq_entry;
2541 uint32_t opcode;
2542
2543 cq_entry = (union shomronprm_completion_entry *)cqe;
2544 cqe_data->owner = MLX_GET ( &cq_entry->normal, owner );
2545 opcode = MLX_GET ( &cq_entry->normal, opcode );
2546 #define FLEXBOOT_NODNIC_OPCODE_CQ_SEND 0
2547 #define FLEXBOOT_NODNIC_OPCODE_CQ_RECV 2
2548 #define FLEXBOOT_NODNIC_OPCODE_CQ_SEND_ERR 13
2549 #define FLEXBOOT_NODNIC_OPCODE_CQ_RECV_ERR 14
2550 cqe_data->is_error =
2551 ( opcode >= FLEXBOOT_NODNIC_OPCODE_CQ_SEND_ERR );
2552 if ( cqe_data->is_error ) {
2553 cqe_data->syndrome = MLX_GET ( &cq_entry->error, syndrome );
2554 cqe_data->vendor_err_syndrome =
2555 MLX_GET ( &cq_entry->error, vendor_error_syndrome );
2556 cqe_data->is_send =
2557 (opcode == FLEXBOOT_NODNIC_OPCODE_CQ_SEND_ERR);
2558 } else {
2559 cqe_data->is_send =
2560 (opcode == FLEXBOOT_NODNIC_OPCODE_CQ_SEND);
2561 cqe_data->wqe_counter = MLX_GET ( &cq_entry->normal, wqe_counter );
2562 cqe_data->byte_cnt = MLX_GET ( &cq_entry->normal, byte_cnt );
2564 }
2565 if ( cqe_data->is_send == TRUE )
2566 cqe_data->qpn = MLX_GET ( &cq_entry->normal, qpn );
2567 else
2568 cqe_data->qpn = MLX_GET ( &cq_entry->normal, srqn );
2569
2570 return 0;
2571 }
2572
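/**
 * Set the initial owner bit on every CQE in a ring
 *
 * @v cq		CQE ring
 * @v num_cqes		Number of CQEs in the ring
 * @ret status		Return status code
 */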
2573 static mlx_status shomron_cqe_set_owner ( void *cq, unsigned int num_cqes ) {
2574 unsigned int i = 0;
2575 union shomronprm_completion_entry *cq_list;
2576
2577 cq_list = (union shomronprm_completion_entry *)cq;
2578 for ( ; i < num_cqes ; i++ )
2579 MLX_FILL_1 ( &cq_list[i].normal, 15, owner, 1 );
2580 return 0;
2581 }
2582
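/**
 * Get completion queue entry size
 *
 * @ret size		CQE size in bytes
 */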
2583 static mlx_size shomron_get_cqe_size ( void ) {
2584 return sizeof ( union shomronprm_completion_entry );
2585 }
2586
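/** NODNIC callbacks for Shomron-based devices */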
2587 struct flexboot_nodnic_callbacks shomron_nodnic_callbacks = {
2588 .get_cqe_size = shomron_get_cqe_size,
2589 .fill_send_wqe[IB_QPT_ETH] = shomron_fill_eth_send_wqe,
2590 .fill_completion = shomron_fill_completion,
2591 .cqe_set_owner = shomron_cqe_set_owner,
2592 .irq = flexboot_nodnic_eth_irq,
2593 };
2594
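/** Non-zero if the NODNIC driver is in use for this device */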
2595 static int shomron_nodnic_supported = 0;
2596
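/**
 * Check whether the device supports the NODNIC interface
 *
 * Connect-IB (device ID 0x1011) does not support NODNIC; other devices
 * are queried via flexboot_nodnic_is_supported().
 *
 * @v pci		PCI device
 * @ret supported	Non-zero if NODNIC is supported
 */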
2597 static int shomron_nodnic_is_supported ( struct pci_device *pci ) {
2598 if ( pci->device == 0x1011 )
2599 return 0;
2600
2601 return flexboot_nodnic_is_supported ( pci );
2602 }
2603 /**************************************************************************/
2604
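/**
 * Probe PCI device
 *
 * Uses the NODNIC driver when the device supports it, and falls back
 * to the normal Golan driver otherwise.
 *
 * @v pci		PCI device
 * @ret rc		Return status code
 */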
2605 static int golan_probe ( struct pci_device *pci ) {
2606 int rc = -ENOTSUP;
2607
2608 DBG ( "%s: start\n", __FUNCTION__ );
2609
2610 if ( ! pci ) {
2611 DBG ( "%s: PCI is NULL\n", __FUNCTION__ );
2612 rc = -EINVAL;
2613 goto probe_done;
2614 }
2615
2616 shomron_nodnic_supported = shomron_nodnic_is_supported ( pci );
2617 if ( shomron_nodnic_supported ) {
2618 rc = flexboot_nodnic_probe ( pci, &shomron_nodnic_callbacks, NULL );
2619 if ( rc == 0 ) {
2620 DBG ( "%s: Using NODNIC driver\n", __FUNCTION__ );
2621 goto probe_done;
2622 }
2623 shomron_nodnic_supported = 0;
2624 }
2625
2626 if ( ! shomron_nodnic_supported ) {
2627 DBG ( "%s: Using normal driver\n", __FUNCTION__ );
2628 rc = golan_probe_normal ( pci );
2629 }
2630
2631 probe_done:
2632 DBG ( "%s: rc = %d\n", __FUNCTION__, rc );
2633 return rc;
2634 }
2635
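/**
 * Remove PCI device, dispatching to whichever driver was used at
 * probe time
 *
 * @v pci		PCI device
 */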
2636 static void golan_remove ( struct pci_device *pci ) {
2637 DBG ( "%s: start\n", __FUNCTION__ );
2638
2639 if ( ! shomron_nodnic_supported ) {
2640 DBG ( "%s: Using normal driver remove\n", __FUNCTION__ );
2641 golan_remove_normal ( pci );
2642 return;
2643 }
2644
2645 DBG ( "%s: Using NODNIC driver remove\n", __FUNCTION__ );
2646
2647 flexboot_nodnic_remove ( pci );
2648
2649 DBG ( "%s: end\n", __FUNCTION__ );
2650 }
2651
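/** Supported PCI devices */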
2652 static struct pci_device_id golan_nics[] = {
2653 PCI_ROM ( 0x15b3, 0x1011, "ConnectIB", "ConnectIB HCA driver: DevID 4113", 0 ),
2654 PCI_ROM ( 0x15b3, 0x1013, "ConnectX-4", "ConnectX-4 HCA driver, DevID 4115", 0 ),
2655 PCI_ROM ( 0x15b3, 0x1015, "ConnectX-4Lx", "ConnectX-4Lx HCA driver, DevID 4117", 0 ),
2656 };
2657
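/** Golan PCI driver */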
2658 struct pci_driver golan_driver __pci_driver = {
2659 .ids = golan_nics,
2660 .id_count = (sizeof(golan_nics) / sizeof(golan_nics[0])),
2661 .probe = golan_probe,
2662 .remove = golan_remove,
2663 };