[golan] Set log_max_qp to 1
/*
 * Copyright (C) 2013-2015 Mellanox Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

FILE_LICENCE ( GPL2_OR_LATER );

#include <errno.h>
#include <strings.h>
#include <ipxe/malloc.h>
#include <ipxe/umalloc.h>
#include <ipxe/infiniband.h>
#include <ipxe/ib_smc.h>
#include <ipxe/iobuf.h>
#include <ipxe/netdevice.h>
#include "flexboot_nodnic.h"
#include <ipxe/ethernet.h>
#include <ipxe/if_ether.h>
#include <usr/ifmgmt.h>
#include <ipxe/in.h>
#include <byteswap.h>
#include "mlx_utils/include/public/mlx_pci_gw.h"
#include <config/general.h>
#include <ipxe/ipoib.h>
#include "mlx_nodnic/include/mlx_port.h"
#include "nodnic_shomron_prm.h"
#include "golan.h"
#include "mlx_utils/include/public/mlx_bail.h"
#include "mlx_utils/mlx_lib/mlx_link_speed/mlx_link_speed.h"


#define DEVICE_IS_CIB( device ) ( device == 0x1011 )

/******************************************************************************/
/************* Very simple memory management for umalloced pages **************/
/******* Temporary solution until full memory management is implemented *******/
/******************************************************************************/

struct golan_page {
	struct list_head list;
	userptr_t addr;
};

static void golan_free_fw_areas ( struct golan *golan ) {
	int i;

	for (i = 0; i < GOLAN_FW_AREAS_NUM; i++) {
		if ( golan->fw_areas[i].area ) {
			ufree ( golan->fw_areas[i].area );
			golan->fw_areas[i].area = UNULL;
		}
	}
}

static int golan_init_fw_areas ( struct golan *golan ) {
	int rc = 0, i = 0;

	if ( ! golan ) {
		rc = -EINVAL;
		goto err_golan_init_fw_areas_bad_param;
	}

	for (i = 0; i < GOLAN_FW_AREAS_NUM; i++)
		golan->fw_areas[i].area = UNULL;

	return rc;

err_golan_init_fw_areas_bad_param:
	return rc;
}

/******************************************************************************/

const char *golan_qp_state_as_string[] = {
	"RESET",
	"INIT",
	"RTR",
	"RTS",
	"SQD",
	"SQE",
	"ERR"
};

static inline int golan_check_rc_and_cmd_status ( struct golan_cmd_layout *cmd, int rc ) {
	struct golan_outbox_hdr *out_hdr = ( struct golan_outbox_hdr * ) ( cmd->out );
	if ( rc == -EBUSY ) {
		DBG ( "HCA is busy (rc = -EBUSY)\n" );
		return rc;
	} else if ( out_hdr->status ) {
		DBG("%s status = 0x%x - syndrome = 0x%x\n", __FUNCTION__,
		    out_hdr->status, be32_to_cpu(out_hdr->syndrome));
		return out_hdr->status;
	}
	return 0;
}

#define GOLAN_CHECK_RC_AND_CMD_STATUS(_label)					\
	do {									\
		if ( ( rc = golan_check_rc_and_cmd_status ( cmd, rc ) ) )	\
			goto _label;						\
	} while (0)

#define GOLAN_PRINT_RC_AND_CMD_STATUS	golan_check_rc_and_cmd_status ( cmd, rc )


struct mbox {
	union {
		struct golan_cmd_prot_block	mblock;
		u8	data[MAILBOX_STRIDE];
		__be64	qdata[MAILBOX_STRIDE >> 3];
	};
};

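/*
 * Integer log2 via find-last-set: for x > 0, ilog2(x) == floor(log2(x))
 * (e.g. ilog2(4096) == 12).  Callers must never pass 0, for which
 * fls() - 1 would yield -1.
 */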
static inline uint32_t ilog2(uint32_t mem)
{
	return ( fls ( mem ) - 1 );
}

#define CTRL_SIG_SZ	(sizeof(mailbox->mblock) - sizeof(mailbox->mblock.bdata) - 2)

static inline u8 xor8_buf(void *buf, int len)
{
	u8 sum = 0;
	int i;
	u8 *ptr = buf;

	for (i = 0; i < len; ++i)
		sum ^= ptr[i];

	return sum;
}

static inline const char *cmd_status_str(u8 status)
{
	switch (status) {
	case 0x0:	return "OK";
	case 0x1:	return "internal error";
	case 0x2:	return "bad operation";
	case 0x3:	return "bad parameter";
	case 0x4:	return "bad system state";
	case 0x5:	return "bad resource";
	case 0x6:	return "resource busy";
	case 0x8:	return "limits exceeded";
	case 0x9:	return "bad resource state";
	case 0xa:	return "bad index";
	case 0xf:	return "no resources";
	case 0x50:	return "bad input length";
	case 0x51:	return "bad output length";
	case 0x10:	return "bad QP state";
	case 0x30:	return "bad packet (discarded)";
	case 0x40:	return "bad size too many outstanding CQEs";
	case 0xff:	return "Command Timed Out";
	default:	return "unknown status";
	}
}

static inline uint16_t fw_rev_maj(struct golan *golan)
{
	return be32_to_cpu(readl(&golan->iseg->fw_rev)) & 0xffff;
}

static inline u16 fw_rev_min(struct golan *golan)
{
	return be32_to_cpu(readl(&golan->iseg->fw_rev)) >> 16;
}

static inline u16 fw_rev_sub(struct golan *golan)
{
	return be32_to_cpu(readl(&golan->iseg->cmdif_rev_fw_sub)) & 0xffff;
}

static inline u16 cmdif_rev(struct golan *golan)
{
	return be32_to_cpu(readl(&golan->iseg->cmdif_rev_fw_sub)) >> 16;
}


static inline struct golan_cmd_layout *get_cmd( struct golan *golan, int idx )
{
	return golan->cmd.addr + (idx << golan->cmd.log_stride);
}

static inline void golan_calc_sig(struct golan *golan, uint32_t cmd_idx,
				  uint32_t inbox_idx, uint32_t outbox_idx)
{
	struct golan_cmd_layout *cmd = get_cmd(golan, cmd_idx);
	struct mbox *mailbox = NULL;

	if (inbox_idx != NO_MBOX) {
		mailbox = GET_INBOX(golan, inbox_idx);
		mailbox->mblock.token = cmd->token;
		mailbox->mblock.ctrl_sig = ~xor8_buf(mailbox->mblock.rsvd0,
						     CTRL_SIG_SZ);
	}
	if (outbox_idx != NO_MBOX) {
		mailbox = GET_OUTBOX(golan, outbox_idx);
		mailbox->mblock.token = cmd->token;
		mailbox->mblock.ctrl_sig = ~xor8_buf(mailbox->mblock.rsvd0,
						     CTRL_SIG_SZ);
	}
	cmd->sig = ~xor8_buf(cmd, sizeof(*cmd));
}

static inline void show_out_status(uint32_t *out)
{
	DBG("%x\n", be32_to_cpu(out[0]));
	DBG("%x\n", be32_to_cpu(out[1]));
	DBG("%x\n", be32_to_cpu(out[2]));
	DBG("%x\n", be32_to_cpu(out[3]));
}
/**
 * Check if CMD has finished.
 */
static inline uint32_t is_command_finished( struct golan *golan, int idx)
{
	wmb();
	return !(get_cmd( golan , idx )->status_own & CMD_OWNER_HW);
}

/**
 * Wait for Golan command completion
 *
 * @v golan		Golan device
 * @ret rc		Return status code
 */
static inline int golan_cmd_wait(struct golan *golan, int idx, const char *command)
{
	unsigned int wait;
	int rc = -EBUSY;

	for ( wait = GOLAN_HCR_MAX_WAIT_MS ; wait ; --wait ) {
		if (is_command_finished(golan, idx)) {
			rc = CMD_STATUS(golan, idx);
			rmb();
			break;
		} else {
			mdelay ( 1 );
		}
	}
	if (rc) {
		DBGC (golan ,"[%s]RC is %s[%x]\n", command, cmd_status_str(rc), rc);
	}

	golan->cmd_bm &= ~(1 << idx);
	return rc;
}

/**
 * Notify the HW that commands are ready
 */
static inline void send_command(struct golan *golan)
{
	wmb(); //Make sure the command is visible in "memory".
	writel(cpu_to_be32(golan->cmd_bm) , &golan->iseg->cmd_dbell);
}

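/*
 * A complete command round-trip: write_cmd() fills a HW-owned descriptor
 * (and, optionally, an inbox mailbox), golan_calc_sig() seals the
 * descriptor and mailboxes with XOR-8 signatures, send_command() rings
 * the command doorbell, and golan_cmd_wait() polls until firmware clears
 * the ownership bit or GOLAN_HCR_MAX_WAIT_MS elapses.
 */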
static inline int send_command_and_wait(struct golan *golan, uint32_t cmd_idx,
					uint32_t inbox_idx, uint32_t outbox_idx, const char *command)
{
	golan_calc_sig(golan, cmd_idx, inbox_idx, outbox_idx);
	send_command(golan);
	return golan_cmd_wait(golan, cmd_idx, command);
}

/**
 * Prepare a FW command.
 * In: command idx (must be valid).
 * Writes the command parameters.
 */
static inline struct golan_cmd_layout *write_cmd(struct golan *golan, int idx,
						 uint16_t opcode, uint16_t opmod,
						 uint16_t inbox_idx,
						 uint16_t outbox_idx, uint16_t inlen,
						 uint16_t outlen)
{
	struct golan_cmd_layout *cmd = get_cmd(golan , idx);
	struct golan_inbox_hdr *hdr = (struct golan_inbox_hdr *)cmd->in;
	static uint8_t token;

	memset(cmd, 0, sizeof(*cmd));

	cmd->type = GOLAN_PCI_CMD_XPORT;
	cmd->status_own = CMD_OWNER_HW;
	cmd->outlen = cpu_to_be32(outlen);
	cmd->inlen = cpu_to_be32(inlen);
	hdr->opcode = cpu_to_be16(opcode);
	hdr->opmod = cpu_to_be16(opmod);

	if (inbox_idx != NO_MBOX) {
		memset(GET_INBOX(golan, inbox_idx), 0, MAILBOX_SIZE);
		cmd->in_ptr = VIRT_2_BE64_BUS(GET_INBOX(golan, inbox_idx));
		cmd->token = ++token;
	}
	if (outbox_idx != NO_MBOX) {
		memset(GET_OUTBOX(golan, outbox_idx), 0, MAILBOX_SIZE);
		cmd->out_ptr = VIRT_2_BE64_BUS(GET_OUTBOX(golan, outbox_idx));
	}

	golan->cmd_bm |= 1 << idx;

	assert ( cmd != NULL );
	return cmd;
}

static inline int golan_core_enable_hca(struct golan *golan)
{
	struct golan_cmd_layout	*cmd;
	int rc = 0;

	DBGC(golan, "%s\n", __FUNCTION__);

	cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_ENABLE_HCA, 0x0,
			NO_MBOX, NO_MBOX,
			sizeof(struct golan_enable_hca_mbox_in),
			sizeof(struct golan_enable_hca_mbox_out));

	rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__);
	GOLAN_PRINT_RC_AND_CMD_STATUS;
	return rc;
}

static inline void golan_disable_hca(struct golan *golan)
{
	struct golan_cmd_layout	*cmd;
	int rc;

	cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_DISABLE_HCA, 0x0,
			NO_MBOX, NO_MBOX,
			sizeof(struct golan_disable_hca_mbox_in),
			sizeof(struct golan_disable_hca_mbox_out));
	rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__);
	GOLAN_PRINT_RC_AND_CMD_STATUS;
}

static inline int golan_set_hca_cap(struct golan *golan)
{
	struct golan_cmd_layout	*cmd;
	int rc;

	DBGC(golan, "%s\n", __FUNCTION__);

	cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_SET_HCA_CAP, 0x0,
			GEN_MBOX, NO_MBOX,
			sizeof(struct golan_cmd_set_hca_cap_mbox_in),
			sizeof(struct golan_cmd_set_hca_cap_mbox_out));

	golan->caps.flags &= ~GOLAN_DEV_CAP_FLAG_CMDIF_CSUM;
	DBGC( golan , "%s caps.uar_sz = %d\n", __FUNCTION__, golan->caps.uar_sz);
	DBGC( golan , "%s caps.log_pg_sz = %d\n", __FUNCTION__, golan->caps.log_pg_sz);
	DBGC( golan , "%s caps.uar_page_sz = %d\n", __FUNCTION__, be32_to_cpu(golan->caps.uar_page_sz));
	golan->caps.uar_page_sz = 0;
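	/* Trim the advertised QP count before writing the caps back:
	 * GOLAN_LOG_MAX_QP (1 as of this change, per the commit subject)
	 * keeps firmware QP resources to the minimum the driver needs. */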
	golan->caps.log_max_qp = GOLAN_LOG_MAX_QP;

	memcpy(((struct golan_hca_cap *)GET_INBOX(golan, GEN_MBOX)),
	       &(golan->caps),
	       sizeof(struct golan_hca_cap));

	// If the command failed we should reset the caps in golan->caps
	rc = send_command_and_wait(golan, DEF_CMD_IDX, GEN_MBOX, NO_MBOX, __FUNCTION__);
	GOLAN_PRINT_RC_AND_CMD_STATUS;
	return rc;
}

static inline int golan_qry_hca_cap(struct golan *golan)
{
	struct golan_cmd_layout	*cmd;
	int rc = 0;

	cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_QUERY_HCA_CAP, 0x1,
			NO_MBOX, GEN_MBOX,
			sizeof(struct golan_cmd_query_hca_cap_mbox_in),
			sizeof(struct golan_cmd_query_hca_cap_mbox_out));

	rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, GEN_MBOX, __FUNCTION__);
	GOLAN_CHECK_RC_AND_CMD_STATUS( err_query_hca_cap );

	memcpy(&(golan->caps),
	       ((struct golan_hca_cap *)GET_OUTBOX(golan, GEN_MBOX)),
	       sizeof(struct golan_hca_cap));
err_query_hca_cap:
	return rc;
}

static inline int golan_take_pages ( struct golan *golan, uint32_t pages, __be16 func_id ) {
	uint32_t out_num_entries = 0;
	int size_ibox = 0;
	int size_obox = 0;
	int rc = 0;

	DBGC(golan, "%s\n", __FUNCTION__);

	while ( pages > 0 ) {
		uint32_t pas_num = min(pages, MAX_PASE_MBOX);
		struct golan_cmd_layout	*cmd;
		struct golan_manage_pages_inbox *in;

		size_ibox = sizeof(struct golan_manage_pages_inbox) + (pas_num * GOLAN_PAS_SIZE);
		size_obox = sizeof(struct golan_manage_pages_outbox) + (pas_num * GOLAN_PAS_SIZE);

		cmd = write_cmd(golan, MEM_CMD_IDX, GOLAN_CMD_OP_MANAGE_PAGES, GOLAN_PAGES_TAKE,
				MEM_MBOX, MEM_MBOX,
				size_ibox,
				size_obox);

		in = (struct golan_manage_pages_inbox *)cmd->in; /* Warning (WE CAN'T USE THE LAST 2 FIELDS) */

		in->func_id = func_id; /* Already BE */
		in->num_entries = cpu_to_be32(pas_num);

		if ( ( rc = send_command_and_wait(golan, MEM_CMD_IDX, MEM_MBOX, MEM_MBOX, __FUNCTION__) ) == 0 ) {
			out_num_entries = be32_to_cpu(((struct golan_manage_pages_outbox *)(cmd->out))->num_entries);
		} else {
			if ( rc == -EBUSY ) {
				DBGC (golan ,"HCA is busy (rc = -EBUSY)\n" );
			} else {
				DBGC (golan ,"%s: rc =0x%x[%s]<%x> syn 0x%x[0x%x] for %d pages\n",
				      __FUNCTION__, rc, cmd_status_str(rc),
				      CMD_SYND(golan, MEM_CMD_IDX),
				      get_cmd( golan , MEM_CMD_IDX )->status_own,
				      be32_to_cpu(CMD_SYND(golan, MEM_CMD_IDX)), pas_num);
			}
			return rc;
		}

		pages -= out_num_entries;
	}
	DBGC( golan , "%s Pages handled\n", __FUNCTION__);
	return rc;
}

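/*
 * Firmware page protocol: QUERY_PAGES reports how many 4KB pages firmware
 * wants for a given stage (the driver uses the absolute value of the
 * returned count), and MANAGE_PAGES either hands pages over
 * (GOLAN_PAGES_GIVE, with their bus addresses in the inbox PAS array, at
 * most MAX_PASE_MBOX per command) or reclaims them (GOLAN_PAGES_TAKE).
 */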
static inline int golan_provide_pages ( struct golan *golan , uint32_t pages
				      , __be16 func_id, struct golan_firmware_area *fw_area ) {
	struct mbox *mailbox;
	int size_ibox = 0;
	int size_obox = 0;
	int rc = 0;
	userptr_t next_page_addr = UNULL;

	DBGC(golan, "%s\n", __FUNCTION__);
	if ( ! fw_area->area ) {
		fw_area->area = umalloc ( GOLAN_PAGE_SIZE * pages );
		if ( fw_area->area == UNULL ) {
			rc = -ENOMEM;
			DBGC (golan ,"Failed to allocate %d pages\n", pages);
			goto err_golan_alloc_fw_area;
		}
		fw_area->npages = pages;
	}
	assert ( fw_area->npages == pages );
	next_page_addr = fw_area->area;
	while ( pages > 0 ) {
		uint32_t pas_num = min(pages, MAX_PASE_MBOX);
		unsigned i, j;
		struct golan_cmd_layout	*cmd;
		struct golan_manage_pages_inbox *in;
		userptr_t addr = 0;

		mailbox = GET_INBOX(golan, MEM_MBOX);
		size_ibox = sizeof(struct golan_manage_pages_inbox) + (pas_num * GOLAN_PAS_SIZE);
		size_obox = sizeof(struct golan_manage_pages_outbox) + (pas_num * GOLAN_PAS_SIZE);

		cmd = write_cmd(golan, MEM_CMD_IDX, GOLAN_CMD_OP_MANAGE_PAGES, GOLAN_PAGES_GIVE,
				MEM_MBOX, MEM_MBOX,
				size_ibox,
				size_obox);

		in = (struct golan_manage_pages_inbox *)cmd->in; /* Warning (WE CAN'T USE THE LAST 2 FIELDS) */

		in->func_id = func_id; /* Already BE */
		in->num_entries = cpu_to_be32(pas_num);

		for ( i = 0 , j = MANAGE_PAGES_PSA_OFFSET; i < pas_num; ++i ,++j,
		      next_page_addr += GOLAN_PAGE_SIZE ) {
			addr = next_page_addr;
			if (GOLAN_PAGE_MASK & user_to_phys(addr, 0)) {
				DBGC (golan ,"Addr not page aligned [%lx %lx]\n", user_to_phys(addr, 0), addr);
			}
			mailbox->mblock.data[j] = USR_2_BE64_BUS(addr);
		}

		if ( ( rc = send_command_and_wait(golan, MEM_CMD_IDX, MEM_MBOX, MEM_MBOX, __FUNCTION__) ) == 0 ) {
			pages -= pas_num;
			golan->total_dma_pages += pas_num;
		} else {
			if ( rc == -EBUSY ) {
				DBGC (golan ,"HCA is busy (rc = -EBUSY)\n" );
			} else {
				DBGC (golan ,"%s: rc =0x%x[%s]<%x> syn 0x%x[0x%x] for %d pages\n",
				      __FUNCTION__, rc, cmd_status_str(rc),
				      CMD_SYND(golan, MEM_CMD_IDX),
				      get_cmd( golan , MEM_CMD_IDX )->status_own,
				      be32_to_cpu(CMD_SYND(golan, MEM_CMD_IDX)), pas_num);
			}
			goto err_send_command;
		}
	}
	DBGC( golan , "%s Pages handled\n", __FUNCTION__);
	return 0;

err_send_command:
err_golan_alloc_fw_area:
	/* Go over In box and free pages */
	/* Send Error to FW */
	/* What is next - Disable HCA? */
	DBGC (golan ,"%s Failed (rc = 0x%x)\n", __FUNCTION__, rc);
	return rc;
}

static inline int golan_handle_pages(struct golan *golan,
				     enum golan_qry_pages_mode qry,
				     enum golan_manage_pages_mode mode)
{
	struct golan_cmd_layout	*cmd;

	int rc = 0;
	int32_t pages;
	uint16_t total_pages;
	__be16 func_id;

	DBGC(golan, "%s\n", __FUNCTION__);

	cmd = write_cmd(golan, MEM_CMD_IDX, GOLAN_CMD_OP_QUERY_PAGES, qry,
			NO_MBOX, NO_MBOX,
			sizeof(struct golan_query_pages_inbox),
			sizeof(struct golan_query_pages_outbox));

	rc = send_command_and_wait(golan, MEM_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__);
	GOLAN_CHECK_RC_AND_CMD_STATUS( err_handle_pages_query );

	pages = be32_to_cpu(QRY_PAGES_OUT(golan, MEM_CMD_IDX)->num_pages);

	DBGC( golan , "%s pages needed: %d\n", __FUNCTION__, pages);

	func_id = QRY_PAGES_OUT(golan, MEM_CMD_IDX)->func_id;

	total_pages = (( pages >= 0 ) ? pages : ( pages * ( -1 ) ));

	if ( mode == GOLAN_PAGES_GIVE ) {
		rc = golan_provide_pages(golan, total_pages, func_id, & ( golan->fw_areas[qry-1] ));
	} else {
		rc = golan_take_pages(golan, golan->total_dma_pages, func_id);
		golan->total_dma_pages = 0;
	}

	if ( rc ) {
		DBGC (golan , "Failed to %s pages (rc = %d) - DMA pages allocated = %d\n",
		      ( ( mode == GOLAN_PAGES_GIVE ) ? "give" : "take" ), rc , golan->total_dma_pages );
		return rc;
	}

	return 0;

err_handle_pages_query:
	DBGC (golan ,"%s Query pages failed (rc = 0x%x)\n", __FUNCTION__, rc);
	return rc;
}

static inline int golan_set_access_reg ( struct golan *golan __attribute__ (( unused )), uint32_t reg __attribute__ (( unused )))
{
#if 0
	write_cmd(golan, _CMD_IDX, GOLAN_CMD_OP_QUERY_PAGES, 0x0,
		  NO_MBOX, NO_MBOX,
		  sizeof(struct golan_reg_host_endianess),
		  sizeof(struct golan_reg_host_endianess));
	in->arg = cpu_to_be32(arg);
	in->register_id = cpu_to_be16(reg_num);
#endif
	DBGC (golan ," %s Not implemented yet\n", __FUNCTION__);
	return 0;
}

static inline void golan_cmd_uninit ( struct golan *golan )
{
	free_dma(golan->mboxes.outbox, GOLAN_PAGE_SIZE);
	free_dma(golan->mboxes.inbox, GOLAN_PAGE_SIZE);
	free_dma(golan->cmd.addr, GOLAN_PAGE_SIZE);
}

/**
 * Initialise Golan Command Q parameters
 *	-- Allocate a 4KB page for the Command Q
 *	-- Read the stride and log num commands available
 *	-- Write the address to cmdq_phy_addr in iseg
 * @v golan		Golan device
 */
static inline int golan_cmd_init ( struct golan *golan )
{
	int rc = 0;
	uint32_t addr_l_sz;

	if (!(golan->cmd.addr = malloc_dma(GOLAN_PAGE_SIZE , GOLAN_PAGE_SIZE))) {
		rc = -ENOMEM;
		goto malloc_dma_failed;
	}
	if (!(golan->mboxes.inbox = malloc_dma(GOLAN_PAGE_SIZE , GOLAN_PAGE_SIZE))) {
		rc = -ENOMEM;
		goto malloc_dma_inbox_failed;
	}
	if (!(golan->mboxes.outbox = malloc_dma(GOLAN_PAGE_SIZE , GOLAN_PAGE_SIZE))) {
		rc = -ENOMEM;
		goto malloc_dma_outbox_failed;
	}
	addr_l_sz = be32_to_cpu(readl(&golan->iseg->cmdq_addr_l_sz));

	golan->cmd.log_stride = addr_l_sz & 0xf;
	golan->cmd.size = 1 << (( addr_l_sz >> 4 ) & 0xf);

	addr_l_sz = virt_to_bus(golan->cmd.addr);
	writel(0 /* cpu_to_be32(golan->cmd.addr) >> 32 */, &golan->iseg->cmdq_addr_h);
	writel(cpu_to_be32(addr_l_sz), &golan->iseg->cmdq_addr_l_sz);
	wmb(); //Make sure the addr is visible in "memory".

	addr_l_sz = be32_to_cpu(readl(&golan->iseg->cmdq_addr_l_sz));

	DBGC( golan , "%s Command interface was initialized\n", __FUNCTION__);
	return 0;

malloc_dma_outbox_failed:
	free_dma(golan->mboxes.inbox, GOLAN_PAGE_SIZE);
malloc_dma_inbox_failed:
	free_dma(golan->cmd.addr, GOLAN_PAGE_SIZE);
malloc_dma_failed:
	DBGC (golan ,"%s Failed to initialize command interface (rc = 0x%x)\n",
	      __FUNCTION__, rc);
	return rc;
}

static inline int golan_hca_init(struct golan *golan)
{
	struct golan_cmd_layout	*cmd;
	int rc = 0;

	DBGC(golan, "%s\n", __FUNCTION__);

	cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_INIT_HCA, 0x0,
			NO_MBOX, NO_MBOX,
			sizeof(struct golan_cmd_init_hca_mbox_in),
			sizeof(struct golan_cmd_init_hca_mbox_out));

	rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__);
	GOLAN_PRINT_RC_AND_CMD_STATUS;
	return rc;
}

static inline void golan_teardown_hca(struct golan *golan, enum golan_teardown op_mod)
{
	struct golan_cmd_layout	*cmd;
	int rc;

	DBGC (golan, "%s in\n", __FUNCTION__);

	cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_TEARDOWN_HCA, op_mod,
			NO_MBOX, NO_MBOX,
			sizeof(struct golan_cmd_teardown_hca_mbox_in),
			sizeof(struct golan_cmd_teardown_hca_mbox_out));

	rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__);
	GOLAN_PRINT_RC_AND_CMD_STATUS;

	DBGC (golan, "%s HCA teardown completed\n", __FUNCTION__);
}

static inline int golan_alloc_uar(struct golan *golan)
{
	struct golan_uar *uar = &golan->uar;
	struct golan_cmd_layout *cmd;
	struct golan_alloc_uar_mbox_out *out;
	int rc;

	cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_ALLOC_UAR, 0x0,
			NO_MBOX, NO_MBOX,
			sizeof(struct golan_alloc_uar_mbox_in),
			sizeof(struct golan_alloc_uar_mbox_out));

	rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__);
	GOLAN_CHECK_RC_AND_CMD_STATUS( err_alloc_uar_cmd );
	out = (struct golan_alloc_uar_mbox_out *) ( cmd->out );

	uar->index = be32_to_cpu(out->uarn) & 0xffffff;

	uar->phys = (pci_bar_start(golan->pci, GOLAN_HCA_BAR) + (uar->index << GOLAN_PAGE_SHIFT));
	uar->virt = (void *)(ioremap(uar->phys, GOLAN_PAGE_SIZE));

	DBGC( golan , "%s: UAR allocated with index 0x%x\n", __FUNCTION__, uar->index);
	return 0;

err_alloc_uar_cmd:
	DBGC (golan ,"%s [%d] out\n", __FUNCTION__, rc);
	return rc;
}

static void golan_dealloc_uar(struct golan *golan)
{
	struct golan_cmd_layout	*cmd;
	uint32_t uar_index = golan->uar.index;
	int rc;

	DBGC (golan, "%s in\n", __FUNCTION__);

	cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_DEALLOC_UAR, 0x0,
			NO_MBOX, NO_MBOX,
			sizeof(struct golan_free_uar_mbox_in),
			sizeof(struct golan_free_uar_mbox_out));

	((struct golan_free_uar_mbox_in *)(cmd->in))->uarn = cpu_to_be32(uar_index);
	rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__);
	GOLAN_PRINT_RC_AND_CMD_STATUS;
	golan->uar.index = 0;

	DBGC (golan, "%s UAR (0x%x) was destroyed\n", __FUNCTION__, uar_index);
}

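/*
 * Publish the EQ consumer index.  The doorbell is a pair of 32-bit
 * registers: a write at offset 0 also re-arms the EQ, while a write at
 * offset 8 (arm == 0) only updates the consumer index.  The EQ number
 * is packed into the top byte of the written value.
 */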
static void golan_eq_update_ci(struct golan_event_queue *eq, int arm)
{
	__be32 *addr = eq->doorbell + (arm ? 0 : 2);
	u32 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);
	writel(cpu_to_be32(val) , addr);
	/* We still want ordering, just not swabbing, so add a barrier */
	wmb();
}

static int golan_create_eq(struct golan *golan)
{
	struct golan_event_queue *eq = &golan->eq;
	struct golan_create_eq_mbox_in_data *in;
	struct golan_cmd_layout	*cmd;
	struct golan_create_eq_mbox_out *out;
	int rc, i;

	eq->cons_index = 0;
	eq->size = GOLAN_NUM_EQES * sizeof(eq->eqes[0]);
	eq->eqes = malloc_dma ( GOLAN_PAGE_SIZE, GOLAN_PAGE_SIZE );
	if (!eq->eqes) {
		rc = -ENOMEM;
		goto err_create_eq_eqe_alloc;
	}

	/* Set EQEs ownership bit to HW ownership */
	for (i = 0; i < GOLAN_NUM_EQES; ++i) {
		eq->eqes[i].owner = GOLAN_EQE_HW_OWNERSHIP;
	}

	cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_CREATE_EQ, 0x0,
			GEN_MBOX, NO_MBOX,
			sizeof(struct golan_create_eq_mbox_in) + GOLAN_PAS_SIZE,
			sizeof(struct golan_create_eq_mbox_out));

	in = (struct golan_create_eq_mbox_in_data *)GET_INBOX(golan, GEN_MBOX);

	/* Fill the physical address of the page */
	in->pas[0] = VIRT_2_BE64_BUS( eq->eqes );
	in->ctx.log_sz_usr_page = cpu_to_be32((ilog2(GOLAN_NUM_EQES)) << 24 | golan->uar.index);
	DBGC( golan , "UAR idx %x (BE %x)\n", golan->uar.index, in->ctx.log_sz_usr_page);
	in->events_mask = cpu_to_be64(1 << GOLAN_EVENT_TYPE_PORT_CHANGE);

	rc = send_command_and_wait(golan, DEF_CMD_IDX, GEN_MBOX, NO_MBOX, __FUNCTION__);
	GOLAN_CHECK_RC_AND_CMD_STATUS( err_create_eq_cmd );
	out = (struct golan_create_eq_mbox_out *)cmd->out;

	eq->eqn = out->eq_number;
	eq->doorbell = ((void *)golan->uar.virt) + GOLAN_EQ_DOORBELL_OFFSET;

	/* EQs are created in ARMED state */
	golan_eq_update_ci(eq, GOLAN_EQ_UNARMED);

	DBGC( golan , "%s: Event queue created (EQN = 0x%x)\n", __FUNCTION__, eq->eqn);
	return 0;

err_create_eq_cmd:
	free_dma ( eq->eqes , GOLAN_PAGE_SIZE );
err_create_eq_eqe_alloc:
	DBGC (golan ,"%s [%d] out\n", __FUNCTION__, rc);
	return rc;
}

static void golan_destory_eq(struct golan *golan)
{
	struct golan_cmd_layout	*cmd;
	struct golan_destroy_eq_mbox_in *in;
	uint8_t eqn = golan->eq.eqn;
	int rc;

	DBGC (golan, "%s in\n", __FUNCTION__);

	cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_DESTROY_EQ, 0x0,
			NO_MBOX, NO_MBOX,
			sizeof(struct golan_destroy_eq_mbox_in),
			sizeof(struct golan_destroy_eq_mbox_out));

	in = GOLAN_MBOX_IN ( cmd, in );
	in->eqn = eqn;
	rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__);
	GOLAN_PRINT_RC_AND_CMD_STATUS;

	free_dma ( golan->eq.eqes , GOLAN_PAGE_SIZE );
	golan->eq.eqn = 0;

	DBGC( golan, "%s Event queue (0x%x) was destroyed\n", __FUNCTION__, eqn);
}

static int golan_alloc_pd(struct golan *golan)
{
	struct golan_cmd_layout	*cmd;
	struct golan_alloc_pd_mbox_out *out;
	int rc;

	cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_ALLOC_PD, 0x0,
			NO_MBOX, NO_MBOX,
			sizeof(struct golan_alloc_pd_mbox_in),
			sizeof(struct golan_alloc_pd_mbox_out));

	rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__);
	GOLAN_CHECK_RC_AND_CMD_STATUS( err_alloc_pd_cmd );
	out = (struct golan_alloc_pd_mbox_out *) ( cmd->out );

	golan->pdn = (be32_to_cpu(out->pdn) & 0xffffff);
	DBGC( golan , "%s: Protection domain created (PDN = 0x%x)\n", __FUNCTION__,
	      golan->pdn);
	return 0;

err_alloc_pd_cmd:
	DBGC (golan ,"%s [%d] out\n", __FUNCTION__, rc);
	return rc;
}

static void golan_dealloc_pd(struct golan *golan)
{
	struct golan_cmd_layout	*cmd;
	uint32_t pdn = golan->pdn;
	int rc;

	DBGC (golan,"%s in\n", __FUNCTION__);

	cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_DEALLOC_PD, 0x0,
			NO_MBOX, NO_MBOX,
			sizeof(struct golan_alloc_pd_mbox_in),
			sizeof(struct golan_alloc_pd_mbox_out));

	((struct golan_dealloc_pd_mbox_in *)(cmd->in))->pdn = cpu_to_be32(pdn);
	rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__);
	GOLAN_PRINT_RC_AND_CMD_STATUS;
	golan->pdn = 0;

	DBGC (golan ,"%s Protection domain (0x%x) was destroyed\n", __FUNCTION__, pdn);
}

static int golan_create_mkey(struct golan *golan)
{
	struct golan_create_mkey_mbox_in_data *in;
	struct golan_cmd_layout	*cmd;
	struct golan_create_mkey_mbox_out *out;
	int rc;

	cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_CREATE_MKEY, 0x0,
			GEN_MBOX, NO_MBOX,
			sizeof(struct golan_create_mkey_mbox_in),
			sizeof(struct golan_create_mkey_mbox_out));

	in = (struct golan_create_mkey_mbox_in_data *)GET_INBOX(golan, GEN_MBOX);

	in->seg.flags = GOLAN_IB_ACCESS_LOCAL_WRITE | GOLAN_IB_ACCESS_LOCAL_READ;
	in->seg.flags_pd = cpu_to_be32(golan->pdn | GOLAN_MKEY_LEN64);
	in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << GOLAN_CREATE_MKEY_SEG_QPN_BIT);

	rc = send_command_and_wait(golan, DEF_CMD_IDX, GEN_MBOX, NO_MBOX, __FUNCTION__);
	GOLAN_CHECK_RC_AND_CMD_STATUS( err_create_mkey_cmd );
	out = (struct golan_create_mkey_mbox_out *) ( cmd->out );

	golan->mkey = ((be32_to_cpu(out->mkey) & 0xffffff) << 8);
	DBGC( golan , "%s: Got DMA Key for local access read/write (MKEY = 0x%x)\n",
	      __FUNCTION__, golan->mkey);
	return 0;
err_create_mkey_cmd:
	DBGC (golan ,"%s [%d] out\n", __FUNCTION__, rc);
	return rc;
}

static void golan_destroy_mkey(struct golan *golan)
{
	struct golan_cmd_layout	*cmd;
	u32 mkey = golan->mkey;
	int rc;

	cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_DESTROY_MKEY, 0x0,
			NO_MBOX, NO_MBOX,
			sizeof(struct golan_destroy_mkey_mbox_in),
			sizeof(struct golan_destroy_mkey_mbox_out));
	((struct golan_destroy_mkey_mbox_in *)(cmd->in))->mkey = cpu_to_be32(mkey >> 8);
	rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__);
	GOLAN_PRINT_RC_AND_CMD_STATUS;
	golan->mkey = 0;

	DBGC( golan , "%s DMA Key (0x%x) for local access write was destroyed\n"
	      , __FUNCTION__, mkey);
}


/**
 * Initialise Golan PCI parameters
 *
 * @v golan		Golan device
 */
static inline void golan_pci_init(struct golan *golan)
{
	struct pci_device *pci = golan->pci;

	/* Fix up PCI device */
	adjust_pci_device ( pci );

	/* Get HCA BAR */
	golan->iseg = ioremap ( pci_bar_start ( pci, GOLAN_HCA_BAR),
				GOLAN_PCI_CONFIG_BAR_SIZE );
}

static inline struct golan *golan_alloc()
{
	void *golan = zalloc(sizeof(struct golan));
	if ( !golan )
		goto err_zalloc;

	return golan;

err_zalloc:
	return NULL;
}

/**
 * Create completion queue
 *
 * @v ibdev		Infiniband device
 * @v cq		Completion queue
 * @ret rc		Return status code
 */
static int golan_create_cq(struct ib_device *ibdev,
			   struct ib_completion_queue *cq)
{
	struct golan *golan = ib_get_drvdata(ibdev);
	struct golan_completion_queue *golan_cq;
	struct golan_cmd_layout *cmd;
	struct golan_create_cq_mbox_in_data *in;
	struct golan_create_cq_mbox_out *out;
	int rc;
	unsigned int i;

	golan_cq = zalloc(sizeof(*golan_cq));
	if (!golan_cq) {
		rc = -ENOMEM;
		goto err_create_cq;
	}
	golan_cq->size = sizeof(golan_cq->cqes[0]) * cq->num_cqes;
	golan_cq->doorbell_record = malloc_dma(GOLAN_CQ_DB_RECORD_SIZE,
					       GOLAN_CQ_DB_RECORD_SIZE);
	if (!golan_cq->doorbell_record) {
		rc = -ENOMEM;
		goto err_create_cq_db_alloc;
	}

	golan_cq->cqes = malloc_dma ( GOLAN_PAGE_SIZE, GOLAN_PAGE_SIZE );
	if (!golan_cq->cqes) {
		rc = -ENOMEM;
		goto err_create_cq_cqe_alloc;
	}

	/* Set CQEs ownership bit to HW ownership */
	for (i = 0; i < cq->num_cqes; ++i) {
		golan_cq->cqes[i].op_own = ((GOLAN_CQE_OPCODE_NOT_VALID <<
					     GOLAN_CQE_OPCODE_BIT) |
					    GOLAN_CQE_HW_OWNERSHIP);
	}

	cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_CREATE_CQ, 0x0,
			GEN_MBOX, NO_MBOX,
			sizeof(struct golan_create_cq_mbox_in) + GOLAN_PAS_SIZE,
			sizeof(struct golan_create_cq_mbox_out));

	in = (struct golan_create_cq_mbox_in_data *)GET_INBOX(golan, GEN_MBOX);

	/* Fill the physical address of the page */
	in->pas[0] = VIRT_2_BE64_BUS( golan_cq->cqes );
	in->ctx.cqe_sz_flags = GOLAN_CQE_SIZE_64 << 5;
	in->ctx.log_sz_usr_page = cpu_to_be32(((ilog2(cq->num_cqes)) << 24) | golan->uar.index);
	in->ctx.c_eqn = cpu_to_be16(golan->eq.eqn);
	in->ctx.db_record_addr = VIRT_2_BE64_BUS(golan_cq->doorbell_record);

	rc = send_command_and_wait(golan, DEF_CMD_IDX, GEN_MBOX, NO_MBOX, __FUNCTION__);
	GOLAN_CHECK_RC_AND_CMD_STATUS( err_create_cq_cmd );
	out = (struct golan_create_cq_mbox_out *) ( cmd->out );

	cq->cqn = (be32_to_cpu(out->cqn) & 0xffffff);

	ib_cq_set_drvdata(cq, golan_cq);

	DBGC( golan , "%s CQ created successfully (CQN = 0x%lx)\n", __FUNCTION__, cq->cqn);
	return 0;

err_create_cq_cmd:
	free_dma( golan_cq->cqes , GOLAN_PAGE_SIZE );
err_create_cq_cqe_alloc:
	free_dma(golan_cq->doorbell_record, GOLAN_CQ_DB_RECORD_SIZE);
err_create_cq_db_alloc:
	free ( golan_cq );
err_create_cq:
	DBGC (golan ,"%s out rc = 0x%x\n", __FUNCTION__, rc);
	return rc;
}

/**
 * Destroy completion queue
 *
 * @v ibdev		Infiniband device
 * @v cq		Completion queue
 */
static void golan_destroy_cq(struct ib_device *ibdev,
			     struct ib_completion_queue *cq)
{
	struct golan *golan = ib_get_drvdata(ibdev);
	struct golan_completion_queue *golan_cq = ib_cq_get_drvdata(cq);
	struct golan_cmd_layout *cmd;
	uint32_t cqn = cq->cqn;
	int rc;

	DBGC (golan, "%s in\n", __FUNCTION__);

	cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_DESTROY_CQ, 0x0,
			NO_MBOX, NO_MBOX,
			sizeof(struct golan_destroy_cq_mbox_in),
			sizeof(struct golan_destroy_cq_mbox_out));
	((struct golan_destroy_cq_mbox_in *)(cmd->in))->cqn = cpu_to_be32(cqn);
	rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__);
	GOLAN_PRINT_RC_AND_CMD_STATUS;
	cq->cqn = 0;

	ib_cq_set_drvdata(cq, NULL);
	free_dma ( golan_cq->cqes , GOLAN_PAGE_SIZE );
	free_dma(golan_cq->doorbell_record, GOLAN_CQ_DB_RECORD_SIZE);
	free(golan_cq);

	DBGC (golan, "%s CQ number 0x%x was destroyed\n", __FUNCTION__, cqn);
}

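/* Drain any outstanding completions; used by golan_destroy_qp() before
 * the queues are freed.
 */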
static void golan_cq_clean(struct ib_completion_queue *cq)
{
	ib_poll_cq(cq->ibdev, cq);
}

static int golan_qp_type_to_st(enum ib_queue_pair_type type)
{
	int qpt = type;

	switch (qpt) {
	case IB_QPT_RC:
		return GOLAN_QP_ST_RC;
	case IB_QPT_UD:
		return GOLAN_QP_ST_UD;
	case IB_QPT_SMI:
		return GOLAN_QP_ST_QP0;
	case IB_QPT_GSI:
		return GOLAN_QP_ST_QP1;
	case IB_QPT_ETH:
	default:
		return -EINVAL;
	}
}
#if 0
static int golan_is_special_qp(enum ib_queue_pair_type type)
{
	return (type == IB_QPT_GSI || type == IB_QPT_SMI);
}
#endif
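/*
 * Queue pair buffers live in a single DMA page: receive WQEs first,
 * then send WQEs, with the GRH scratch buffers (for SMI/GSI/UD
 * receives) appended after both rings.
 */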
static int golan_create_qp_aux(struct ib_device *ibdev,
			       struct ib_queue_pair *qp,
			       int *qpn)
{
	struct golan *golan = ib_get_drvdata(ibdev);
	struct golan_queue_pair *golan_qp;
	struct golan_create_qp_mbox_in_data *in;
	struct golan_cmd_layout *cmd;
	struct golan_wqe_data_seg *data;
	struct golan_create_qp_mbox_out *out;
	uint32_t wqe_size_in_bytes;
	uint32_t max_qp_size_in_wqes;
	unsigned int i;
	int rc;

	golan_qp = zalloc(sizeof(*golan_qp));
	if (!golan_qp) {
		rc = -ENOMEM;
		goto err_create_qp;
	}

	if ( ( qp->type == IB_QPT_SMI ) || ( qp->type == IB_QPT_GSI ) ||
	     ( qp->type == IB_QPT_UD ) ) {
		golan_qp->rq.grh_size = ( qp->recv.num_wqes *
					  sizeof ( golan_qp->rq.grh[0] ));
	}

	/* Calculate receive queue size */
	golan_qp->rq.size = qp->recv.num_wqes * GOLAN_RECV_WQE_SIZE;
	if (GOLAN_RECV_WQE_SIZE > be16_to_cpu(golan->caps.max_wqe_sz_rq)) {
		DBGC (golan ,"%s receive wqe size [%zd] > max wqe size [%d]\n", __FUNCTION__,
		      GOLAN_RECV_WQE_SIZE, be16_to_cpu(golan->caps.max_wqe_sz_rq));
		rc = -EINVAL;
		goto err_create_qp_rq_size;
	}

	wqe_size_in_bytes = sizeof(golan_qp->sq.wqes[0]);
	/* Calculate send queue size */
	if (wqe_size_in_bytes > be16_to_cpu(golan->caps.max_wqe_sz_sq)) {
		DBGC (golan ,"%s send WQE size [%d] > max WQE size [%d]\n", __FUNCTION__,
		      wqe_size_in_bytes,
		      be16_to_cpu(golan->caps.max_wqe_sz_sq));
		rc = -EINVAL;
		goto err_create_qp_sq_wqe_size;
	}
	golan_qp->sq.size = (qp->send.num_wqes * wqe_size_in_bytes);
	max_qp_size_in_wqes = (1 << ((uint32_t)(golan->caps.log_max_qp_sz)));
	if (qp->send.num_wqes > max_qp_size_in_wqes) {
		DBGC (golan ,"%s send wq size [%d] > max wq size [%d]\n", __FUNCTION__,
		      golan_qp->sq.size, max_qp_size_in_wqes);
		rc = -EINVAL;
		goto err_create_qp_sq_size;
	}

	golan_qp->size = golan_qp->sq.size + golan_qp->rq.size;

	/* allocate dma memory for WQEs (1 page is enough) - should change it */
	golan_qp->wqes = malloc_dma ( GOLAN_PAGE_SIZE, GOLAN_PAGE_SIZE );
	if (!golan_qp->wqes) {
		rc = -ENOMEM;
		goto err_create_qp_wqe_alloc;
	}
	golan_qp->rq.wqes = golan_qp->wqes;
	golan_qp->sq.wqes = golan_qp->wqes + golan_qp->rq.size; //(union golan_send_wqe *)&
	//(((struct golan_recv_wqe_ud *)(golan_qp->wqes))[qp->recv.num_wqes]);

	if ( golan_qp->rq.grh_size ) {
		golan_qp->rq.grh = ( golan_qp->wqes +
				     golan_qp->sq.size +
				     golan_qp->rq.size );
	}

	/* Invalidate all WQEs */
	data = &golan_qp->rq.wqes[0].data[0];
	for ( i = 0 ; i < ( golan_qp->rq.size / sizeof ( *data ) ); i++ ){
		data->lkey = cpu_to_be32 ( GOLAN_INVALID_LKEY );
		data++;
	}

	golan_qp->doorbell_record = malloc_dma(sizeof(struct golan_qp_db),
					       sizeof(struct golan_qp_db));
	if (!golan_qp->doorbell_record) {
		rc = -ENOMEM;
		goto err_create_qp_db_alloc;
	}
	memset(golan_qp->doorbell_record, 0, sizeof(struct golan_qp_db));

	cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_CREATE_QP, 0x0,
			GEN_MBOX, NO_MBOX,
			sizeof(struct golan_create_qp_mbox_in) + GOLAN_PAS_SIZE,
			sizeof(struct golan_create_qp_mbox_out));

	in = (struct golan_create_qp_mbox_in_data *)GET_INBOX(golan, GEN_MBOX);

	/* Fill the physical address of the page */
	in->pas[0] = VIRT_2_BE64_BUS(golan_qp->wqes);
	in->ctx.qp_counter_set_usr_page = cpu_to_be32(golan->uar.index);

	in->ctx.flags_pd = cpu_to_be32(golan->pdn);
	in->ctx.flags = cpu_to_be32((golan_qp_type_to_st(qp->type)
				     << GOLAN_QP_CTX_ST_BIT) |
				    (GOLAN_QP_PM_MIGRATED <<
				     GOLAN_QP_CTX_PM_STATE_BIT));
	// CGS set to 0 initially.
	// atomic mode
	in->ctx.rq_size_stride = ((ilog2(qp->recv.num_wqes) <<
				   GOLAN_QP_CTX_RQ_SIZE_BIT) |
				  (sizeof(golan_qp->rq.wqes[0]) / GOLAN_RECV_WQE_SIZE));
	in->ctx.sq_crq_size = cpu_to_be16(ilog2(golan_qp->sq.size / GOLAN_SEND_WQE_BB_SIZE)
					  << GOLAN_QP_CTX_SQ_SIZE_BIT);
	in->ctx.cqn_send = cpu_to_be32(qp->send.cq->cqn);
	in->ctx.cqn_recv = cpu_to_be32(qp->recv.cq->cqn);
	in->ctx.db_rec_addr = VIRT_2_BE64_BUS(golan_qp->doorbell_record);

	rc = send_command_and_wait(golan, DEF_CMD_IDX, GEN_MBOX, NO_MBOX, __FUNCTION__);
	GOLAN_CHECK_RC_AND_CMD_STATUS( err_create_qp_cmd );
	out = (struct golan_create_qp_mbox_out *)cmd->out;

	*qpn = (be32_to_cpu(out->qpn) & 0xffffff);
	/*
	 * Hardware wants QPN written in big-endian order (after
	 * shifting) for send doorbell.  Precompute this value to save
	 * a little bit when posting sends.
	 */
	golan_qp->doorbell_qpn = cpu_to_be32(*qpn << 8);
	golan_qp->state = GOLAN_IB_QPS_RESET;

	ib_qp_set_drvdata(qp, golan_qp);

	return 0;

err_create_qp_cmd:
	free_dma(golan_qp->doorbell_record, sizeof(struct golan_qp_db));
err_create_qp_db_alloc:
	free_dma ( golan_qp->wqes, GOLAN_PAGE_SIZE );
err_create_qp_wqe_alloc:
err_create_qp_sq_size:
err_create_qp_sq_wqe_size:
err_create_qp_rq_size:
	free ( golan_qp );
err_create_qp:
	return rc;
}

/**
 * Create queue pair
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @ret rc		Return status code
 */
static int golan_create_qp(struct ib_device *ibdev,
			   struct ib_queue_pair *qp)
{
	int rc, qpn = -1;

	switch (qp->type) {
	case IB_QPT_UD:
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		rc = golan_create_qp_aux(ibdev, qp, &qpn);
		if (rc) {
			DBG ( "%s Failed to create QP (rc = 0x%x)\n", __FUNCTION__, rc);
			return rc;
		}
		qp->qpn = qpn;

		break;
	case IB_QPT_ETH:
	case IB_QPT_RC:
	default:
		DBG ( "%s unsupported QP type (0x%x)\n", __FUNCTION__, qp->type);
		return -EINVAL;
	}

	return 0;
}

static int golan_modify_qp_rst_to_init(struct ib_device *ibdev,
				       struct ib_queue_pair *qp,
				       struct golan_modify_qp_mbox_in_data *in)
{
	int rc = 0;

	in->ctx.qkey = cpu_to_be32((uint32_t)(qp->qkey));

	in->ctx.pri_path.port = ibdev->port;
	in->ctx.flags |= cpu_to_be32(GOLAN_QP_PM_MIGRATED << GOLAN_QP_CTX_PM_STATE_BIT);
	in->ctx.pri_path.pkey_index = 0;
	/* QK is 0 */
	/* QP cntr set 0 */
	return rc;
}

static int golan_modify_qp_init_to_rtr(struct ib_device *ibdev __unused,
				       struct ib_queue_pair *qp __unused,
				       struct golan_modify_qp_mbox_in_data *in)
{
	int rc = 0;

	in->optparam = 0;
	return rc;
}

static int golan_modify_qp_rtr_to_rts(struct ib_device *ibdev __unused,
				      struct ib_queue_pair *qp __unused,
				      struct golan_modify_qp_mbox_in_data *in)
{
	int rc = 0;

	in->optparam = 0;
	/* In the good flow the PSN is 0 */
	return rc;
}

static int golan_modify_qp_to_rst(struct ib_device *ibdev,
				  struct ib_queue_pair *qp)
{
	struct golan *golan = ib_get_drvdata(ibdev);
	struct golan_queue_pair *golan_qp = ib_qp_get_drvdata(qp);
	struct golan_cmd_layout *cmd;
	int rc;

	cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_2RST_QP, 0x0,
			NO_MBOX, NO_MBOX,
			sizeof(struct golan_modify_qp_mbox_in),
			sizeof(struct golan_modify_qp_mbox_out));
	((struct golan_modify_qp_mbox_in *)(cmd->in))->qpn = cpu_to_be32(qp->qpn);
	rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__);
	GOLAN_CHECK_RC_AND_CMD_STATUS( err_modify_qp_2rst_cmd );

	golan_qp->state = GOLAN_IB_QPS_RESET;
	DBGC( golan , "%s QP number 0x%lx was modified to RESET\n",
	      __FUNCTION__, qp->qpn);

	return 0;

err_modify_qp_2rst_cmd:
	DBGC (golan ,"%s Failed to modify QP number 0x%lx (rc = 0x%x)\n",
	      __FUNCTION__, qp->qpn, rc);
	return rc;
}

static int (*golan_modify_qp_methods[])(struct ib_device *ibdev,
					struct ib_queue_pair *qp,
					struct golan_modify_qp_mbox_in_data *in) = {

	[GOLAN_IB_QPS_RESET]	= golan_modify_qp_rst_to_init,
	[GOLAN_IB_QPS_INIT]	= golan_modify_qp_init_to_rtr,
	[GOLAN_IB_QPS_RTR]	= golan_modify_qp_rtr_to_rts
};

static int golan_modify_qp(struct ib_device *ibdev,
			   struct ib_queue_pair *qp)
{
	struct golan *golan = ib_get_drvdata(ibdev);
	struct golan_queue_pair *golan_qp = ib_qp_get_drvdata(qp);
	struct golan_modify_qp_mbox_in_data *in;
	struct golan_cmd_layout *cmd;
	enum golan_ib_qp_state prev_state;
	int rc;
	int modify_cmd[] = {GOLAN_CMD_OP_RST2INIT_QP,
			    GOLAN_CMD_OP_INIT2RTR_QP,
			    GOLAN_CMD_OP_RTR2RTS_QP};

	while (golan_qp->state < GOLAN_IB_QPS_RTS) {
		prev_state = golan_qp->state;
		cmd = write_cmd(golan, DEF_CMD_IDX, modify_cmd[golan_qp->state], 0x0,
				GEN_MBOX, NO_MBOX,
				sizeof(struct golan_modify_qp_mbox_in),
				sizeof(struct golan_modify_qp_mbox_out));

		in = (struct golan_modify_qp_mbox_in_data *)GET_INBOX(golan, GEN_MBOX);
		((struct golan_modify_qp_mbox_in *)(cmd->in))->qpn = cpu_to_be32(qp->qpn);
		rc = golan_modify_qp_methods[golan_qp->state](ibdev, qp, in);
		if (rc) {
			goto err_modify_qp_fill_inbox;
		}
//		in->ctx.qp_counter_set_usr_page = cpu_to_be32(golan->uar.index);
		rc = send_command_and_wait(golan, DEF_CMD_IDX, GEN_MBOX, NO_MBOX, __FUNCTION__);
		GOLAN_CHECK_RC_AND_CMD_STATUS( err_modify_qp_cmd );

		++(golan_qp->state);

		DBGC( golan , "%s QP number 0x%lx was modified from %s to %s\n",
		      __FUNCTION__, qp->qpn, golan_qp_state_as_string[prev_state],
		      golan_qp_state_as_string[golan_qp->state]);
	}

	DBGC( golan , "%s QP number 0x%lx is ready to receive/send packets.\n",
	      __FUNCTION__, qp->qpn);
	return 0;

err_modify_qp_cmd:
err_modify_qp_fill_inbox:
	DBGC (golan ,"%s Failed to modify QP number 0x%lx (rc = 0x%x)\n",
	      __FUNCTION__, qp->qpn, rc);
	return rc;
}

/**
 * Destroy queue pair
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 */
static void golan_destroy_qp(struct ib_device *ibdev,
			     struct ib_queue_pair *qp)
{
	struct golan *golan = ib_get_drvdata(ibdev);
	struct golan_queue_pair *golan_qp = ib_qp_get_drvdata(qp);
	struct golan_cmd_layout *cmd;
	unsigned long qpn = qp->qpn;
	int rc;

	DBGC (golan, "%s in\n", __FUNCTION__);

	if (golan_qp->state != GOLAN_IB_QPS_RESET) {
		if (golan_modify_qp_to_rst(ibdev, qp)) {
			DBGC (golan ,"%s Failed to modify QP 0x%lx to RESET\n", __FUNCTION__,
			      qp->qpn);
		}
	}

	if (qp->recv.cq) {
		golan_cq_clean(qp->recv.cq);
	}
	if (qp->send.cq && (qp->send.cq != qp->recv.cq)) {
		golan_cq_clean(qp->send.cq);
	}

	cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_DESTROY_QP, 0x0,
			NO_MBOX, NO_MBOX,
			sizeof(struct golan_destroy_qp_mbox_in),
			sizeof(struct golan_destroy_qp_mbox_out));
	((struct golan_destroy_qp_mbox_in *)(cmd->in))->qpn = cpu_to_be32(qpn);
	rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__);
	GOLAN_PRINT_RC_AND_CMD_STATUS;
	qp->qpn = 0;

	ib_qp_set_drvdata(qp, NULL);
	free_dma(golan_qp->doorbell_record, sizeof(struct golan_qp_db));
	free_dma ( golan_qp->wqes, GOLAN_PAGE_SIZE );
	free(golan_qp);

	DBGC( golan ,"%s QP 0x%lx was destroyed\n", __FUNCTION__, qpn);
}

/**
 * Calculate transmission rate
 *
 * @v rate		IB rate
 * @ret golan_rate	Golan rate
 */
static unsigned int golan_rate(enum ib_rate rate) {
	return (((rate >= IB_RATE_2_5) && (rate <= IB_RATE_120)) ? (rate + 5) : 0);
}
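/*
 * Note: the (rate + 5) offset presumably maps iPXE's ib_rate enumeration
 * onto the device's stat_rate encoding; out-of-range rates yield 0,
 * i.e. no static rate limiting.
 */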

/**
 * Post send work queue entry
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @v av		Address vector
 * @v iobuf		I/O buffer
 * @ret rc		Return status code
 */
static int golan_post_send(struct ib_device *ibdev,
			   struct ib_queue_pair *qp,
			   struct ib_address_vector *av,
			   struct io_buffer *iobuf)
{
	struct golan *golan = ib_get_drvdata(ibdev);
	struct golan_queue_pair *golan_qp = ib_qp_get_drvdata(qp);
	struct golan_send_wqe_ud *wqe = NULL;
	struct golan_av *datagram = NULL;
	unsigned long wqe_idx_mask;
	unsigned long wqe_idx;
	struct golan_wqe_data_seg *data = NULL;
	struct golan_wqe_ctrl_seg *ctrl = NULL;


	wqe_idx_mask = (qp->send.num_wqes - 1);
	wqe_idx = (qp->send.next_idx & wqe_idx_mask);
	if (qp->send.iobufs[wqe_idx]) {
		DBGC (golan ,"%s Send queue of QPN 0x%lx is full\n", __FUNCTION__, qp->qpn);
		return -ENOMEM;
	}

	qp->send.iobufs[wqe_idx] = iobuf;

	// change to this
	//wqe_size_in_octa_words = golan_qp->sq.wqe_size_in_wqebb >> 4;

	wqe = &golan_qp->sq.wqes[wqe_idx].ud;

	//CHECK HW OWNERSHIP BIT ???

	memset(wqe, 0, sizeof(*wqe));

	ctrl = &wqe->ctrl;
	ctrl->opmod_idx_opcode = cpu_to_be32(GOLAN_SEND_OPCODE |
					     ((u32)(golan_qp->sq.next_idx) <<
					      GOLAN_WQE_CTRL_WQE_IDX_BIT));
	ctrl->qpn_ds = cpu_to_be32(GOLAN_SEND_UD_WQE_SIZE >> 4) |
		       golan_qp->doorbell_qpn;
	ctrl->fm_ce_se = 0x8; //10 - 0 - 0
	data = &wqe->data;
	data->byte_count = cpu_to_be32(iob_len(iobuf));
	data->lkey = cpu_to_be32(golan->mkey);
	data->addr = VIRT_2_BE64_BUS(iobuf->data);

	datagram = &wqe->datagram;
	datagram->key.qkey.qkey = cpu_to_be32(av->qkey);
	datagram->dqp_dct = cpu_to_be32((1 << 31) | av->qpn);
	datagram->stat_rate_sl = ((golan_rate(av->rate) << 4) | av->sl);
	datagram->fl_mlid = (ibdev->lid & 0x007f); /* take only the 7 low bits of the LID */
	datagram->rlid = cpu_to_be16(av->lid);
	datagram->grh_gid_fl = cpu_to_be32(av->gid_present << 30);
	memcpy(datagram->rgid, av->gid.bytes, 16 /* sizeof(datagram->rgid) */);

	/*
	 * Make sure that descriptors are written before
	 * updating doorbell record and ringing the doorbell
	 */
	++(qp->send.next_idx);
	golan_qp->sq.next_idx = (golan_qp->sq.next_idx + GOLAN_WQEBBS_PER_SEND_UD_WQE);
	golan_qp->doorbell_record->send_db = cpu_to_be16(golan_qp->sq.next_idx);
	wmb();
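	/* Ring the send doorbell: copy the first 8 bytes of the control
	 * segment into the UAR, alternating between the two doorbell
	 * buffers so consecutive rings do not hit the same word. */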
1513 writeq(*((__be64 *)ctrl), golan->uar.virt
1514 + ( ( golan_qp->sq.next_idx & 0x1 ) ? DB_BUFFER0_EVEN_OFFSET
1515 : DB_BUFFER0_ODD_OFFSET ) );
1516 return 0;
1517 }
1518
1519 /**
1520 * Post receive work queue entry
1521 *
1522 * @v ibdev Infiniband device
1523 * @v qp Queue pair
1524 * @v iobuf I/O buffer
1525 * @ret rc Return status code
1526 */
1527 static int golan_post_recv(struct ib_device *ibdev,
1528 struct ib_queue_pair *qp,
1529 struct io_buffer *iobuf)
1530 {
1531 struct golan *golan = ib_get_drvdata(ibdev);
1532 struct golan_queue_pair *golan_qp = ib_qp_get_drvdata(qp);
1533 struct ib_work_queue *wq = &qp->recv;
1534 struct golan_recv_wqe_ud *wqe;
1535 struct ib_global_route_header *grh;
1536 struct golan_wqe_data_seg *data;
1537 unsigned int wqe_idx_mask;
1538
1539 /* Allocate work queue entry */
1540 wqe_idx_mask = (wq->num_wqes - 1);
1541 if (wq->iobufs[wq->next_idx & wqe_idx_mask]) {
1542 DBGC (golan ,"%s Receive queue of QPN 0x%lx is full\n", __FUNCTION__, qp->qpn);
1543 return -ENOMEM;
1544 }
1545
1546 wq->iobufs[wq->next_idx & wqe_idx_mask] = iobuf;
1547 wqe = & golan_qp->rq.wqes[wq->next_idx & wqe_idx_mask];
1548
1549 memset(wqe, 0, sizeof(*wqe));
1550 data = &wqe->data[0];
1551 if ( golan_qp->rq.grh ) {
1552 grh = &golan_qp->rq.grh[wq->next_idx & wqe_idx_mask];
1553 data->byte_count = cpu_to_be32 ( sizeof ( *grh ) );
1554 data->lkey = cpu_to_be32 ( golan->mkey );
1555 data->addr = VIRT_2_BE64_BUS ( grh );
1556 data++;
1557 }
1558
1559 data->byte_count = cpu_to_be32(iob_tailroom(iobuf));
1560 data->lkey = cpu_to_be32(golan->mkey);
1561 data->addr = VIRT_2_BE64_BUS(iobuf->data);
1562
1563 ++wq->next_idx;
1564
1565 /*
1566 * Make sure that descriptors are written before
1567 * updating doorbell record and ringing the doorbell
1568 */
1569 wmb();
1570 golan_qp->doorbell_record->recv_db = cpu_to_be16(qp->recv.next_idx & 0xffff);
1571
1572 return 0;
1573 }
1574
1575 static int golan_query_vport_context ( struct ib_device *ibdev ) {
1576 struct golan *golan = ib_get_drvdata ( ibdev );
1577 struct golan_cmd_layout *cmd;
1578 struct golan_query_hca_vport_context_inbox *in;
1579 struct golan_query_hca_vport_context_data *context_data;
1580 int rc;
1581
1582 cmd = write_cmd ( golan, DEF_CMD_IDX, GOLAN_CMD_OP_QUERY_HCA_VPORT_CONTEXT,
1583 0x0, GEN_MBOX, GEN_MBOX,
1584 sizeof(struct golan_query_hca_vport_context_inbox),
1585 sizeof(struct golan_query_hca_vport_context_outbox) );
1586
1587 in = GOLAN_MBOX_IN ( cmd, in );
1588 in->port_num = (u8)ibdev->port;
1589
1590 rc = send_command_and_wait ( golan, DEF_CMD_IDX, GEN_MBOX, GEN_MBOX, __FUNCTION__ );
1591 GOLAN_CHECK_RC_AND_CMD_STATUS( err_query_vport_context_cmd );
1592
1593 context_data = (struct golan_query_hca_vport_context_data *)( GET_OUTBOX ( golan, GEN_MBOX ) );
1594
1595 ibdev->node_guid.dwords[0] = context_data->node_guid[0];
1596 ibdev->node_guid.dwords[1] = context_data->node_guid[1];
1597 ibdev->lid = be16_to_cpu( context_data->lid );
1598 ibdev->sm_lid = be16_to_cpu( context_data->sm_lid );
1599 ibdev->sm_sl = context_data->sm_sl;
1600 ibdev->port_state = context_data->port_state;
1601
1602 return 0;
1603 err_query_vport_context_cmd:
1604 DBGC (golan ,"%s [%d] out\n", __FUNCTION__, rc);
1605 return rc;
1606 }
1607
1608
1609 static int golan_query_vport_gid ( struct ib_device *ibdev ) {
1610 struct golan *golan = ib_get_drvdata( ibdev );
1611 struct golan_cmd_layout *cmd;
1612 struct golan_query_hca_vport_gid_inbox *in;
1613 union ib_gid *ib_gid;
1614 int rc;
1615
1616 cmd = write_cmd( golan, DEF_CMD_IDX, GOLAN_CMD_OP_QUERY_HCA_VPORT_GID,
1617 0x0, GEN_MBOX, GEN_MBOX,
1618 sizeof(struct golan_query_hca_vport_gid_inbox),
1619 sizeof(struct golan_query_hca_vport_gid_outbox) );
1620
1621 in = GOLAN_MBOX_IN ( cmd, in );
1622 in->port_num = (u8)ibdev->port;
1623 in->gid_index = 0;
1624 rc = send_command_and_wait ( golan, DEF_CMD_IDX, GEN_MBOX, GEN_MBOX, __FUNCTION__ );
1625 GOLAN_CHECK_RC_AND_CMD_STATUS( err_query_vport_gid_cmd );
1626
1627 ib_gid = (union ib_gid *)( GET_OUTBOX ( golan, GEN_MBOX ) );
1628
1629 memcpy ( &ibdev->gid, ib_gid, sizeof(ibdev->gid) );
1630
1631 return 0;
1632 err_query_vport_gid_cmd:
1633 DBGC ( golan, "%s [%d] out\n", __FUNCTION__, rc);
1634 return rc;
1635 }
1636
1637 static int golan_query_vport_pkey ( struct ib_device *ibdev ) {
1638 struct golan *golan = ib_get_drvdata ( ibdev );
1639 struct golan_cmd_layout *cmd;
1640 struct golan_query_hca_vport_pkey_inbox *in;
1641 int pkey_table_size_in_entries = (1 << (7 + golan->caps.pkey_table_size));
1642 int rc;
1643
1644 cmd = write_cmd ( golan, DEF_CMD_IDX, GOLAN_CMD_OP_QUERY_HCA_VPORT_PKEY,
1645 0x0, GEN_MBOX, GEN_MBOX,
1646 sizeof(struct golan_query_hca_vport_pkey_inbox),
1647 sizeof(struct golan_outbox_hdr) + 8 +
1648 sizeof(struct golan_query_hca_vport_pkey_data) * pkey_table_size_in_entries );
1649
1650 in = GOLAN_MBOX_IN ( cmd, in );
1651 in->port_num = (u8)ibdev->port;
1652 in->pkey_index = 0xffff;
1653 rc = send_command_and_wait ( golan, DEF_CMD_IDX, GEN_MBOX, GEN_MBOX, __FUNCTION__ );
1654 GOLAN_CHECK_RC_AND_CMD_STATUS( err_query_vport_pkey_cmd );
1655
1656 return 0;
1657 err_query_vport_pkey_cmd:
1658 DBGC (golan ,"%s [%d] out\n", __FUNCTION__, rc);
1659 return rc;
1660 }
1661
1662 static int golan_get_ib_info ( struct ib_device *ibdev ) {
1663 int rc;
1664
1665 rc = golan_query_vport_context ( ibdev );
1666 if ( rc != 0 ) {
1667 DBG ( "golan_get_ib_info: golan_query_vport_context Failed (rc = %d)\n",rc );
1668 goto err_query_vport_context;
1669 }
1670
1671 rc = golan_query_vport_gid ( ibdev );
1672 if ( rc != 0 ) {
1673 DBG ( "golan_get_ib_info: golan_query_vport_gid Failed (rc = %d)\n",rc );
1674 goto err_query_vport_gid;
1675 }
1676
1677 rc = golan_query_vport_pkey ( ibdev );
1678 if ( rc != 0 ) {
1679 DBG ( "golan_get_ib_info: golan_query_vport_pkey Failed (rc = %d)\n",rc );
1680 goto err_query_vport_pkey;
1681 }
1682 return rc;
1683 err_query_vport_pkey:
1684 err_query_vport_gid:
1685 err_query_vport_context:
1686 DBG ( "%s [%d] out\n", __FUNCTION__, rc);
1687 return rc;
1688 }
1689
1690 static int golan_complete(struct ib_device *ibdev,
1691 struct ib_completion_queue *cq,
1692 struct golan_cqe64 *cqe64)
1693 {
1694 struct golan *golan = ib_get_drvdata(ibdev);
1695 struct ib_work_queue *wq;
1696 struct golan_queue_pair *golan_qp;
1697 struct ib_queue_pair *qp;
1698 struct io_buffer *iobuf = NULL;
1699 struct ib_address_vector recv_dest;
1700 struct ib_address_vector recv_source;
1701 struct ib_global_route_header *grh;
1702 struct golan_err_cqe *err_cqe64;
1703 int gid_present, idx;
1704 u16 wqe_ctr;
1705 uint8_t opcode;
1706 static int error_state;
1707 uint32_t qpn = be32_to_cpu(cqe64->sop_drop_qpn) & 0xffffff;
1708 int is_send = 0;
1709 size_t len;
1710
1711 opcode = cqe64->op_own >> GOLAN_CQE_OPCODE_BIT;
1712 DBGC2( golan , "%s completion with opcode 0x%x\n", __FUNCTION__, opcode);
1713
1714 if (opcode == GOLAN_CQE_REQ || opcode == GOLAN_CQE_REQ_ERR) {
1715 is_send = 1;
1716 } else {
1717 is_send = 0;
1718 }
1719 if (opcode == GOLAN_CQE_REQ_ERR || opcode == GOLAN_CQE_RESP_ERR) {
1720 err_cqe64 = (struct golan_err_cqe *)cqe64;
1721 int i = 0;
1722 if (!error_state++) {
1723 DBGC (golan ,"\n");
1724 for ( i = 0 ; i < 16 ; i += 2 ) {
1725 DBGC (golan ,"%x %x\n",
1726 be32_to_cpu(((uint32_t *)(err_cqe64))[i]),
1727 be32_to_cpu(((uint32_t *)(err_cqe64))[i + 1]));
1728 }
1729 DBGC (golan ,"CQE with error: Syndrome(0x%x), VendorSynd(0x%x), HW_SYN(0x%x)\n",
1730 err_cqe64->syndrome, err_cqe64->vendor_err_synd,
1731 err_cqe64->hw_syndrom);
1732 }
1733 }
1734 /* Identify work queue */
1735 wq = ib_find_wq(cq, qpn, is_send);
1736 if (!wq) {
1737 DBGC (golan ,"%s unknown %s QPN 0x%x in CQN 0x%lx\n",
1738 __FUNCTION__, (is_send ? "send" : "recv"), qpn, cq->cqn);
1739 return -EINVAL;
1740 }
1741
1742 qp = wq->qp;
1743 golan_qp = ib_qp_get_drvdata ( qp );
1744
1745 wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
1746 if (is_send) {
1747 wqe_ctr &= ((GOLAN_WQEBBS_PER_SEND_UD_WQE * wq->num_wqes) - 1);
1748 idx = wqe_ctr / GOLAN_WQEBBS_PER_SEND_UD_WQE;
1749 } else {
1750 idx = wqe_ctr & (wq->num_wqes - 1);
1751 }
1752
1753 iobuf = wq->iobufs[idx];
1754 if (!iobuf) {
1755 DBGC (golan ,"%s IO Buffer 0x%x not found in QPN 0x%x\n",
1756 __FUNCTION__, idx, qpn);
1757 return -EINVAL;
1758 }
1759 wq->iobufs[idx] = NULL;
1760
1761 if (is_send) {
1762 ib_complete_send(ibdev, qp, iobuf, (opcode == GOLAN_CQE_REQ_ERR));
1763 } else {
1764 len = be32_to_cpu(cqe64->byte_cnt);
1765 memset(&recv_dest, 0, sizeof(recv_dest));
1766 recv_dest.qpn = qpn;
1767 /* Construct address vector */
1768 memset(&recv_source, 0, sizeof(recv_source));
1769 switch (qp->type) {
1770 case IB_QPT_SMI:
1771 case IB_QPT_GSI:
1772 case IB_QPT_UD:
1773 /* Locate corresponding GRH */
1774 assert ( golan_qp->rq.grh != NULL );
1775 grh = &golan_qp->rq.grh[ idx ];
1776
1777 recv_source.qpn = be32_to_cpu(cqe64->flags_rqpn) & 0xffffff;
1778 recv_source.lid = be16_to_cpu(cqe64->slid);
1779 recv_source.sl = (be32_to_cpu(cqe64->flags_rqpn) >> 24) & 0xf;
1780 gid_present = (be32_to_cpu(cqe64->flags_rqpn) >> 28) & 3;
1781 if (!gid_present) {
1782 recv_dest.gid_present = recv_source.gid_present = 0;
1783 } else {
1784 recv_dest.gid_present = recv_source.gid_present = 1;
1785 memcpy(&recv_source.gid, &grh->sgid, sizeof(recv_source.gid));
1786 memcpy(&recv_dest.gid, &grh->dgid, sizeof(recv_dest.gid));
1787 /* When gid_present reads 0x3, the GRH is located in the
1788 * upper 64 bytes of a 128-byte CQE; that case is currently
1789 * not supported. */
1793 }
1794 len -= sizeof ( *grh );
1795 break;
1796 case IB_QPT_RC:
1797 case IB_QPT_ETH:
1798 default:
1799 DBGC (golan ,"%s Unsupported QP type (0x%x)\n", __FUNCTION__, qp->type);
1800 return -EINVAL;
1801 }
1802 assert(len <= iob_tailroom(iobuf));
1803 iob_put(iobuf, len);
1804 ib_complete_recv(ibdev, qp, &recv_dest, &recv_source, iobuf, (opcode == GOLAN_CQE_RESP_ERR));
1805 }
1806 return 0;
1807 }
1808
1809 static int golan_is_hw_ownership(struct ib_completion_queue *cq,
1810 struct golan_cqe64 *cqe64)
1811 {
1812 return ((cqe64->op_own & GOLAN_CQE_OWNER_MASK) !=
1813 ((cq->next_idx >> ilog2(cq->num_cqes)) & 1));
1814 }
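/* Ownership sketch: the hardware flips the CQE owner bit on each pass
 * over the ring, so the software phase for entry next_idx is
 * ((next_idx >> ilog2(num_cqes)) & 1). golan_is_hw_ownership() above
 * reports hardware ownership whenever the owner bit differs from that
 * phase. For example, with num_cqes = 4, indices 0-3 expect phase 0
 * and indices 4-7 expect phase 1. */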
1815 static void golan_poll_cq(struct ib_device *ibdev,
1816 struct ib_completion_queue *cq)
1817 {
1818 unsigned int i;
1819 int rc = 0;
1820 unsigned int cqe_idx_mask;
1821 struct golan_cqe64 *cqe64;
1822 struct golan_completion_queue *golan_cq = ib_cq_get_drvdata(cq);
1823 struct golan *golan = ib_get_drvdata(ibdev);
1824
1825 for (i = 0; i < cq->num_cqes; ++i) {
1826 /* Look for completion entry */
1827 cqe_idx_mask = (cq->num_cqes - 1);
1828 cqe64 = &golan_cq->cqes[cq->next_idx & cqe_idx_mask];
1829 /* temporary valid only for 64 byte CQE */
1830 if (golan_is_hw_ownership(cq, cqe64) ||
1831 ((cqe64->op_own >> GOLAN_CQE_OPCODE_BIT) ==
1832 GOLAN_CQE_OPCODE_NOT_VALID)) {
1833 break; /* HW ownership */
1834 }
1835
1836 DBGC2( golan , "%s CQN 0x%lx [%ld] \n", __FUNCTION__, cq->cqn, cq->next_idx);
1837 /*
1838 * Make sure we read CQ entry contents after we've checked the
1839 * ownership bit. (PRM - 6.5.3.2)
1840 */
1841 rmb();
1842 rc = golan_complete(ibdev, cq, cqe64);
1843 if (rc != 0) {
1844 DBGC (golan ,"%s CQN 0x%lx failed to complete\n", __FUNCTION__, cq->cqn);
1845 }
1846
1847 /* Update completion queue's index */
1848 cq->next_idx++;
1849
1850 /* Update doorbell record */
1851 *(golan_cq->doorbell_record) = cpu_to_be32(cq->next_idx & 0xffffff);
1852 }
1853 }
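/* Note on the doorbell record above: the consumer index is published
 * to the device through memory as a 24-bit big-endian counter rather
 * than via a register write, and is refreshed after every consumed
 * CQE; the exact record layout is defined by the hardware PRM. */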
1854
1855 static const char *golan_eqe_type_str(u8 type)
1856 {
1857 switch (type) {
1858 case GOLAN_EVENT_TYPE_COMP:
1859 return "GOLAN_EVENT_TYPE_COMP";
1860 case GOLAN_EVENT_TYPE_PATH_MIG:
1861 return "GOLAN_EVENT_TYPE_PATH_MIG";
1862 case GOLAN_EVENT_TYPE_COMM_EST:
1863 return "GOLAN_EVENT_TYPE_COMM_EST";
1864 case GOLAN_EVENT_TYPE_SQ_DRAINED:
1865 return "GOLAN_EVENT_TYPE_SQ_DRAINED";
1866 case GOLAN_EVENT_TYPE_SRQ_LAST_WQE:
1867 return "GOLAN_EVENT_TYPE_SRQ_LAST_WQE";
1868 case GOLAN_EVENT_TYPE_SRQ_RQ_LIMIT:
1869 return "GOLAN_EVENT_TYPE_SRQ_RQ_LIMIT";
1870 case GOLAN_EVENT_TYPE_CQ_ERROR:
1871 return "GOLAN_EVENT_TYPE_CQ_ERROR";
1872 case GOLAN_EVENT_TYPE_WQ_CATAS_ERROR:
1873 return "GOLAN_EVENT_TYPE_WQ_CATAS_ERROR";
1874 case GOLAN_EVENT_TYPE_PATH_MIG_FAILED:
1875 return "GOLAN_EVENT_TYPE_PATH_MIG_FAILED";
1876 case GOLAN_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
1877 return "GOLAN_EVENT_TYPE_WQ_INVAL_REQ_ERROR";
1878 case GOLAN_EVENT_TYPE_WQ_ACCESS_ERROR:
1879 return "GOLAN_EVENT_TYPE_WQ_ACCESS_ERROR";
1880 case GOLAN_EVENT_TYPE_SRQ_CATAS_ERROR:
1881 return "GOLAN_EVENT_TYPE_SRQ_CATAS_ERROR";
1882 case GOLAN_EVENT_TYPE_INTERNAL_ERROR:
1883 return "GOLAN_EVENT_TYPE_INTERNAL_ERROR";
1884 case GOLAN_EVENT_TYPE_PORT_CHANGE:
1885 return "GOLAN_EVENT_TYPE_PORT_CHANGE";
1886 case GOLAN_EVENT_TYPE_GPIO_EVENT:
1887 return "GOLAN_EVENT_TYPE_GPIO_EVENT";
1888 case GOLAN_EVENT_TYPE_REMOTE_CONFIG:
1889 return "GOLAN_EVENT_TYPE_REMOTE_CONFIG";
1890 case GOLAN_EVENT_TYPE_DB_BF_CONGESTION:
1891 return "GOLAN_EVENT_TYPE_DB_BF_CONGESTION";
1892 case GOLAN_EVENT_TYPE_STALL_EVENT:
1893 return "GOLAN_EVENT_TYPE_STALL_EVENT";
1894 case GOLAN_EVENT_TYPE_CMD:
1895 return "GOLAN_EVENT_TYPE_CMD";
1896 case GOLAN_EVENT_TYPE_PAGE_REQUEST:
1897 return "GOLAN_EVENT_TYPE_PAGE_REQUEST";
1898 default:
1899 return "Unrecognized event";
1900 }
1901 }
1902
1903 static const char *golan_eqe_port_subtype_str(u8 subtype)
1904 {
1905 switch (subtype) {
1906 case GOLAN_PORT_CHANGE_SUBTYPE_DOWN:
1907 return "GOLAN_PORT_CHANGE_SUBTYPE_DOWN";
1908 case GOLAN_PORT_CHANGE_SUBTYPE_ACTIVE:
1909 return "GOLAN_PORT_CHANGE_SUBTYPE_ACTIVE";
1910 case GOLAN_PORT_CHANGE_SUBTYPE_INITIALIZED:
1911 return "GOLAN_PORT_CHANGE_SUBTYPE_INITIALIZED";
1912 case GOLAN_PORT_CHANGE_SUBTYPE_LID:
1913 return "GOLAN_PORT_CHANGE_SUBTYPE_LID";
1914 case GOLAN_PORT_CHANGE_SUBTYPE_PKEY:
1915 return "GOLAN_PORT_CHANGE_SUBTYPE_PKEY";
1916 case GOLAN_PORT_CHANGE_SUBTYPE_GUID:
1917 return "GOLAN_PORT_CHANGE_SUBTYPE_GUID";
1918 case GOLAN_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
1919 return "GOLAN_PORT_CHANGE_SUBTYPE_CLIENT_REREG";
1920 default:
1921 return "Unrecognized event";
1922 }
1923 }
1924
1925 /**
1926 * Update Infiniband parameters using Commands
1927 *
1928 * @v ibdev Infiniband device
1929 * @ret rc Return status code
1930 */
1931 static int golan_ib_update ( struct ib_device *ibdev ) {
1932 int rc;
1933
1934 /* Get IB parameters */
1935 if ( ( rc = golan_get_ib_info ( ibdev ) ) != 0 )
1936 return rc;
1937
1938 /* Notify Infiniband core of potential link state change */
1939 ib_link_state_changed ( ibdev );
1940
1941 return 0;
1942 }
1943
1944 static inline void golan_handle_port_event(struct golan *golan, struct golan_eqe *eqe)
1945 {
1946 struct ib_device *ibdev;
1947 u8 port;
1948
1949 port = (eqe->data.port.port >> 4) & 0xf;
1950 ibdev = golan->ports[port - 1].ibdev;
1951
1952 if ( ! ib_is_open ( ibdev ) )
1953 return;
1954
1955 switch (eqe->sub_type) {
1956 case GOLAN_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
1957 case GOLAN_PORT_CHANGE_SUBTYPE_ACTIVE:
1958 golan_ib_update ( ibdev );
1959 /* Fall through */
1960 case GOLAN_PORT_CHANGE_SUBTYPE_DOWN:
1961 case GOLAN_PORT_CHANGE_SUBTYPE_LID:
1962 case GOLAN_PORT_CHANGE_SUBTYPE_PKEY:
1963 case GOLAN_PORT_CHANGE_SUBTYPE_GUID:
1964 case GOLAN_PORT_CHANGE_SUBTYPE_INITIALIZED:
1965 DBGC( golan , "%s event %s(%d) (sub event %s(%d))arrived on port %d\n",
1966 __FUNCTION__, golan_eqe_type_str(eqe->type), eqe->type,
1967 golan_eqe_port_subtype_str(eqe->sub_type),
1968 eqe->sub_type, port);
1969 break;
1970 default:
1971 DBGC (golan ,"%s Port event with unrecognized subtype: port %d, sub_type %d\n",
1972 __FUNCTION__, port, eqe->sub_type);
1973 }
1974 }
1975
1976 static struct golan_eqe *golan_next_eqe_sw(struct golan_event_queue *eq)
1977 {
1978 uint32_t entry = (eq->cons_index & (GOLAN_NUM_EQES - 1));
1979 struct golan_eqe *eqe = &(eq->eqes[entry]);
1980 return ((eqe->owner != ((eq->cons_index >> ilog2(GOLAN_NUM_EQES)) & 1)) ? NULL : eqe);
1981 }
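/* The EQ ring uses the same phase convention as the CQ: an EQE is
 * software-owned when its owner bit equals
 * ((cons_index >> ilog2(GOLAN_NUM_EQES)) & 1); otherwise NULL is
 * returned and polling stops until hardware publishes the entry. */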
1982
1983
1984 /**
1985 * Poll event queue
1986 *
1987 * @v ibdev Infiniband device
1988 */
1989 static void golan_poll_eq(struct ib_device *ibdev)
1990 {
1991 struct golan *golan = ib_get_drvdata(ibdev);
1992 struct golan_event_queue *eq = &(golan->eq);
1993 struct golan_eqe *eqe;
1994 u32 cqn;
1995 int counter = 0;
1996
1997 while ((eqe = golan_next_eqe_sw(eq)) && (counter < GOLAN_NUM_EQES)) {
1998 /*
1999 * Make sure we read EQ entry contents after we've
2000 * checked the ownership bit.
2001 */
2002 rmb();
2003
2004 DBGC( golan , "%s eqn %d, eqe type %s\n", __FUNCTION__, eq->eqn,
2005 golan_eqe_type_str(eqe->type));
2006 switch (eqe->type) {
2007 case GOLAN_EVENT_TYPE_COMP:
2008 /* We don't need to handle completion events, since we
2009 * poll all the CQs after polling the EQ */
2010 break;
2011 case GOLAN_EVENT_TYPE_PATH_MIG:
2012 case GOLAN_EVENT_TYPE_COMM_EST:
2013 case GOLAN_EVENT_TYPE_SQ_DRAINED:
2014 case GOLAN_EVENT_TYPE_SRQ_LAST_WQE:
2015 case GOLAN_EVENT_TYPE_WQ_CATAS_ERROR:
2016 case GOLAN_EVENT_TYPE_PATH_MIG_FAILED:
2017 case GOLAN_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
2018 case GOLAN_EVENT_TYPE_WQ_ACCESS_ERROR:
2019 case GOLAN_EVENT_TYPE_SRQ_RQ_LIMIT:
2020 case GOLAN_EVENT_TYPE_SRQ_CATAS_ERROR:
2021 DBGC( golan , "%s event %s(%d) arrived\n", __FUNCTION__,
2022 golan_eqe_type_str(eqe->type), eqe->type);
2023 break;
2024 case GOLAN_EVENT_TYPE_CMD:
2025 // golan_cmd_comp_handler(be32_to_cpu(eqe->data.cmd.vector));
2026 break;
2027 case GOLAN_EVENT_TYPE_PORT_CHANGE:
2028 golan_handle_port_event(golan, eqe);
2029 break;
2030 case GOLAN_EVENT_TYPE_CQ_ERROR:
2031 cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
2032 DBGC (golan ,"CQ error on CQN 0x%x, syndrom 0x%x\n",
2033 cqn, eqe->data.cq_err.syndrome);
2034 // mlx5_cq_event(dev, cqn, eqe->type);
2035 break;
2036 /*
2037 * The driver does not currently support dynamic memory requests
2038 * while the firmware is running. A follow-up change will allocate
2039 * the firmware pages once and keep them until driver shutdown;
2040 * this request is not issued at present anyway.
2041 case GOLAN_EVENT_TYPE_PAGE_REQUEST:
2042 {
2043 // We should check whether we get this event
2044 // while waiting for a command
2045 u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
2046 s16 npages = be16_to_cpu(eqe->data.req_pages.num_pages);
2047
2048 DBGC (golan ,"%s page request for func 0x%x, npages %d\n",
2049 __FUNCTION__, func_id, npages);
2050 golan_provide_pages(golan, npages, func_id);
2051 }
2052 break;
2053 */
2054 default:
2055 DBGC (golan ,"%s Unhandled event 0x%x on EQ 0x%x\n", __FUNCTION__,
2056 eqe->type, eq->eqn);
2057 break;
2058 }
2059
2060 ++eq->cons_index;
2061 golan_eq_update_ci(eq, GOLAN_EQ_UNARMED);
2062 ++counter;
2063 }
2064 }
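/* The poll loop above is deliberately bounded: even under a constant
 * stream of events, a single call consumes at most GOLAN_NUM_EQES
 * entries before returning control to the caller. */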
2065
2066 /**
2067 * Attach to multicast group
2068 *
2069 * @v ibdev Infiniband device
2070 * @v qp Queue pair
2071 * @v gid Multicast GID
2072 * @ret rc Return status code
2073 */
2074 static int golan_mcast_attach(struct ib_device *ibdev,
2075 struct ib_queue_pair *qp,
2076 union ib_gid *gid)
2077 {
2078 struct golan *golan = ib_get_drvdata(ibdev);
2079 struct golan_cmd_layout *cmd;
2080 int rc;
2081
2082 if ( qp == NULL ) {
2083 DBGC( golan, "%s: Invalid pointer, could not attach QPN to MCG\n",
2084 __FUNCTION__ );
2085 return -EFAULT;
2086 }
2087
2088 cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_ATTACH_TO_MCG, 0x0,
2089 GEN_MBOX, NO_MBOX,
2090 sizeof(struct golan_attach_mcg_mbox_in),
2091 sizeof(struct golan_attach_mcg_mbox_out));
2092 ((struct golan_attach_mcg_mbox_in *)(cmd->in))->qpn = cpu_to_be32(qp->qpn);
2093
2094 memcpy(GET_INBOX(golan, GEN_MBOX), gid, sizeof(*gid));
2095
2096 rc = send_command_and_wait(golan, DEF_CMD_IDX, GEN_MBOX, NO_MBOX, __FUNCTION__);
2097 GOLAN_CHECK_RC_AND_CMD_STATUS( err_attach_to_mcg_cmd );
2098
2099 DBGC( golan , "%s: QPN 0x%lx was attached to MCG\n", __FUNCTION__, qp->qpn);
2100 return 0;
2101 err_attach_to_mcg_cmd:
2102 DBGC (golan ,"%s [%d] out\n", __FUNCTION__, rc);
2103 return rc;
2104 }
2105
2106 /**
2107 * Detach from multicast group
2108 *
2109 * @v ibdev Infiniband device
2110 * @v qp Queue pair
2111 * @v gid Multicast GID
2113 */
2114 static void golan_mcast_detach(struct ib_device *ibdev,
2115 struct ib_queue_pair *qp,
2116 union ib_gid *gid)
2117 {
2118 struct golan *golan = ib_get_drvdata(ibdev);
2119 struct golan_cmd_layout *cmd;
2120 int rc;
2121
2122 cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_DETACH_FROM_MCG, 0x0,
2123 GEN_MBOX, NO_MBOX,
2124 sizeof(struct golan_detach_mcg_mbox_in),
2125 sizeof(struct golan_detach_mcg_mbox_out));
2126 ((struct golan_detach_mcg_mbox_in *)(cmd->in))->qpn = cpu_to_be32(qp->qpn);
2127
2128 memcpy(GET_INBOX(golan, GEN_MBOX), gid, sizeof(*gid));
2129
2130 rc = send_command_and_wait(golan, DEF_CMD_IDX, GEN_MBOX, NO_MBOX, __FUNCTION__);
2131 GOLAN_PRINT_RC_AND_CMD_STATUS;
2132
2133 DBGC( golan , "%s: QPN 0x%lx was detached from MCG\n", __FUNCTION__, qp->qpn);
2134 }
2135
2136 /**
2137 * Inform embedded subnet management agent of a received MAD
2138 *
2139 * @v ibdev Infiniband device
2140 * @v mad MAD
2141 * @ret rc Return status code
2142 */
2143 static int golan_inform_sma(struct ib_device *ibdev,
2144 union ib_mad *mad)
2145 {
2146 if (!ibdev || !mad) {
2147 return -EINVAL;
2148 }
2149
2150 return 0;
2151 }
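/* Note: golan_inform_sma() is intentionally a no-op beyond parameter
 * checking; it backs both set_port_info and set_pkey_table below,
 * presumably because the embedded SMA applies these changes in
 * firmware without driver involvement. */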
2152
2153 static int golan_register_ibdev(struct golan_port *port)
2154 {
2155 struct ib_device *ibdev = port->ibdev;
2156 int rc;
2157
2158 golan_get_ib_info ( ibdev );
2159 /* Register Infiniband device */
2160 if ((rc = register_ibdev(ibdev)) != 0) {
2161 DBG ( "%s port %d could not register IB device: (rc = %d)\n",
2162 __FUNCTION__, ibdev->port, rc);
2163 return rc;
2164 }
2165
2166 port->netdev = ipoib_netdev( ibdev );
2167
2168 return 0;
2169 }
2170
2171 static inline void golan_bring_down(struct golan *golan)
2172 {
2173 DBGC(golan, "%s: start\n", __FUNCTION__);
2174
2175 if ( ! ( golan->flags & GOLAN_OPEN ) ) {
2176 DBGC(golan, "%s: end (already closed)\n", __FUNCTION__);
2177 return;
2178 }
2179
2180 golan_destroy_mkey(golan);
2181 golan_dealloc_pd(golan);
2182 golan_destory_eq(golan);
2183 golan_dealloc_uar(golan);
2184 golan_teardown_hca(golan, GOLAN_TEARDOWN_GRACEFUL);
2185 golan_handle_pages(golan, GOLAN_REG_PAGES , GOLAN_PAGES_TAKE);
2186 golan_disable_hca(golan);
2187 golan_cmd_uninit(golan);
2188 golan->flags &= ~GOLAN_OPEN;
2189 DBGC(golan, "%s: end\n", __FUNCTION__);
2190 }
2191
2192 static int golan_set_link_speed ( struct golan *golan ) {
2193 mlx_status status;
2194 int i = 0;
2195 int utils_inited = 0;
2196
2197 if ( ! golan->utils ) {
2198 utils_inited = 1;
2199 status = init_mlx_utils ( & golan->utils, golan->pci );
2200 MLX_CHECK_STATUS ( golan->pci, status, utils_init_err, "mlx_utils_init failed" );
2201 }
2202
2203 for ( i = 0; i < golan->caps.num_ports; ++i ) {
2204 status = mlx_set_link_speed ( golan->utils, i + 1, LINK_SPEED_IB, LINK_SPEED_SDR );
2205 MLX_CHECK_STATUS ( golan->pci, status, set_link_speed_err, "mlx_set_link_speed failed" );
2206 }
2207
2208 set_link_speed_err:
2209 if ( utils_inited )
2210 free_mlx_utils ( & golan->utils );
2211 utils_init_err:
2212 return status;
2213 }
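/* Design note: this helper may run before the driver-wide mlx_utils
 * instance exists, so it creates a temporary instance and frees it on
 * the way out; the set_link_speed_err label is reached on the success
 * path as well. */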
2214
2215 static inline int golan_bring_up(struct golan *golan)
2216 {
2217 int rc = 0;
2218 DBGC(golan, "%s\n", __FUNCTION__);
2219
2220 if (golan->flags & GOLAN_OPEN)
2221 return 0;
2222
2223 if (( rc = golan_cmd_init(golan) ))
2224 goto out;
2225
2226 if (( rc = golan_core_enable_hca(golan) ))
2227 goto cmd_uninit;
2228
2229 /* Query for need for boot pages */
2230 if (( rc = golan_handle_pages(golan, GOLAN_BOOT_PAGES, GOLAN_PAGES_GIVE) ))
2231 goto disable;
2232
2233 if (( rc = golan_qry_hca_cap(golan) ))
2234 goto pages;
2235
2236 if (( rc = golan_set_hca_cap(golan) ))
2237 goto pages;
2238
2239 if (( rc = golan_handle_pages(golan, GOLAN_INIT_PAGES, GOLAN_PAGES_GIVE) ))
2240 goto pages;
2241
2242 if (( rc = golan_set_link_speed ( golan ) ))
2243 goto pages_teardown;
2244
2246 if (( rc = golan_hca_init(golan) ))
2247 goto pages_2;
2248
2249 if (( rc = golan_alloc_uar(golan) ))
2250 goto teardown;
2251
2252 if (( rc = golan_create_eq(golan) ))
2253 goto de_uar;
2254
2255 if (( rc = golan_alloc_pd(golan) ))
2256 goto de_eq;
2257
2258 if (( rc = golan_create_mkey(golan) ))
2259 goto de_pd;
2260
2261 golan->flags |= GOLAN_OPEN;
2262 return 0;
2263
2264 golan_destroy_mkey(golan);
2265 de_pd:
2266 golan_dealloc_pd(golan);
2267 de_eq:
2268 golan_destory_eq(golan);
2269 de_uar:
2270 golan_dealloc_uar(golan);
2271 teardown:
2272 golan_teardown_hca(golan, GOLAN_TEARDOWN_GRACEFUL);
2273 pages_2:
2274 pages_teardown:
2275 golan_handle_pages(golan, GOLAN_INIT_PAGES, GOLAN_PAGES_TAKE);
2276 pages:
2277 golan_handle_pages(golan, GOLAN_BOOT_PAGES, GOLAN_PAGES_TAKE);
2278 disable:
2279 golan_disable_hca(golan);
2280 cmd_uninit:
2281 golan_cmd_uninit(golan);
2282 out:
2283 return rc;
2284 }
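/* The labels above form the usual reverse-order unwind ladder: each
 * label undoes only the steps that completed before the failure. A
 * minimal sketch of the same pattern (step_a/step_b and undo_a_fn are
 * illustrative names, not driver functions):
 *
 * if ( ( rc = step_a() ) )
 * goto out;
 * if ( ( rc = step_b() ) )
 * goto undo_a;
 * return 0;
 * undo_a:
 * undo_a_fn();
 * out:
 * return rc;
 */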
2285
2286 /**
2287 * Close Infiniband link
2288 *
2289 * @v ibdev Infiniband device
2290 */
2291 static void golan_ib_close ( struct ib_device *ibdev ) {
2292 struct golan *golan = NULL;
2293
2294 DBG ( "%s start\n", __FUNCTION__ );
2295 if ( ! ibdev )
2296 return;
2297 golan = ib_get_drvdata ( ibdev );
2298 golan_bring_down ( golan );
2299 DBG ( "%s end\n", __FUNCTION__ );
2300 }
2301
2302 /**
2303 * Initialise Infiniband link
2304 *
2305 * @v ibdev Infiniband device
2306 * @ret rc Return status code
2307 */
2308 static int golan_ib_open ( struct ib_device *ibdev ) {
2309 struct golan *golan = NULL;
2310 int rc;
2311 DBG ( "%s start\n", __FUNCTION__ );
2312 if ( ! ibdev )
2313 return -EINVAL;
2314 golan = ib_get_drvdata ( ibdev );
2315 if ( ( rc = golan_bring_up ( golan ) ) != 0 )
2316 return rc;
2317 golan_ib_update ( ibdev );
2318 DBG ( "%s end\n", __FUNCTION__ );
2319 return 0;
2320 }
2321
2322 /** Golan Infiniband operations */
2323 static struct ib_device_operations golan_ib_operations = {
2324 .create_cq = golan_create_cq,
2325 .destroy_cq = golan_destroy_cq,
2326 .create_qp = golan_create_qp,
2327 .modify_qp = golan_modify_qp,
2328 .destroy_qp = golan_destroy_qp,
2329 .post_send = golan_post_send,
2330 .post_recv = golan_post_recv,
2331 .poll_cq = golan_poll_cq,
2332 .poll_eq = golan_poll_eq,
2333 .open = golan_ib_open,
2334 .close = golan_ib_close,
2335 .mcast_attach = golan_mcast_attach,
2336 .mcast_detach = golan_mcast_detach,
2337 .set_port_info = golan_inform_sma,
2338 .set_pkey_table = golan_inform_sma,
2339 };
2340
2341 static int golan_probe_normal ( struct pci_device *pci ) {
2342 struct golan *golan;
2343 struct ib_device *ibdev;
2344 struct golan_port *port;
2345 int i;
2346 int rc = 0;
2347
2348 golan = golan_alloc();
2349 if ( !golan ) {
2350 rc = -ENOMEM;
2351 goto err_golan_alloc;
2352 }
2353
2354 /* at POST stage some BIOSes have limited available dynamic memory */
2355 if ( golan_init_fw_areas ( golan ) ) {
2356 rc = -ENOMEM;
2357 goto err_golan_golan_init_pages;
2358 }
2359
2360 /* Setup PCI bus and HCA BAR */
2361 pci_set_drvdata( pci, golan );
2362 golan->pci = pci;
2363 golan_pci_init( golan );
2364 /* config command queues */
2365 if ( ( rc = golan_bring_up ( golan ) ) != 0 ) {
2366 DBGC (golan ,"golan bringup failed\n");
2367 goto err_golan_bringup;
2368 }
2370
2371 if ( ! DEVICE_IS_CIB ( pci->device ) ) {
2372 if ( init_mlx_utils ( & golan->utils, pci ) ) {
2373 rc = -1;
2374 goto err_utils_init;
2375 }
2376 }
2377 /* Allocate Infiniband devices */
2378 for (i = 0; i < golan->caps.num_ports; ++i) {
2379 ibdev = alloc_ibdev( 0 );
2380 if ( !ibdev ) {
2381 rc = -ENOMEM;
2382 goto err_golan_probe_alloc_ibdev;
2383 }
2384 golan->ports[i].ibdev = ibdev;
2385 golan->ports[i].vep_number = 0;
2386 ibdev->op = &golan_ib_operations;
2387 ibdev->dev = &pci->dev;
2388 ibdev->port = (GOLAN_PORT_BASE + i);
2389 ib_set_drvdata( ibdev, golan );
2390 }
2391
2392 /* Register devices */
2393 for ( i = 0; i < golan->caps.num_ports; ++i ) {
2394 port = &golan->ports[i];
2395 if ((rc = golan_register_ibdev ( port ) ) != 0 ) {
2396 goto err_golan_probe_register_ibdev;
2397 }
2398 }
2399
2400 golan_bring_down ( golan );
2401
2402 return 0;
2403
2404 i = golan->caps.num_ports;
2405 err_golan_probe_register_ibdev:
2406 for ( i-- ; ( signed int ) i >= 0 ; i-- )
2407 unregister_ibdev ( golan->ports[i].ibdev );
2408
2409 i = golan->caps.num_ports;
2410 err_golan_probe_alloc_ibdev:
2411 for ( i-- ; ( signed int ) i >= 0 ; i-- )
2412 ibdev_put ( golan->ports[i].ibdev );
2413 if ( ! DEVICE_IS_CIB ( pci->device ) ) {
2414 free_mlx_utils ( & golan->utils );
2415 }
2416 err_utils_init:
2417 golan_bring_down ( golan );
2418 err_golan_bringup:
2419 iounmap( golan->iseg );
2420 golan_free_fw_areas ( golan );
2421 err_golan_golan_init_pages:
2422 free ( golan );
2423 err_golan_alloc:
2424 DBGC (golan ,"%s rc = %d\n", __FUNCTION__, rc);
2425 return rc;
2426 }
2427
2428 static void golan_remove_normal ( struct pci_device *pci ) {
2429 struct golan *golan = pci_get_drvdata(pci);
2430 struct golan_port *port;
2431 int i;
2432
2433 DBGC(golan, "%s\n", __FUNCTION__);
2434
2435 for ( i = ( golan->caps.num_ports - 1 ) ; i >= 0 ; i-- ) {
2436 port = &golan->ports[i];
2437 unregister_ibdev ( port->ibdev );
2438 }
2439 for ( i = ( golan->caps.num_ports - 1 ) ; i >= 0 ; i-- ) {
2440 netdev_nullify ( golan->ports[i].netdev );
2441 }
2442 for ( i = ( golan->caps.num_ports - 1 ) ; i >= 0 ; i-- ) {
2443 ibdev_put ( golan->ports[i].ibdev );
2444 }
2445 if ( ! DEVICE_IS_CIB ( pci->device ) ) {
2446 free_mlx_utils ( & golan->utils );
2447 }
2448 iounmap( golan->iseg );
2449 golan_free_fw_areas ( golan );
2450 free(golan);
2451 }
2452
2453 /***************************************************************************
2454 * NODNIC operations
2455 **************************************************************************/
2456 static mlx_status shomron_tx_uar_send_db ( struct ib_device *ibdev,
2457 struct nodnic_send_wqbb *wqbb ) {
2458 mlx_status status = MLX_SUCCESS;
2459 struct flexboot_nodnic *flexboot_nodnic = ib_get_drvdata ( ibdev );
2460 struct shomron_nodnic_eth_send_wqe *eth_wqe =
2461 ( struct shomron_nodnic_eth_send_wqe * )wqbb;
2462 struct shomronprm_wqe_segment_ctrl_send *ctrl;
2463
2464 if ( ! eth_wqe || ! flexboot_nodnic->device_priv.uar.virt ) {
2465 DBG("%s: Invalid parameters\n",__FUNCTION__);
2466 status = MLX_FAILED;
2467 goto err;
2468 }
2469 wmb();
2470 ctrl = & eth_wqe->ctrl;
2471 writeq(*((__be64 *)ctrl), flexboot_nodnic->device_priv.uar.virt +
2472 ( ( MLX_GET ( ctrl, wqe_index ) & 0x1 ) ? DB_BUFFER0_ODD_OFFSET
2473 : DB_BUFFER0_EVEN_OFFSET ) );
2474 err:
2475 return status;
2476 }
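/* Doorbell note: the first 8 bytes of the WQE control segment are
 * written to one of two UAR doorbell buffers, chosen by WQE index
 * parity (DB_BUFFER0_EVEN_OFFSET / DB_BUFFER0_ODD_OFFSET), presumably
 * so that back-to-back doorbells do not overwrite each other before
 * the device samples them; the wmb() above orders the WQE contents
 * ahead of the doorbell write. */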
2477
2478 static mlx_status shomron_fill_eth_send_wqe ( struct ib_device *ibdev,
2479 struct ib_queue_pair *qp, struct ib_address_vector *av __unused,
2480 struct io_buffer *iobuf, struct nodnic_send_wqbb *wqbb,
2481 unsigned long wqe_index ) {
2482 mlx_status status = MLX_SUCCESS;
2483 struct flexboot_nodnic *flexboot_nodnic = ib_get_drvdata ( ibdev );
2484 struct shomron_nodnic_eth_send_wqe *eth_wqe = NULL;
2485 struct flexboot_nodnic_port *port = &flexboot_nodnic->port[ibdev->port - 1];
2486 struct flexboot_nodnic_queue_pair *flexboot_nodnic_qp =
2487 ib_qp_get_drvdata ( qp );
2488 nodnic_qp *nodnic_qp = flexboot_nodnic_qp->nodnic_queue_pair;
2489 struct nodnic_send_ring *send_ring = &nodnic_qp->send;
2490 mlx_uint32 qpn = 0;
2491
2492 eth_wqe = (struct shomron_nodnic_eth_send_wqe *)wqbb;
2493 memset ( eth_wqe, 0, sizeof ( *eth_wqe ) );
2495
2496 status = nodnic_port_get_qpn(&port->port_priv, &send_ring->nodnic_ring,
2497 &qpn);
2498 if ( status != MLX_SUCCESS ) {
2499 DBG("nodnic_port_get_qpn failed\n");
2500 goto err;
2501 }
2502
2503 #define SHOMRON_GENERATE_CQE 0x3
2504 #define SHOMRON_INLINE_HEADERS_SIZE 18
2505 #define SHOMRON_INLINE_HEADERS_OFFSET 32
2506 MLX_FILL_2 ( &eth_wqe->ctrl, 0, opcode, FLEXBOOT_NODNIC_OPCODE_SEND,
2507 wqe_index, wqe_index & 0xFFFF);
2508 MLX_FILL_2 ( &eth_wqe->ctrl, 1, ds, 0x4 , qpn, qpn );
2509 MLX_FILL_1 ( &eth_wqe->ctrl, 2,
2510 ce, SHOMRON_GENERATE_CQE /* generate completion */
2511 );
2512 MLX_FILL_2 ( &eth_wqe->ctrl, 7,
2513 inline_headers1,
2514 cpu_to_be16(*(mlx_uint16 *)iobuf->data),
2515 inline_headers_size, SHOMRON_INLINE_HEADERS_SIZE
2516 );
2517 memcpy((void *)&eth_wqe->ctrl + SHOMRON_INLINE_HEADERS_OFFSET,
2518 iobuf->data + 2, SHOMRON_INLINE_HEADERS_SIZE - 2);
2519 iob_pull(iobuf, SHOMRON_INLINE_HEADERS_SIZE);
2520 MLX_FILL_1 ( &eth_wqe->data[0], 0,
2521 byte_count, iob_len ( iobuf ) );
2522 MLX_FILL_1 ( &eth_wqe->data[0], 1, l_key,
2523 flexboot_nodnic->device_priv.lkey );
2524 MLX_FILL_H ( &eth_wqe->data[0], 2,
2525 local_address_h, virt_to_bus ( iobuf->data ) );
2526 MLX_FILL_1 ( &eth_wqe->data[0], 3,
2527 local_address_l, virt_to_bus ( iobuf->data ) );
2528 err:
2529 return status;
2530 }
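/* Inline-header walkthrough (descriptive of the code above): the
 * first SHOMRON_INLINE_HEADERS_SIZE (18) bytes of the frame live in
 * the WQE itself. Bytes 0-1 go into the inline_headers1 field of the
 * control segment, bytes 2-17 are copied to offset
 * SHOMRON_INLINE_HEADERS_OFFSET (32), and iob_pull() then leaves the
 * data segment (byte_count / l_key / local_address) describing only
 * the remaining payload. */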
2531
2532 static mlx_status shomron_fill_completion( void *cqe, struct cqe_data *cqe_data ) {
2533 union shomronprm_completion_entry *cq_entry;
2534 uint32_t opcode;
2535
2536 cq_entry = (union shomronprm_completion_entry *)cqe;
2537 cqe_data->owner = MLX_GET ( &cq_entry->normal, owner );
2538 opcode = MLX_GET ( &cq_entry->normal, opcode );
2539 #define FLEXBOOT_NODNIC_OPCODE_CQ_SEND 0
2540 #define FLEXBOOT_NODNIC_OPCODE_CQ_RECV 2
2541 #define FLEXBOOT_NODNIC_OPCODE_CQ_SEND_ERR 13
2542 #define FLEXBOOT_NODNIC_OPCODE_CQ_RECV_ERR 14
2543 cqe_data->is_error =
2544 ( opcode >= FLEXBOOT_NODNIC_OPCODE_CQ_SEND_ERR );
2545 if ( cqe_data->is_error ) {
2546 cqe_data->syndrome = MLX_GET ( &cq_entry->error, syndrome );
2547 cqe_data->vendor_err_syndrome =
2548 MLX_GET ( &cq_entry->error, vendor_error_syndrome );
2549 cqe_data->is_send =
2550 (opcode == FLEXBOOT_NODNIC_OPCODE_CQ_SEND_ERR);
2551 } else {
2552 cqe_data->is_send =
2553 (opcode == FLEXBOOT_NODNIC_OPCODE_CQ_SEND);
2554 cqe_data->wqe_counter = MLX_GET ( &cq_entry->normal, wqe_counter );
2555 cqe_data->byte_cnt = MLX_GET ( &cq_entry->normal, byte_cnt );
2556
2557 }
2558 if ( cqe_data->is_send == TRUE )
2559 cqe_data->qpn = MLX_GET ( &cq_entry->normal, qpn );
2560 else
2561 cqe_data->qpn = MLX_GET ( &cq_entry->normal, srqn );
2562
2563 return 0;
2564 }
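/* CQE opcode mapping used above, per the defines: 0 = send
 * completion, 2 = receive completion, 13 = send error, 14 = receive
 * error. Opcodes >= 13 are treated as errors and report syndrome
 * fields; normal completions report wqe_counter and byte_cnt. */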
2565
2566 static mlx_status shomron_cqe_set_owner ( void *cq, unsigned int num_cqes ) {
2567 unsigned int i = 0;
2568 union shomronprm_completion_entry *cq_list;
2569
2570 cq_list = (union shomronprm_completion_entry *)cq;
2571 for ( ; i < num_cqes ; i++ )
2572 MLX_FILL_1 ( &cq_list[i].normal, 15, owner, 1 );
2573 return 0;
2574 }
2575
2576 static mlx_size shomron_get_cqe_size ( void ) {
2577 return sizeof ( union shomronprm_completion_entry );
2578 }
2579
2580 struct flexboot_nodnic_callbacks shomron_nodnic_callbacks = {
2581 .get_cqe_size = shomron_get_cqe_size,
2582 .fill_send_wqe[IB_QPT_ETH] = shomron_fill_eth_send_wqe,
2583 .fill_completion = shomron_fill_completion,
2584 .cqe_set_owner = shomron_cqe_set_owner,
2585 .irq = flexboot_nodnic_eth_irq,
2586 .tx_uar_send_doorbell_fn = shomron_tx_uar_send_db,
2587 };
2588
2589 static int shomron_nodnic_is_supported ( struct pci_device *pci ) {
2590 if ( DEVICE_IS_CIB ( pci->device ) )
2591 return 0;
2592
2593 return flexboot_nodnic_is_supported ( pci );
2594 }
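/* ConnectIB (see DEVICE_IS_CIB) is always driven through the normal
 * command-interface path; for later devices the NODNIC path is chosen
 * whenever flexboot_nodnic_is_supported() reports support. */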
2595 /**************************************************************************/
2596
2597 static int golan_probe ( struct pci_device *pci ) {
2598 int rc = -ENOTSUP;
2599
2600 DBG ( "%s: start\n", __FUNCTION__ );
2601
2602 if ( ! pci ) {
2603 DBG ( "%s: PCI is NULL\n", __FUNCTION__ );
2604 rc = -EINVAL;
2605 goto probe_done;
2606 }
2607
2608 if ( shomron_nodnic_is_supported ( pci ) ) {
2609 DBG ( "%s: Using NODNIC driver\n", __FUNCTION__ );
2610 rc = flexboot_nodnic_probe ( pci, &shomron_nodnic_callbacks, NULL );
2611 } else {
2612 DBG ( "%s: Using normal driver\n", __FUNCTION__ );
2613 rc = golan_probe_normal ( pci );
2614 }
2615
2616 probe_done:
2617 DBG ( "%s: rc = %d\n", __FUNCTION__, rc );
2618 return rc;
2619 }
2620
2621 static void golan_remove ( struct pci_device *pci ) {
2622 DBG ( "%s: start\n", __FUNCTION__ );
2623
2624 if ( ! shomron_nodnic_is_supported ( pci ) ) {
2625 DBG ( "%s: Using normal driver remove\n", __FUNCTION__ );
2626 golan_remove_normal ( pci );
2627 return;
2628 }
2629
2630 DBG ( "%s: Using NODNIC driver remove\n", __FUNCTION__ );
2631
2632 flexboot_nodnic_remove ( pci );
2633
2634 DBG ( "%s: end\n", __FUNCTION__ );
2635 }
2636
2637 static struct pci_device_id golan_nics[] = {
2638 PCI_ROM ( 0x15b3, 0x1011, "ConnectIB", "ConnectIB HCA driver, DevID 4113", 0 ),
2639 PCI_ROM ( 0x15b3, 0x1013, "ConnectX-4", "ConnectX-4 HCA driver, DevID 4115", 0 ),
2640 PCI_ROM ( 0x15b3, 0x1015, "ConnectX-4Lx", "ConnectX-4Lx HCA driver, DevID 4117", 0 ),
2641 PCI_ROM ( 0x15b3, 0x1017, "ConnectX-5", "ConnectX-5 HCA driver, DevID 4119", 0 ),
2642 PCI_ROM ( 0x15b3, 0x1019, "ConnectX-5EX", "ConnectX-5EX HCA driver, DevID 4121", 0 ),
2643 };
2644
2645 struct pci_driver golan_driver __pci_driver = {
2646 .ids = golan_nics,
2647 .id_count = (sizeof(golan_nics) / sizeof(golan_nics[0])),
2648 .probe = golan_probe,
2649 .remove = golan_remove,
2650 };