/*
 * QEMU paravirtual RDMA - Command channel
 *
 * Copyright (C) 2018 Oracle
 * Copyright (C) 2018 Red Hat Inc
 *
 * Authors:
 *     Yuval Shaia <yuval.shaia@oracle.com>
 *     Marcel Apfelbaum <marcel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "hw/pci/pci.h"
#include "hw/pci/pci_ids.h"

#include "../rdma_backend.h"
#include "../rdma_rm.h"
#include "../rdma_utils.h"

#include "trace.h"
#include "pvrdma.h"
#include "standard-headers/rdma/vmw_pvrdma-abi.h"

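/*
 * Guest memory handed to the device is described by a two-level page
 * directory: pdir_dma is the GPA of a directory page whose entries point
 * to page-table pages, and each page-table entry is the GPA of one data
 * page.  pvrdma_map_to_pdir() walks this structure and uses mremap() to
 * stitch the individually mapped pages into one contiguous host-virtual
 * area of 'length' bytes, one TARGET_PAGE_SIZE chunk per entry.
 */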
static void *pvrdma_map_to_pdir(PCIDevice *pdev, uint64_t pdir_dma,
                                uint32_t nchunks, size_t length)
{
    uint64_t *dir, *tbl;
    int tbl_idx, dir_idx, addr_idx;
    void *host_virt = NULL, *curr_page;

    if (!nchunks) {
        rdma_error_report("Got nchunks=0");
        return NULL;
    }

    length = ROUND_UP(length, TARGET_PAGE_SIZE);
    if (nchunks * TARGET_PAGE_SIZE != length) {
        rdma_error_report("Invalid nchunks/length (%u, %lu)", nchunks,
                          (unsigned long)length);
        return NULL;
    }

    dir = rdma_pci_dma_map(pdev, pdir_dma, TARGET_PAGE_SIZE);
    if (!dir) {
        rdma_error_report("Failed to map to page directory");
        return NULL;
    }

    tbl = rdma_pci_dma_map(pdev, dir[0], TARGET_PAGE_SIZE);
    if (!tbl) {
        rdma_error_report("Failed to map to page table 0");
        goto out_unmap_dir;
    }

    curr_page = rdma_pci_dma_map(pdev, (dma_addr_t)tbl[0], TARGET_PAGE_SIZE);
    if (!curr_page) {
        rdma_error_report("Failed to map the page 0");
        goto out_unmap_tbl;
    }

    host_virt = mremap(curr_page, 0, length, MREMAP_MAYMOVE);
    if (host_virt == MAP_FAILED) {
        host_virt = NULL;
        rdma_error_report("Failed to remap memory for host_virt");
        goto out_unmap_tbl;
    }
    trace_pvrdma_map_to_pdir_host_virt(curr_page, host_virt);

    rdma_pci_dma_unmap(pdev, curr_page, TARGET_PAGE_SIZE);

    dir_idx = 0;
    tbl_idx = 1;
    addr_idx = 1;
    while (addr_idx < nchunks) {
        if (tbl_idx == TARGET_PAGE_SIZE / sizeof(uint64_t)) {
            tbl_idx = 0;
            dir_idx++;
            rdma_pci_dma_unmap(pdev, tbl, TARGET_PAGE_SIZE);
            tbl = rdma_pci_dma_map(pdev, dir[dir_idx], TARGET_PAGE_SIZE);
            if (!tbl) {
                rdma_error_report("Failed to map to page table %d", dir_idx);
                goto out_unmap_host_virt;
            }
        }

        curr_page = rdma_pci_dma_map(pdev, (dma_addr_t)tbl[tbl_idx],
                                     TARGET_PAGE_SIZE);
        if (!curr_page) {
            rdma_error_report("Failed to map to page %d, dir %d", tbl_idx,
                              dir_idx);
            goto out_unmap_host_virt;
        }

        mremap(curr_page, 0, TARGET_PAGE_SIZE, MREMAP_MAYMOVE | MREMAP_FIXED,
               host_virt + TARGET_PAGE_SIZE * addr_idx);

        trace_pvrdma_map_to_pdir_next_page(addr_idx, curr_page, host_virt +
                                           TARGET_PAGE_SIZE * addr_idx);

        rdma_pci_dma_unmap(pdev, curr_page, TARGET_PAGE_SIZE);

        addr_idx++;

        tbl_idx++;
    }

    goto out_unmap_tbl;

out_unmap_host_virt:
    munmap(host_virt, length);
    host_virt = NULL;

out_unmap_tbl:
    rdma_pci_dma_unmap(pdev, tbl, TARGET_PAGE_SIZE);

out_unmap_dir:
    rdma_pci_dma_unmap(pdev, dir, TARGET_PAGE_SIZE);

    return host_virt;
}

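/*
 * Command handlers.  Each handler below receives the guest request and
 * response unions and returns 0 on success or a negative errno value;
 * pvrdma_exec_cmd() converts that into the err field of the response header.
 */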
static int query_port(PVRDMADev *dev, union pvrdma_cmd_req *req,
                      union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_query_port *cmd = &req->query_port;
    struct pvrdma_cmd_query_port_resp *resp = &rsp->query_port_resp;
    struct pvrdma_port_attr attrs = {};

    if (cmd->port_num > MAX_PORTS) {
        return -EINVAL;
    }

    if (rdma_backend_query_port(&dev->backend_dev,
                                (struct ibv_port_attr *)&attrs)) {
        return -ENOMEM;
    }

    memset(resp, 0, sizeof(*resp));

    resp->attrs.state = dev->func0->device_active ? attrs.state :
                                                    PVRDMA_PORT_DOWN;
    resp->attrs.max_mtu = attrs.max_mtu;
    resp->attrs.active_mtu = attrs.active_mtu;
    resp->attrs.phys_state = attrs.phys_state;
    resp->attrs.gid_tbl_len = MIN(MAX_PORT_GIDS, attrs.gid_tbl_len);
    resp->attrs.max_msg_sz = 1024;
    resp->attrs.pkey_tbl_len = MIN(MAX_PORT_PKEYS, attrs.pkey_tbl_len);
    resp->attrs.active_width = 1;
    resp->attrs.active_speed = 1;

    return 0;
}

static int query_pkey(PVRDMADev *dev, union pvrdma_cmd_req *req,
                      union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_query_pkey *cmd = &req->query_pkey;
    struct pvrdma_cmd_query_pkey_resp *resp = &rsp->query_pkey_resp;

    if (cmd->port_num > MAX_PORTS) {
        return -EINVAL;
    }

    if (cmd->index > MAX_PKEYS) {
        return -EINVAL;
    }

    memset(resp, 0, sizeof(*resp));

    resp->pkey = PVRDMA_PKEY;

    return 0;
}

static int create_pd(PVRDMADev *dev, union pvrdma_cmd_req *req,
                     union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_create_pd *cmd = &req->create_pd;
    struct pvrdma_cmd_create_pd_resp *resp = &rsp->create_pd_resp;
    int rc;

    memset(resp, 0, sizeof(*resp));
    rc = rdma_rm_alloc_pd(&dev->rdma_dev_res, &dev->backend_dev,
                          &resp->pd_handle, cmd->ctx_handle);

    return rc;
}

static int destroy_pd(PVRDMADev *dev, union pvrdma_cmd_req *req,
                      union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_destroy_pd *cmd = &req->destroy_pd;

    rdma_rm_dealloc_pd(&dev->rdma_dev_res, cmd->pd_handle);

    return 0;
}

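/*
 * Memory region registration: DMA MRs (PVRDMA_MR_FLAG_DMA) are not backed
 * by guest pages here, so host_virt stays NULL; for all other MRs the guest
 * page directory is first translated into one contiguous host mapping.
 */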
static int create_mr(PVRDMADev *dev, union pvrdma_cmd_req *req,
                     union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_create_mr *cmd = &req->create_mr;
    struct pvrdma_cmd_create_mr_resp *resp = &rsp->create_mr_resp;
    PCIDevice *pci_dev = PCI_DEVICE(dev);
    void *host_virt = NULL;
    int rc = 0;

    memset(resp, 0, sizeof(*resp));

    if (!(cmd->flags & PVRDMA_MR_FLAG_DMA)) {
        host_virt = pvrdma_map_to_pdir(pci_dev, cmd->pdir_dma, cmd->nchunks,
                                       cmd->length);
        if (!host_virt) {
            rdma_error_report("Failed to map to pdir");
            return -EINVAL;
        }
    }

    rc = rdma_rm_alloc_mr(&dev->rdma_dev_res, cmd->pd_handle, cmd->start,
                          cmd->length, host_virt, cmd->access_flags,
                          &resp->mr_handle, &resp->lkey, &resp->rkey);
    if (rc && host_virt) {
        munmap(host_virt, cmd->length);
    }

    return rc;
}

static int destroy_mr(PVRDMADev *dev, union pvrdma_cmd_req *req,
                      union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_destroy_mr *cmd = &req->destroy_mr;

    rdma_rm_dealloc_mr(&dev->rdma_dev_res, cmd->mr_handle);

    return 0;
}

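/*
 * CQ ring layout in guest memory: the first page-table entry maps a page
 * holding two PvrdmaRingState slots, and the CQ ring uses slot 1 (which is
 * why tear-down steps back with --ring_state before unmapping).  The
 * remaining nchunks - 1 pages hold the CQEs themselves.
 */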
static int create_cq_ring(PCIDevice *pci_dev, PvrdmaRing **ring,
                          uint64_t pdir_dma, uint32_t nchunks, uint32_t cqe)
{
    uint64_t *dir = NULL, *tbl = NULL;
    PvrdmaRing *r;
    int rc = -EINVAL;
    char ring_name[MAX_RING_NAME_SZ];

    if (!nchunks || nchunks > PVRDMA_MAX_FAST_REG_PAGES) {
        rdma_error_report("Got invalid nchunks: %d", nchunks);
        return rc;
    }

    dir = rdma_pci_dma_map(pci_dev, pdir_dma, TARGET_PAGE_SIZE);
    if (!dir) {
        rdma_error_report("Failed to map to CQ page directory");
        goto out;
    }

    tbl = rdma_pci_dma_map(pci_dev, dir[0], TARGET_PAGE_SIZE);
    if (!tbl) {
        rdma_error_report("Failed to map to CQ page table");
        goto out;
    }

    r = g_malloc(sizeof(*r));
    *ring = r;

    r->ring_state = (PvrdmaRingState *)
        rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE);

    if (!r->ring_state) {
        rdma_error_report("Failed to map to CQ ring state");
        goto out_free_ring;
    }

    sprintf(ring_name, "cq_ring_%" PRIx64, pdir_dma);
    rc = pvrdma_ring_init(r, ring_name, pci_dev, &r->ring_state[1],
                          cqe, sizeof(struct pvrdma_cqe),
                          /* first page is ring state */
                          (dma_addr_t *)&tbl[1], nchunks - 1);
    if (rc) {
        goto out_unmap_ring_state;
    }

    goto out;

out_unmap_ring_state:
    /* ring_state was in slot 1, not 0 so need to jump back */
    rdma_pci_dma_unmap(pci_dev, --r->ring_state, TARGET_PAGE_SIZE);

out_free_ring:
    g_free(r);

out:
    rdma_pci_dma_unmap(pci_dev, tbl, TARGET_PAGE_SIZE);
    rdma_pci_dma_unmap(pci_dev, dir, TARGET_PAGE_SIZE);

    return rc;
}

static void destroy_cq_ring(PvrdmaRing *ring)
{
    pvrdma_ring_free(ring);
    /* ring_state was in slot 1, not 0 so need to jump back */
    rdma_pci_dma_unmap(ring->dev, --ring->ring_state, TARGET_PAGE_SIZE);
    g_free(ring);
}

static int create_cq(PVRDMADev *dev, union pvrdma_cmd_req *req,
                     union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_create_cq *cmd = &req->create_cq;
    struct pvrdma_cmd_create_cq_resp *resp = &rsp->create_cq_resp;
    PvrdmaRing *ring = NULL;
    int rc;

    memset(resp, 0, sizeof(*resp));

    resp->cqe = cmd->cqe;

    rc = create_cq_ring(PCI_DEVICE(dev), &ring, cmd->pdir_dma, cmd->nchunks,
                        resp->cqe);
    if (rc) {
        return rc;
    }

    rc = rdma_rm_alloc_cq(&dev->rdma_dev_res, &dev->backend_dev, cmd->cqe,
                          &resp->cq_handle, ring);
    if (rc) {
        destroy_cq_ring(ring);
    }

    resp->cqe = cmd->cqe;

    return rc;
}

static int destroy_cq(PVRDMADev *dev, union pvrdma_cmd_req *req,
                      union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_destroy_cq *cmd = &req->destroy_cq;
    RdmaRmCQ *cq;
    PvrdmaRing *ring;

    cq = rdma_rm_get_cq(&dev->rdma_dev_res, cmd->cq_handle);
    if (!cq) {
        rdma_error_report("Got invalid CQ handle");
        return -EINVAL;
    }

    ring = (PvrdmaRing *)cq->opaque;
    destroy_cq_ring(ring);

    rdma_rm_dealloc_cq(&dev->rdma_dev_res, cmd->cq_handle);

    return 0;
}

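/*
 * QP rings share a single ring-state page: slot 0 holds the send ring state
 * and slot 1 the receive ring state.  Send WQE pages start at tbl[1] and
 * receive WQE pages at tbl[1 + spages].  When the QP uses an SRQ
 * (is_srq != 0) only the send ring is created.
 */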
static int create_qp_rings(PCIDevice *pci_dev, uint64_t pdir_dma,
                           PvrdmaRing **rings, uint32_t scqe, uint32_t smax_sge,
                           uint32_t spages, uint32_t rcqe, uint32_t rmax_sge,
                           uint32_t rpages, uint8_t is_srq)
{
    uint64_t *dir = NULL, *tbl = NULL;
    PvrdmaRing *sr, *rr;
    int rc = -EINVAL;
    char ring_name[MAX_RING_NAME_SZ];
    uint32_t wqe_sz;

    if (!spages || spages > PVRDMA_MAX_FAST_REG_PAGES) {
        rdma_error_report("Got invalid send page count for QP ring: %d",
                          spages);
        return rc;
    }

    if (!is_srq && (!rpages || rpages > PVRDMA_MAX_FAST_REG_PAGES)) {
        rdma_error_report("Got invalid recv page count for QP ring: %d",
                          rpages);
        return rc;
    }

    dir = rdma_pci_dma_map(pci_dev, pdir_dma, TARGET_PAGE_SIZE);
    if (!dir) {
        rdma_error_report("Failed to map to QP page directory");
        goto out;
    }

    tbl = rdma_pci_dma_map(pci_dev, dir[0], TARGET_PAGE_SIZE);
    if (!tbl) {
        rdma_error_report("Failed to map to QP page table");
        goto out;
    }

    if (!is_srq) {
        sr = g_malloc(2 * sizeof(*rr));
        rr = &sr[1];
    } else {
        sr = g_malloc(sizeof(*sr));
    }

    *rings = sr;

    /* Create send ring */
    sr->ring_state = (PvrdmaRingState *)
        rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE);
    if (!sr->ring_state) {
        rdma_error_report("Failed to map to QP ring state");
        goto out_free_sr_mem;
    }

    wqe_sz = pow2ceil(sizeof(struct pvrdma_sq_wqe_hdr) +
                      sizeof(struct pvrdma_sge) * smax_sge - 1);

    sprintf(ring_name, "qp_sring_%" PRIx64, pdir_dma);
    rc = pvrdma_ring_init(sr, ring_name, pci_dev, sr->ring_state,
                          scqe, wqe_sz, (dma_addr_t *)&tbl[1], spages);
    if (rc) {
        goto out_unmap_ring_state;
    }

    if (!is_srq) {
        /* Create recv ring */
        rr->ring_state = &sr->ring_state[1];
        wqe_sz = pow2ceil(sizeof(struct pvrdma_rq_wqe_hdr) +
                          sizeof(struct pvrdma_sge) * rmax_sge - 1);
        sprintf(ring_name, "qp_rring_%" PRIx64, pdir_dma);
        rc = pvrdma_ring_init(rr, ring_name, pci_dev, rr->ring_state,
                              rcqe, wqe_sz, (dma_addr_t *)&tbl[1 + spages],
                              rpages);
        if (rc) {
            goto out_free_sr;
        }
    }

    goto out;

out_free_sr:
    pvrdma_ring_free(sr);

out_unmap_ring_state:
    rdma_pci_dma_unmap(pci_dev, sr->ring_state, TARGET_PAGE_SIZE);

out_free_sr_mem:
    g_free(sr);

out:
    rdma_pci_dma_unmap(pci_dev, tbl, TARGET_PAGE_SIZE);
    rdma_pci_dma_unmap(pci_dev, dir, TARGET_PAGE_SIZE);

    return rc;
}

static void destroy_qp_rings(PvrdmaRing *ring, uint8_t is_srq)
{
    pvrdma_ring_free(&ring[0]);
    if (!is_srq) {
        pvrdma_ring_free(&ring[1]);
    }

    rdma_pci_dma_unmap(ring->dev, ring->ring_state, TARGET_PAGE_SIZE);
    g_free(ring);
}

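/*
 * The receive-ring page count handed to create_qp_rings() is whatever is
 * left after the shared ring-state page and the send pages:
 * total_chunks - send_chunks - 1.
 */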
static int create_qp(PVRDMADev *dev, union pvrdma_cmd_req *req,
                     union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_create_qp *cmd = &req->create_qp;
    struct pvrdma_cmd_create_qp_resp *resp = &rsp->create_qp_resp;
    PvrdmaRing *rings = NULL;
    int rc;

    memset(resp, 0, sizeof(*resp));

    rc = create_qp_rings(PCI_DEVICE(dev), cmd->pdir_dma, &rings,
                         cmd->max_send_wr, cmd->max_send_sge, cmd->send_chunks,
                         cmd->max_recv_wr, cmd->max_recv_sge,
                         cmd->total_chunks - cmd->send_chunks - 1, cmd->is_srq);
    if (rc) {
        return rc;
    }

    rc = rdma_rm_alloc_qp(&dev->rdma_dev_res, cmd->pd_handle, cmd->qp_type,
                          cmd->max_send_wr, cmd->max_send_sge,
                          cmd->send_cq_handle, cmd->max_recv_wr,
                          cmd->max_recv_sge, cmd->recv_cq_handle, rings,
                          &resp->qpn, cmd->is_srq, cmd->srq_handle);
    if (rc) {
        destroy_qp_rings(rings, cmd->is_srq);
        return rc;
    }

    resp->max_send_wr = cmd->max_send_wr;
    resp->max_recv_wr = cmd->max_recv_wr;
    resp->max_send_sge = cmd->max_send_sge;
    resp->max_recv_sge = cmd->max_recv_sge;
    resp->max_inline_data = cmd->max_inline_data;

    return 0;
}

static int modify_qp(PVRDMADev *dev, union pvrdma_cmd_req *req,
                     union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_modify_qp *cmd = &req->modify_qp;
    int rc;

    /* No need to verify sgid_index since it is u8 */

    rc = rdma_rm_modify_qp(&dev->rdma_dev_res, &dev->backend_dev,
                           cmd->qp_handle, cmd->attr_mask,
                           cmd->attrs.ah_attr.grh.sgid_index,
                           (union ibv_gid *)&cmd->attrs.ah_attr.grh.dgid,
                           cmd->attrs.dest_qp_num,
                           (enum ibv_qp_state)cmd->attrs.qp_state,
                           cmd->attrs.qkey, cmd->attrs.rq_psn,
                           cmd->attrs.sq_psn);

    return rc;
}

static int query_qp(PVRDMADev *dev, union pvrdma_cmd_req *req,
                    union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_query_qp *cmd = &req->query_qp;
    struct pvrdma_cmd_query_qp_resp *resp = &rsp->query_qp_resp;
    struct ibv_qp_init_attr init_attr;
    int rc;

    memset(resp, 0, sizeof(*resp));

    rc = rdma_rm_query_qp(&dev->rdma_dev_res, &dev->backend_dev, cmd->qp_handle,
                          (struct ibv_qp_attr *)&resp->attrs, cmd->attr_mask,
                          &init_attr);

    return rc;
}

static int destroy_qp(PVRDMADev *dev, union pvrdma_cmd_req *req,
                      union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_destroy_qp *cmd = &req->destroy_qp;
    RdmaRmQP *qp;
    PvrdmaRing *ring;

    qp = rdma_rm_get_qp(&dev->rdma_dev_res, cmd->qp_handle);
    if (!qp) {
        return -EINVAL;
    }

    ring = (PvrdmaRing *)qp->opaque;
    destroy_qp_rings(ring, qp->is_srq);
    rdma_rm_dealloc_qp(&dev->rdma_dev_res, cmd->qp_handle);

    return 0;
}

static int create_bind(PVRDMADev *dev, union pvrdma_cmd_req *req,
                       union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_create_bind *cmd = &req->create_bind;
    int rc;
    union ibv_gid *gid = (union ibv_gid *)&cmd->new_gid;

    if (cmd->index >= MAX_PORT_GIDS) {
        return -EINVAL;
    }

    rc = rdma_rm_add_gid(&dev->rdma_dev_res, &dev->backend_dev,
                         dev->backend_eth_device_name, gid, cmd->index);

    return rc;
}

static int destroy_bind(PVRDMADev *dev, union pvrdma_cmd_req *req,
                        union pvrdma_cmd_resp *rsp)
{
    int rc;

    struct pvrdma_cmd_destroy_bind *cmd = &req->destroy_bind;

    if (cmd->index >= MAX_PORT_GIDS) {
        return -EINVAL;
    }

    rc = rdma_rm_del_gid(&dev->rdma_dev_res, &dev->backend_dev,
                         dev->backend_eth_device_name, cmd->index);

    return rc;
}

static int create_uc(PVRDMADev *dev, union pvrdma_cmd_req *req,
                     union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_create_uc *cmd = &req->create_uc;
    struct pvrdma_cmd_create_uc_resp *resp = &rsp->create_uc_resp;
    int rc;

    memset(resp, 0, sizeof(*resp));
    rc = rdma_rm_alloc_uc(&dev->rdma_dev_res, cmd->pfn, &resp->ctx_handle);

    return rc;
}

static int destroy_uc(PVRDMADev *dev, union pvrdma_cmd_req *req,
                      union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_destroy_uc *cmd = &req->destroy_uc;

    rdma_rm_dealloc_uc(&dev->rdma_dev_res, cmd->ctx_handle);

    return 0;
}

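/*
 * An SRQ has a single receive ring; its guest layout matches the CQ ring:
 * one ring-state page followed by nchunks - 1 WQE pages.
 */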
static int create_srq_ring(PCIDevice *pci_dev, PvrdmaRing **ring,
                           uint64_t pdir_dma, uint32_t max_wr,
                           uint32_t max_sge, uint32_t nchunks)
{
    uint64_t *dir = NULL, *tbl = NULL;
    PvrdmaRing *r;
    int rc = -EINVAL;
    char ring_name[MAX_RING_NAME_SZ];
    uint32_t wqe_sz;

    if (!nchunks || nchunks > PVRDMA_MAX_FAST_REG_PAGES) {
        rdma_error_report("Got invalid page count for SRQ ring: %d",
                          nchunks);
        return rc;
    }

    dir = rdma_pci_dma_map(pci_dev, pdir_dma, TARGET_PAGE_SIZE);
    if (!dir) {
        rdma_error_report("Failed to map to SRQ page directory");
        goto out;
    }

    tbl = rdma_pci_dma_map(pci_dev, dir[0], TARGET_PAGE_SIZE);
    if (!tbl) {
        rdma_error_report("Failed to map to SRQ page table");
        goto out;
    }

    r = g_malloc(sizeof(*r));
    *ring = r;

    r->ring_state = (PvrdmaRingState *)
        rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE);
    if (!r->ring_state) {
        rdma_error_report("Failed to map to SRQ ring state");
        goto out_free_ring_mem;
    }

    wqe_sz = pow2ceil(sizeof(struct pvrdma_rq_wqe_hdr) +
                      sizeof(struct pvrdma_sge) * max_sge - 1);
    sprintf(ring_name, "srq_ring_%" PRIx64, pdir_dma);
    rc = pvrdma_ring_init(r, ring_name, pci_dev, &r->ring_state[1], max_wr,
                          wqe_sz, (dma_addr_t *)&tbl[1], nchunks - 1);
    if (rc) {
        goto out_unmap_ring_state;
    }

    goto out;

out_unmap_ring_state:
    rdma_pci_dma_unmap(pci_dev, r->ring_state, TARGET_PAGE_SIZE);

out_free_ring_mem:
    g_free(r);

out:
    rdma_pci_dma_unmap(pci_dev, tbl, TARGET_PAGE_SIZE);
    rdma_pci_dma_unmap(pci_dev, dir, TARGET_PAGE_SIZE);

    return rc;
}

static void destroy_srq_ring(PvrdmaRing *ring)
{
    pvrdma_ring_free(ring);
    rdma_pci_dma_unmap(ring->dev, ring->ring_state, TARGET_PAGE_SIZE);
    g_free(ring);
}

static int create_srq(PVRDMADev *dev, union pvrdma_cmd_req *req,
                      union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_create_srq *cmd = &req->create_srq;
    struct pvrdma_cmd_create_srq_resp *resp = &rsp->create_srq_resp;
    PvrdmaRing *ring = NULL;
    int rc;

    memset(resp, 0, sizeof(*resp));

    rc = create_srq_ring(PCI_DEVICE(dev), &ring, cmd->pdir_dma,
                         cmd->attrs.max_wr, cmd->attrs.max_sge,
                         cmd->nchunks);
    if (rc) {
        return rc;
    }

    rc = rdma_rm_alloc_srq(&dev->rdma_dev_res, cmd->pd_handle,
                           cmd->attrs.max_wr, cmd->attrs.max_sge,
                           cmd->attrs.srq_limit, &resp->srqn, ring);
    if (rc) {
        destroy_srq_ring(ring);
        return rc;
    }

    return 0;
}

static int query_srq(PVRDMADev *dev, union pvrdma_cmd_req *req,
                     union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_query_srq *cmd = &req->query_srq;
    struct pvrdma_cmd_query_srq_resp *resp = &rsp->query_srq_resp;

    memset(resp, 0, sizeof(*resp));

    return rdma_rm_query_srq(&dev->rdma_dev_res, cmd->srq_handle,
                             (struct ibv_srq_attr *)&resp->attrs);
}

static int modify_srq(PVRDMADev *dev, union pvrdma_cmd_req *req,
                      union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_modify_srq *cmd = &req->modify_srq;

    /* Only support SRQ limit */
    if (!(cmd->attr_mask & IBV_SRQ_LIMIT) ||
        (cmd->attr_mask & IBV_SRQ_MAX_WR)) {
        return -EINVAL;
    }

    return rdma_rm_modify_srq(&dev->rdma_dev_res, cmd->srq_handle,
                              (struct ibv_srq_attr *)&cmd->attrs,
                              cmd->attr_mask);
}

static int destroy_srq(PVRDMADev *dev, union pvrdma_cmd_req *req,
                       union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_destroy_srq *cmd = &req->destroy_srq;
    RdmaRmSRQ *srq;
    PvrdmaRing *ring;

    srq = rdma_rm_get_srq(&dev->rdma_dev_res, cmd->srq_handle);
    if (!srq) {
        return -EINVAL;
    }

    ring = (PvrdmaRing *)srq->opaque;
    destroy_srq_ring(ring);
    rdma_rm_dealloc_srq(&dev->rdma_dev_res, cmd->srq_handle);

    return 0;
}

struct cmd_handler {
    uint32_t cmd;
    uint32_t ack;
    int (*exec)(PVRDMADev *dev, union pvrdma_cmd_req *req,
                union pvrdma_cmd_resp *rsp);
};

static struct cmd_handler cmd_handlers[] = {
    {PVRDMA_CMD_QUERY_PORT, PVRDMA_CMD_QUERY_PORT_RESP, query_port},
    {PVRDMA_CMD_QUERY_PKEY, PVRDMA_CMD_QUERY_PKEY_RESP, query_pkey},
    {PVRDMA_CMD_CREATE_PD, PVRDMA_CMD_CREATE_PD_RESP, create_pd},
    {PVRDMA_CMD_DESTROY_PD, PVRDMA_CMD_DESTROY_PD_RESP_NOOP, destroy_pd},
    {PVRDMA_CMD_CREATE_MR, PVRDMA_CMD_CREATE_MR_RESP, create_mr},
    {PVRDMA_CMD_DESTROY_MR, PVRDMA_CMD_DESTROY_MR_RESP_NOOP, destroy_mr},
    {PVRDMA_CMD_CREATE_CQ, PVRDMA_CMD_CREATE_CQ_RESP, create_cq},
    {PVRDMA_CMD_RESIZE_CQ, PVRDMA_CMD_RESIZE_CQ_RESP, NULL},
    {PVRDMA_CMD_DESTROY_CQ, PVRDMA_CMD_DESTROY_CQ_RESP_NOOP, destroy_cq},
    {PVRDMA_CMD_CREATE_QP, PVRDMA_CMD_CREATE_QP_RESP, create_qp},
    {PVRDMA_CMD_MODIFY_QP, PVRDMA_CMD_MODIFY_QP_RESP, modify_qp},
    {PVRDMA_CMD_QUERY_QP, PVRDMA_CMD_QUERY_QP_RESP, query_qp},
    {PVRDMA_CMD_DESTROY_QP, PVRDMA_CMD_DESTROY_QP_RESP, destroy_qp},
    {PVRDMA_CMD_CREATE_UC, PVRDMA_CMD_CREATE_UC_RESP, create_uc},
    {PVRDMA_CMD_DESTROY_UC, PVRDMA_CMD_DESTROY_UC_RESP_NOOP, destroy_uc},
    {PVRDMA_CMD_CREATE_BIND, PVRDMA_CMD_CREATE_BIND_RESP_NOOP, create_bind},
    {PVRDMA_CMD_DESTROY_BIND, PVRDMA_CMD_DESTROY_BIND_RESP_NOOP, destroy_bind},
    {PVRDMA_CMD_CREATE_SRQ, PVRDMA_CMD_CREATE_SRQ_RESP, create_srq},
    {PVRDMA_CMD_QUERY_SRQ, PVRDMA_CMD_QUERY_SRQ_RESP, query_srq},
    {PVRDMA_CMD_MODIFY_SRQ, PVRDMA_CMD_MODIFY_SRQ_RESP, modify_srq},
    {PVRDMA_CMD_DESTROY_SRQ, PVRDMA_CMD_DESTROY_SRQ_RESP, destroy_srq},
};

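/*
 * Command dispatch: pvrdma_exec_cmd() indexes cmd_handlers[] by the request
 * header's cmd field, stores the matching ack code and a positive error
 * value in the response header, and signals completion through the
 * PVRDMA_REG_ERR register and the command-ring interrupt vector.
 */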
int pvrdma_exec_cmd(PVRDMADev *dev)
{
    int err = 0xFFFF;
    DSRInfo *dsr_info;

    dsr_info = &dev->dsr_info;

    if (dsr_info->req->hdr.cmd >= sizeof(cmd_handlers) /
                                  sizeof(struct cmd_handler)) {
        rdma_error_report("Unsupported command");
        goto out;
    }

    if (!cmd_handlers[dsr_info->req->hdr.cmd].exec) {
        rdma_error_report("Unsupported command (not implemented yet)");
        goto out;
    }

    err = cmd_handlers[dsr_info->req->hdr.cmd].exec(dev, dsr_info->req,
                                                    dsr_info->rsp);
    dsr_info->rsp->hdr.response = dsr_info->req->hdr.response;
    dsr_info->rsp->hdr.ack = cmd_handlers[dsr_info->req->hdr.cmd].ack;
    dsr_info->rsp->hdr.err = err < 0 ? -err : 0;

    trace_pvrdma_exec_cmd(dsr_info->req->hdr.cmd, dsr_info->rsp->hdr.err);

    dev->stats.commands++;

out:
    set_reg_val(dev, PVRDMA_REG_ERR, err);
    post_interrupt(dev, INTR_VEC_CMD_RING);

    return (err == 0) ? 0 : -EINVAL;
}