/*
 * QEMU paravirtual RDMA - Command channel
 *
 * Copyright (C) 2018 Oracle
 * Copyright (C) 2018 Red Hat Inc
 *
 * Authors:
 *     Yuval Shaia <yuval.shaia@oracle.com>
 *     Marcel Apfelbaum <marcel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "hw/pci/pci.h"
#include "hw/pci/pci_ids.h"

#include "../rdma_backend.h"
#include "../rdma_rm.h"
#include "../rdma_utils.h"

#include "trace.h"
#include "pvrdma.h"
#include "standard-headers/rdma/vmw_pvrdma-abi.h"

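/*
 * Map the guest's multi-level page directory (pdir) into one contiguous
 * host virtual mapping.  The pdir page holds DMA addresses of page-table
 * pages; each table entry holds the DMA address of one data page.  Page 0
 * is remapped to reserve the whole region, then every further page is
 * stitched into place with mremap(MREMAP_FIXED).  Returns the host
 * mapping (released by the caller with munmap()) or NULL on failure.
 */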
static void *pvrdma_map_to_pdir(PCIDevice *pdev, uint64_t pdir_dma,
                                uint32_t nchunks, size_t length)
{
    uint64_t *dir, *tbl;
    int tbl_idx, dir_idx, addr_idx;
    void *host_virt = NULL, *curr_page;

    if (!nchunks) {
        rdma_error_report("Got nchunks=0");
        return NULL;
    }

    length = ROUND_UP(length, TARGET_PAGE_SIZE);
    if (nchunks * TARGET_PAGE_SIZE != length) {
        rdma_error_report("Invalid nchunks/length (%u, %lu)", nchunks,
                          (unsigned long)length);
        return NULL;
    }

    dir = rdma_pci_dma_map(pdev, pdir_dma, TARGET_PAGE_SIZE);
    if (!dir) {
        rdma_error_report("Failed to map to page directory");
        return NULL;
    }

    tbl = rdma_pci_dma_map(pdev, dir[0], TARGET_PAGE_SIZE);
    if (!tbl) {
        rdma_error_report("Failed to map to page table 0");
        goto out_unmap_dir;
    }

    curr_page = rdma_pci_dma_map(pdev, (dma_addr_t)tbl[0], TARGET_PAGE_SIZE);
    if (!curr_page) {
        rdma_error_report("Failed to map page 0");
        goto out_unmap_tbl;
    }

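    /*
     * mremap() with old_size == 0 is a Linux-specific trick: the original
     * mapping is left untouched and a new MREMAP_MAYMOVE'd mapping of the
     * same backing pages is created, sized here to cover the full region.
     */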
    host_virt = mremap(curr_page, 0, length, MREMAP_MAYMOVE);
    if (host_virt == MAP_FAILED) {
        host_virt = NULL;
        rdma_error_report("Failed to remap memory for host_virt");
        goto out_unmap_tbl;
    }
    trace_pvrdma_map_to_pdir_host_virt(curr_page, host_virt);

    rdma_pci_dma_unmap(pdev, curr_page, TARGET_PAGE_SIZE);

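    /*
     * Walk the remaining pages: addr_idx counts data pages already mapped
     * (page 0 was handled above), tbl_idx indexes the current page table
     * and dir_idx the directory slot.  When a table page is exhausted, the
     * next one is mapped from dir[dir_idx].
     */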
    dir_idx = 0;
    tbl_idx = 1;
    addr_idx = 1;
    while (addr_idx < nchunks) {
        if (tbl_idx == TARGET_PAGE_SIZE / sizeof(uint64_t)) {
            tbl_idx = 0;
            dir_idx++;
            rdma_pci_dma_unmap(pdev, tbl, TARGET_PAGE_SIZE);
            tbl = rdma_pci_dma_map(pdev, dir[dir_idx], TARGET_PAGE_SIZE);
            if (!tbl) {
                rdma_error_report("Failed to map to page table %d", dir_idx);
                goto out_unmap_host_virt;
            }
        }

        curr_page = rdma_pci_dma_map(pdev, (dma_addr_t)tbl[tbl_idx],
                                     TARGET_PAGE_SIZE);
        if (!curr_page) {
            rdma_error_report("Failed to map to page %d, dir %d", tbl_idx,
                              dir_idx);
            goto out_unmap_host_virt;
        }

        mremap(curr_page, 0, TARGET_PAGE_SIZE, MREMAP_MAYMOVE | MREMAP_FIXED,
               host_virt + TARGET_PAGE_SIZE * addr_idx);

        trace_pvrdma_map_to_pdir_next_page(addr_idx, curr_page, host_virt +
                                           TARGET_PAGE_SIZE * addr_idx);

        rdma_pci_dma_unmap(pdev, curr_page, TARGET_PAGE_SIZE);

        addr_idx++;

        tbl_idx++;
    }

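    /*
     * Success: host_virt is complete.  Fall through to drop the dir and
     * tbl mappings, which were only needed while building it.
     */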
    goto out_unmap_tbl;

out_unmap_host_virt:
    munmap(host_virt, length);
    host_virt = NULL;

out_unmap_tbl:
    rdma_pci_dma_unmap(pdev, tbl, TARGET_PAGE_SIZE);

out_unmap_dir:
    rdma_pci_dma_unmap(pdev, dir, TARGET_PAGE_SIZE);

    return host_virt;
}

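/*
 * Command handlers.  Each one consumes a request from the Device Shared
 * Region, fills in the matching member of the response union and returns
 * 0 or a negative errno, which pvrdma_exec_cmd() reports back to the
 * guest driver.
 */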
static int query_port(PVRDMADev *dev, union pvrdma_cmd_req *req,
                      union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_query_port *cmd = &req->query_port;
    struct pvrdma_cmd_query_port_resp *resp = &rsp->query_port_resp;
    struct pvrdma_port_attr attrs = {};

    if (cmd->port_num > MAX_PORTS) {
        return -EINVAL;
    }

    if (rdma_backend_query_port(&dev->backend_dev,
                                (struct ibv_port_attr *)&attrs)) {
        return -ENOMEM;
    }

    memset(resp, 0, sizeof(*resp));

    resp->attrs.state = dev->func0->device_active ? attrs.state :
                                                    PVRDMA_PORT_DOWN;
    resp->attrs.max_mtu = attrs.max_mtu;
    resp->attrs.active_mtu = attrs.active_mtu;
    resp->attrs.phys_state = attrs.phys_state;
    resp->attrs.gid_tbl_len = MIN(MAX_PORT_GIDS, attrs.gid_tbl_len);
    resp->attrs.max_msg_sz = 1024;
    resp->attrs.pkey_tbl_len = MIN(MAX_PORT_PKEYS, attrs.pkey_tbl_len);
    resp->attrs.active_width = 1;
    resp->attrs.active_speed = 1;

    return 0;
}

static int query_pkey(PVRDMADev *dev, union pvrdma_cmd_req *req,
                      union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_query_pkey *cmd = &req->query_pkey;
    struct pvrdma_cmd_query_pkey_resp *resp = &rsp->query_pkey_resp;

    if (cmd->port_num > MAX_PORTS) {
        return -EINVAL;
    }

    if (cmd->index > MAX_PKEYS) {
        return -EINVAL;
    }

    memset(resp, 0, sizeof(*resp));

    resp->pkey = PVRDMA_PKEY;

    return 0;
}

static int create_pd(PVRDMADev *dev, union pvrdma_cmd_req *req,
                     union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_create_pd *cmd = &req->create_pd;
    struct pvrdma_cmd_create_pd_resp *resp = &rsp->create_pd_resp;
    int rc;

    memset(resp, 0, sizeof(*resp));
    rc = rdma_rm_alloc_pd(&dev->rdma_dev_res, &dev->backend_dev,
                          &resp->pd_handle, cmd->ctx_handle);

    return rc;
}

static int destroy_pd(PVRDMADev *dev, union pvrdma_cmd_req *req,
                      union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_destroy_pd *cmd = &req->destroy_pd;

    rdma_rm_dealloc_pd(&dev->rdma_dev_res, cmd->pd_handle);

    return 0;
}

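/*
 * For non-DMA memory regions the guest supplies a page directory; its
 * pages are remapped into one contiguous host virtual range so that the
 * backend can register a single MR covering cmd->length bytes.
 */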
static int create_mr(PVRDMADev *dev, union pvrdma_cmd_req *req,
                     union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_create_mr *cmd = &req->create_mr;
    struct pvrdma_cmd_create_mr_resp *resp = &rsp->create_mr_resp;
    PCIDevice *pci_dev = PCI_DEVICE(dev);
    void *host_virt = NULL;
    int rc = 0;

    memset(resp, 0, sizeof(*resp));

    if (!(cmd->flags & PVRDMA_MR_FLAG_DMA)) {
        host_virt = pvrdma_map_to_pdir(pci_dev, cmd->pdir_dma, cmd->nchunks,
                                       cmd->length);
        if (!host_virt) {
            rdma_error_report("Failed to map to pdir");
            return -EINVAL;
        }
    }

    rc = rdma_rm_alloc_mr(&dev->rdma_dev_res, cmd->pd_handle, cmd->start,
                          cmd->length, host_virt, cmd->access_flags,
                          &resp->mr_handle, &resp->lkey, &resp->rkey);
    if (rc && host_virt) {
        munmap(host_virt, cmd->length);
    }

    return rc;
}

static int destroy_mr(PVRDMADev *dev, union pvrdma_cmd_req *req,
                      union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_destroy_mr *cmd = &req->destroy_mr;

    rdma_rm_dealloc_mr(&dev->rdma_dev_res, cmd->mr_handle);

    return 0;
}

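/*
 * CQ ring layout: the first page referenced by the page table holds a
 * pair of PvrdmaRingState structs, and the CQE ring occupies the
 * remaining nchunks - 1 pages.  The ring uses state slot [1], so
 * teardown must step the pointer back to the page base before unmapping.
 */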
static int create_cq_ring(PCIDevice *pci_dev, PvrdmaRing **ring,
                          uint64_t pdir_dma, uint32_t nchunks, uint32_t cqe)
{
    uint64_t *dir = NULL, *tbl = NULL;
    PvrdmaRing *r;
    int rc = -EINVAL;
    char ring_name[MAX_RING_NAME_SZ];

    if (!nchunks || nchunks > PVRDMA_MAX_FAST_REG_PAGES) {
        rdma_error_report("Got invalid nchunks: %d", nchunks);
        return rc;
    }

    dir = rdma_pci_dma_map(pci_dev, pdir_dma, TARGET_PAGE_SIZE);
    if (!dir) {
        rdma_error_report("Failed to map to CQ page directory");
        goto out;
    }

    tbl = rdma_pci_dma_map(pci_dev, dir[0], TARGET_PAGE_SIZE);
    if (!tbl) {
        rdma_error_report("Failed to map to CQ page table");
        goto out;
    }

    r = g_malloc(sizeof(*r));
    *ring = r;

    r->ring_state = (PvrdmaRingState *)
        rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE);

    if (!r->ring_state) {
        rdma_error_report("Failed to map to CQ ring state");
        goto out_free_ring;
    }

    sprintf(ring_name, "cq_ring_%" PRIx64, pdir_dma);
    rc = pvrdma_ring_init(r, ring_name, pci_dev, &r->ring_state[1],
                          cqe, sizeof(struct pvrdma_cqe),
                          /* first page is ring state */
                          (dma_addr_t *)&tbl[1], nchunks - 1);
    if (rc) {
        goto out_unmap_ring_state;
    }

    goto out;

out_unmap_ring_state:
    /* ring_state was in slot 1, not 0 so need to jump back */
    rdma_pci_dma_unmap(pci_dev, --r->ring_state, TARGET_PAGE_SIZE);

out_free_ring:
    g_free(r);

out:
    rdma_pci_dma_unmap(pci_dev, tbl, TARGET_PAGE_SIZE);
    rdma_pci_dma_unmap(pci_dev, dir, TARGET_PAGE_SIZE);

    return rc;
}

static void destroy_cq_ring(PvrdmaRing *ring)
{
    pvrdma_ring_free(ring);
    /* ring_state was in slot 1, not 0 so need to jump back */
    rdma_pci_dma_unmap(ring->dev, --ring->ring_state, TARGET_PAGE_SIZE);
    g_free(ring);
}

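/*
 * The PvrdmaRing is stored as the CQ's opaque pointer (see destroy_cq),
 * so it must be torn down here if resource-manager allocation fails.
 */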
static int create_cq(PVRDMADev *dev, union pvrdma_cmd_req *req,
                     union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_create_cq *cmd = &req->create_cq;
    struct pvrdma_cmd_create_cq_resp *resp = &rsp->create_cq_resp;
    PvrdmaRing *ring = NULL;
    int rc;

    memset(resp, 0, sizeof(*resp));

    resp->cqe = cmd->cqe;

    rc = create_cq_ring(PCI_DEVICE(dev), &ring, cmd->pdir_dma, cmd->nchunks,
                        cmd->cqe);
    if (rc) {
        return rc;
    }

    rc = rdma_rm_alloc_cq(&dev->rdma_dev_res, &dev->backend_dev, cmd->cqe,
                          &resp->cq_handle, ring);
    if (rc) {
        destroy_cq_ring(ring);
    }

    return rc;
}

static int destroy_cq(PVRDMADev *dev, union pvrdma_cmd_req *req,
                      union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_destroy_cq *cmd = &req->destroy_cq;
    RdmaRmCQ *cq;
    PvrdmaRing *ring;

    cq = rdma_rm_get_cq(&dev->rdma_dev_res, cmd->cq_handle);
    if (!cq) {
        rdma_error_report("Got invalid CQ handle");
        return -EINVAL;
    }

    ring = (PvrdmaRing *)cq->opaque;
    destroy_cq_ring(ring);

    rdma_rm_dealloc_cq(&dev->rdma_dev_res, cmd->cq_handle);

    return 0;
}

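/*
 * QP ring layout: one shared state page (send state in slot 0, recv
 * state in slot 1), followed by spages of send-queue WQE pages and,
 * unless the QP is attached to an SRQ, rpages of receive-queue pages.
 */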
static int create_qp_rings(PCIDevice *pci_dev, uint64_t pdir_dma,
                           PvrdmaRing **rings, uint32_t scqe, uint32_t smax_sge,
                           uint32_t spages, uint32_t rcqe, uint32_t rmax_sge,
                           uint32_t rpages, uint8_t is_srq)
{
    uint64_t *dir = NULL, *tbl = NULL;
    PvrdmaRing *sr, *rr;
    int rc = -EINVAL;
    char ring_name[MAX_RING_NAME_SZ];
    uint32_t wqe_sz;

    if (!spages || spages > PVRDMA_MAX_FAST_REG_PAGES) {
        rdma_error_report("Got invalid send page count for QP ring: %d",
                          spages);
        return rc;
    }

    if (!is_srq && (!rpages || rpages > PVRDMA_MAX_FAST_REG_PAGES)) {
        rdma_error_report("Got invalid recv page count for QP ring: %d",
                          rpages);
        return rc;
    }

    dir = rdma_pci_dma_map(pci_dev, pdir_dma, TARGET_PAGE_SIZE);
    if (!dir) {
        rdma_error_report("Failed to map to QP page directory");
        goto out;
    }

    tbl = rdma_pci_dma_map(pci_dev, dir[0], TARGET_PAGE_SIZE);
    if (!tbl) {
        rdma_error_report("Failed to map to QP page table");
        goto out;
    }

    if (!is_srq) {
        sr = g_malloc(2 * sizeof(*sr));
        rr = &sr[1];
    } else {
        sr = g_malloc(sizeof(*sr));
    }

    *rings = sr;

    /* Create send ring */
    sr->ring_state = (PvrdmaRingState *)
        rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE);
    if (!sr->ring_state) {
        rdma_error_report("Failed to map to QP ring state");
        goto out_free_sr_mem;
    }

    wqe_sz = pow2ceil(sizeof(struct pvrdma_sq_wqe_hdr) +
                      sizeof(struct pvrdma_sge) * smax_sge - 1);

    sprintf(ring_name, "qp_sring_%" PRIx64, pdir_dma);
    rc = pvrdma_ring_init(sr, ring_name, pci_dev, sr->ring_state,
                          scqe, wqe_sz, (dma_addr_t *)&tbl[1], spages);
    if (rc) {
        goto out_unmap_ring_state;
    }

    if (!is_srq) {
        /* Create recv ring */
        rr->ring_state = &sr->ring_state[1];
        wqe_sz = pow2ceil(sizeof(struct pvrdma_rq_wqe_hdr) +
                          sizeof(struct pvrdma_sge) * rmax_sge - 1);
        sprintf(ring_name, "qp_rring_%" PRIx64, pdir_dma);
        rc = pvrdma_ring_init(rr, ring_name, pci_dev, rr->ring_state,
                              rcqe, wqe_sz, (dma_addr_t *)&tbl[1 + spages],
                              rpages);
        if (rc) {
            goto out_free_sr;
        }
    }

    goto out;

out_free_sr:
    pvrdma_ring_free(sr);

out_unmap_ring_state:
    rdma_pci_dma_unmap(pci_dev, sr->ring_state, TARGET_PAGE_SIZE);

out_free_sr_mem:
    g_free(sr);

out:
    rdma_pci_dma_unmap(pci_dev, tbl, TARGET_PAGE_SIZE);
    rdma_pci_dma_unmap(pci_dev, dir, TARGET_PAGE_SIZE);

    return rc;
}

static void destroy_qp_rings(PvrdmaRing *ring, uint8_t is_srq)
{
    pvrdma_ring_free(&ring[0]);
    if (!is_srq) {
        pvrdma_ring_free(&ring[1]);
    }

    rdma_pci_dma_unmap(ring->dev, ring->ring_state, TARGET_PAGE_SIZE);
    g_free(ring);
}

static int create_qp(PVRDMADev *dev, union pvrdma_cmd_req *req,
                     union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_create_qp *cmd = &req->create_qp;
    struct pvrdma_cmd_create_qp_resp *resp = &rsp->create_qp_resp;
    PvrdmaRing *rings = NULL;
    int rc;

    memset(resp, 0, sizeof(*resp));

    rc = create_qp_rings(PCI_DEVICE(dev), cmd->pdir_dma, &rings,
                         cmd->max_send_wr, cmd->max_send_sge, cmd->send_chunks,
                         cmd->max_recv_wr, cmd->max_recv_sge,
                         cmd->total_chunks - cmd->send_chunks - 1, cmd->is_srq);
    if (rc) {
        return rc;
    }

    rc = rdma_rm_alloc_qp(&dev->rdma_dev_res, cmd->pd_handle, cmd->qp_type,
                          cmd->max_send_wr, cmd->max_send_sge,
                          cmd->send_cq_handle, cmd->max_recv_wr,
                          cmd->max_recv_sge, cmd->recv_cq_handle, rings,
                          &resp->qpn, cmd->is_srq, cmd->srq_handle);
    if (rc) {
        destroy_qp_rings(rings, cmd->is_srq);
        return rc;
    }

    resp->max_send_wr = cmd->max_send_wr;
    resp->max_recv_wr = cmd->max_recv_wr;
    resp->max_send_sge = cmd->max_send_sge;
    resp->max_recv_sge = cmd->max_recv_sge;
    resp->max_inline_data = cmd->max_inline_data;

    return 0;
}

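/*
 * The pvrdma ABI structures mirror their ibverbs counterparts closely
 * enough that the attribute fields can be cast straight through to the
 * backend; only the attributes used by the connection state machine are
 * forwarded.
 */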
static int modify_qp(PVRDMADev *dev, union pvrdma_cmd_req *req,
                     union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_modify_qp *cmd = &req->modify_qp;
    int rc;

    /* No need to verify sgid_index since it is u8 */

    rc = rdma_rm_modify_qp(&dev->rdma_dev_res, &dev->backend_dev,
                           cmd->qp_handle, cmd->attr_mask,
                           cmd->attrs.ah_attr.grh.sgid_index,
                           (union ibv_gid *)&cmd->attrs.ah_attr.grh.dgid,
                           cmd->attrs.dest_qp_num,
                           (enum ibv_qp_state)cmd->attrs.qp_state,
                           cmd->attrs.qkey, cmd->attrs.rq_psn,
                           cmd->attrs.sq_psn);

    return rc;
}

static int query_qp(PVRDMADev *dev, union pvrdma_cmd_req *req,
                    union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_query_qp *cmd = &req->query_qp;
    struct pvrdma_cmd_query_qp_resp *resp = &rsp->query_qp_resp;
    struct ibv_qp_init_attr init_attr;
    int rc;

    memset(resp, 0, sizeof(*resp));

    rc = rdma_rm_query_qp(&dev->rdma_dev_res, &dev->backend_dev, cmd->qp_handle,
                          (struct ibv_qp_attr *)&resp->attrs, cmd->attr_mask,
                          &init_attr);

    return rc;
}

static int destroy_qp(PVRDMADev *dev, union pvrdma_cmd_req *req,
                      union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_destroy_qp *cmd = &req->destroy_qp;
    RdmaRmQP *qp;
    PvrdmaRing *ring;

    qp = rdma_rm_get_qp(&dev->rdma_dev_res, cmd->qp_handle);
    if (!qp) {
        return -EINVAL;
    }

    ring = (PvrdmaRing *)qp->opaque;
    destroy_qp_rings(ring, qp->is_srq);
    rdma_rm_dealloc_qp(&dev->rdma_dev_res, cmd->qp_handle);

    return 0;
}

static int create_bind(PVRDMADev *dev, union pvrdma_cmd_req *req,
                       union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_create_bind *cmd = &req->create_bind;
    int rc;
    union ibv_gid *gid = (union ibv_gid *)&cmd->new_gid;

    if (cmd->index >= MAX_PORT_GIDS) {
        return -EINVAL;
    }

    rc = rdma_rm_add_gid(&dev->rdma_dev_res, &dev->backend_dev,
                         dev->backend_eth_device_name, gid, cmd->index);

    return rc;
}

static int destroy_bind(PVRDMADev *dev, union pvrdma_cmd_req *req,
                        union pvrdma_cmd_resp *rsp)
{
    int rc;

    struct pvrdma_cmd_destroy_bind *cmd = &req->destroy_bind;

    if (cmd->index >= MAX_PORT_GIDS) {
        return -EINVAL;
    }

    rc = rdma_rm_del_gid(&dev->rdma_dev_res, &dev->backend_dev,
                         dev->backend_eth_device_name, cmd->index);

    return rc;
}

static int create_uc(PVRDMADev *dev, union pvrdma_cmd_req *req,
                     union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_create_uc *cmd = &req->create_uc;
    struct pvrdma_cmd_create_uc_resp *resp = &rsp->create_uc_resp;
    int rc;

    memset(resp, 0, sizeof(*resp));
    rc = rdma_rm_alloc_uc(&dev->rdma_dev_res, cmd->pfn, &resp->ctx_handle);

    return rc;
}

static int destroy_uc(PVRDMADev *dev, union pvrdma_cmd_req *req,
                      union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_destroy_uc *cmd = &req->destroy_uc;

    rdma_rm_dealloc_uc(&dev->rdma_dev_res, cmd->ctx_handle);

    return 0;
}

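/*
 * SRQ ring layout mirrors the CQ ring: the first data page holds the
 * ring state and the receive-WQE ring occupies the remaining
 * nchunks - 1 pages.
 */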
static int create_srq_ring(PCIDevice *pci_dev, PvrdmaRing **ring,
                           uint64_t pdir_dma, uint32_t max_wr,
                           uint32_t max_sge, uint32_t nchunks)
{
    uint64_t *dir = NULL, *tbl = NULL;
    PvrdmaRing *r;
    int rc = -EINVAL;
    char ring_name[MAX_RING_NAME_SZ];
    uint32_t wqe_sz;

    if (!nchunks || nchunks > PVRDMA_MAX_FAST_REG_PAGES) {
        rdma_error_report("Got invalid page count for SRQ ring: %d",
                          nchunks);
        return rc;
    }

    dir = rdma_pci_dma_map(pci_dev, pdir_dma, TARGET_PAGE_SIZE);
    if (!dir) {
        rdma_error_report("Failed to map to SRQ page directory");
        goto out;
    }

    tbl = rdma_pci_dma_map(pci_dev, dir[0], TARGET_PAGE_SIZE);
    if (!tbl) {
        rdma_error_report("Failed to map to SRQ page table");
        goto out;
    }

    r = g_malloc(sizeof(*r));
    *ring = r;

    r->ring_state = (PvrdmaRingState *)
        rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE);
    if (!r->ring_state) {
        rdma_error_report("Failed to map to SRQ ring state");
        goto out_free_ring_mem;
    }

    wqe_sz = pow2ceil(sizeof(struct pvrdma_rq_wqe_hdr) +
                      sizeof(struct pvrdma_sge) * max_sge - 1);
    sprintf(ring_name, "srq_ring_%" PRIx64, pdir_dma);
    rc = pvrdma_ring_init(r, ring_name, pci_dev, &r->ring_state[1], max_wr,
                          wqe_sz, (dma_addr_t *)&tbl[1], nchunks - 1);
    if (rc) {
        goto out_unmap_ring_state;
    }

    goto out;

out_unmap_ring_state:
    /* ring_state was in slot 1, not 0 so need to jump back */
    rdma_pci_dma_unmap(pci_dev, --r->ring_state, TARGET_PAGE_SIZE);

out_free_ring_mem:
    g_free(r);

out:
    rdma_pci_dma_unmap(pci_dev, tbl, TARGET_PAGE_SIZE);
    rdma_pci_dma_unmap(pci_dev, dir, TARGET_PAGE_SIZE);

    return rc;
}

static void destroy_srq_ring(PvrdmaRing *ring)
{
    pvrdma_ring_free(ring);
    /* ring_state was in slot 1, not 0 so need to jump back */
    rdma_pci_dma_unmap(ring->dev, --ring->ring_state, TARGET_PAGE_SIZE);
    g_free(ring);
}

static int create_srq(PVRDMADev *dev, union pvrdma_cmd_req *req,
                      union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_create_srq *cmd = &req->create_srq;
    struct pvrdma_cmd_create_srq_resp *resp = &rsp->create_srq_resp;
    PvrdmaRing *ring = NULL;
    int rc;

    memset(resp, 0, sizeof(*resp));

    rc = create_srq_ring(PCI_DEVICE(dev), &ring, cmd->pdir_dma,
                         cmd->attrs.max_wr, cmd->attrs.max_sge,
                         cmd->nchunks);
    if (rc) {
        return rc;
    }

    rc = rdma_rm_alloc_srq(&dev->rdma_dev_res, cmd->pd_handle,
                           cmd->attrs.max_wr, cmd->attrs.max_sge,
                           cmd->attrs.srq_limit, &resp->srqn, ring);
    if (rc) {
        destroy_srq_ring(ring);
        return rc;
    }

    return 0;
}

static int query_srq(PVRDMADev *dev, union pvrdma_cmd_req *req,
                     union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_query_srq *cmd = &req->query_srq;
    struct pvrdma_cmd_query_srq_resp *resp = &rsp->query_srq_resp;

    memset(resp, 0, sizeof(*resp));

    return rdma_rm_query_srq(&dev->rdma_dev_res, cmd->srq_handle,
                             (struct ibv_srq_attr *)&resp->attrs);
}

static int modify_srq(PVRDMADev *dev, union pvrdma_cmd_req *req,
                      union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_modify_srq *cmd = &req->modify_srq;

    /* Only support SRQ limit */
    if (!(cmd->attr_mask & IBV_SRQ_LIMIT) ||
        (cmd->attr_mask & IBV_SRQ_MAX_WR)) {
        return -EINVAL;
    }

    return rdma_rm_modify_srq(&dev->rdma_dev_res, cmd->srq_handle,
                              (struct ibv_srq_attr *)&cmd->attrs,
                              cmd->attr_mask);
}

static int destroy_srq(PVRDMADev *dev, union pvrdma_cmd_req *req,
                       union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_destroy_srq *cmd = &req->destroy_srq;
    RdmaRmSRQ *srq;
    PvrdmaRing *ring;

    srq = rdma_rm_get_srq(&dev->rdma_dev_res, cmd->srq_handle);
    if (!srq) {
        return -EINVAL;
    }

    ring = (PvrdmaRing *)srq->opaque;
    destroy_srq_ring(ring);
    rdma_rm_dealloc_srq(&dev->rdma_dev_res, cmd->srq_handle);

    return 0;
}

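/*
 * Dispatch table, indexed directly by the request's command code: entries
 * must therefore stay in PVRDMA_CMD_* order.  'ack' is the response code
 * written back to the guest; a NULL exec marks a command that is not
 * implemented (currently only RESIZE_CQ).
 */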
struct cmd_handler {
    uint32_t cmd;
    uint32_t ack;
    int (*exec)(PVRDMADev *dev, union pvrdma_cmd_req *req,
                union pvrdma_cmd_resp *rsp);
};

static struct cmd_handler cmd_handlers[] = {
    {PVRDMA_CMD_QUERY_PORT, PVRDMA_CMD_QUERY_PORT_RESP, query_port},
    {PVRDMA_CMD_QUERY_PKEY, PVRDMA_CMD_QUERY_PKEY_RESP, query_pkey},
    {PVRDMA_CMD_CREATE_PD, PVRDMA_CMD_CREATE_PD_RESP, create_pd},
    {PVRDMA_CMD_DESTROY_PD, PVRDMA_CMD_DESTROY_PD_RESP_NOOP, destroy_pd},
    {PVRDMA_CMD_CREATE_MR, PVRDMA_CMD_CREATE_MR_RESP, create_mr},
    {PVRDMA_CMD_DESTROY_MR, PVRDMA_CMD_DESTROY_MR_RESP_NOOP, destroy_mr},
    {PVRDMA_CMD_CREATE_CQ, PVRDMA_CMD_CREATE_CQ_RESP, create_cq},
    {PVRDMA_CMD_RESIZE_CQ, PVRDMA_CMD_RESIZE_CQ_RESP, NULL},
    {PVRDMA_CMD_DESTROY_CQ, PVRDMA_CMD_DESTROY_CQ_RESP_NOOP, destroy_cq},
    {PVRDMA_CMD_CREATE_QP, PVRDMA_CMD_CREATE_QP_RESP, create_qp},
    {PVRDMA_CMD_MODIFY_QP, PVRDMA_CMD_MODIFY_QP_RESP, modify_qp},
    {PVRDMA_CMD_QUERY_QP, PVRDMA_CMD_QUERY_QP_RESP, query_qp},
    {PVRDMA_CMD_DESTROY_QP, PVRDMA_CMD_DESTROY_QP_RESP, destroy_qp},
    {PVRDMA_CMD_CREATE_UC, PVRDMA_CMD_CREATE_UC_RESP, create_uc},
    {PVRDMA_CMD_DESTROY_UC, PVRDMA_CMD_DESTROY_UC_RESP_NOOP, destroy_uc},
    {PVRDMA_CMD_CREATE_BIND, PVRDMA_CMD_CREATE_BIND_RESP_NOOP, create_bind},
    {PVRDMA_CMD_DESTROY_BIND, PVRDMA_CMD_DESTROY_BIND_RESP_NOOP, destroy_bind},
    {PVRDMA_CMD_CREATE_SRQ, PVRDMA_CMD_CREATE_SRQ_RESP, create_srq},
    {PVRDMA_CMD_QUERY_SRQ, PVRDMA_CMD_QUERY_SRQ_RESP, query_srq},
    {PVRDMA_CMD_MODIFY_SRQ, PVRDMA_CMD_MODIFY_SRQ_RESP, modify_srq},
    {PVRDMA_CMD_DESTROY_SRQ, PVRDMA_CMD_DESTROY_SRQ_RESP, destroy_srq},
};

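/*
 * Execute the command currently posted in the Device Shared Region:
 * validate the command code, run the handler, fill in the response
 * header, latch the (positive) error code in PVRDMA_REG_ERR and raise
 * the command-ring interrupt.
 */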
int pvrdma_exec_cmd(PVRDMADev *dev)
{
    int err = 0xFFFF;
    DSRInfo *dsr_info;

    dsr_info = &dev->dsr_info;

    if (dsr_info->req->hdr.cmd >= ARRAY_SIZE(cmd_handlers)) {
        rdma_error_report("Unsupported command");
        goto out;
    }

    if (!cmd_handlers[dsr_info->req->hdr.cmd].exec) {
        rdma_error_report("Unsupported command (not implemented yet)");
        goto out;
    }

    err = cmd_handlers[dsr_info->req->hdr.cmd].exec(dev, dsr_info->req,
                                                    dsr_info->rsp);
    dsr_info->rsp->hdr.response = dsr_info->req->hdr.response;
    dsr_info->rsp->hdr.ack = cmd_handlers[dsr_info->req->hdr.cmd].ack;
    dsr_info->rsp->hdr.err = err < 0 ? -err : 0;

    trace_pvrdma_exec_cmd(dsr_info->req->hdr.cmd, dsr_info->rsp->hdr.err);

    dev->stats.commands++;

out:
    set_reg_val(dev, PVRDMA_REG_ERR, err);
    post_interrupt(dev, INTR_VEC_CMD_RING);

    return (err == 0) ? 0 : -EINVAL;
}