hw/rdma: Utilize ibv_reg_mr_iova for memory registration
[qemu.git] / hw / rdma / rdma_backend.h
1 /*
2 * RDMA device: Definitions of Backend Device functions
3 *
4 * Copyright (C) 2018 Oracle
5 * Copyright (C) 2018 Red Hat Inc
6 *
7 * Authors:
8 * Yuval Shaia <yuval.shaia@oracle.com>
9 * Marcel Apfelbaum <marcel@redhat.com>
10 *
11 * This work is licensed under the terms of the GNU GPL, version 2 or later.
12 * See the COPYING file in the top-level directory.
13 *
14 */
15
16 #ifndef RDMA_BACKEND_H
17 #define RDMA_BACKEND_H
18
19 #include "qapi/error.h"
20 #include "chardev/char-fe.h"
21
22 #include "rdma_rm_defs.h"
23 #include "rdma_backend_defs.h"
24
/* Vendor Errors */
/*
 * Device-specific error codes reported to the guest driver in the
 * vendor_err field of a work completion when a WQE cannot be processed.
 * NOTE(review): the sequence jumps from 0x209 to 0x210 (hex values
 * 0x20a-0x20f unused) - presumably deliberate numbering; confirm before
 * adding new codes.
 */
#define VENDOR_ERR_FAIL_BACKEND 0x201
#define VENDOR_ERR_TOO_MANY_SGES 0x202
#define VENDOR_ERR_NOMEM 0x203
#define VENDOR_ERR_QP0 0x204
#define VENDOR_ERR_INV_NUM_SGE 0x205
#define VENDOR_ERR_MAD_SEND 0x206
#define VENDOR_ERR_INVLKEY 0x207
#define VENDOR_ERR_MR_SMALL 0x208
#define VENDOR_ERR_INV_MAD_BUFF 0x209
#define VENDOR_ERR_INV_GID_IDX 0x210
36
/* Add definition for QP0 and QP1 as there is no userspace enums for them */
enum ibv_special_qp_type {
    IBV_QPT_SMI = 0, /* QP0: Subnet Management Interface */
    IBV_QPT_GSI = 1, /* QP1: General Services Interface */
};
42
43 static inline uint32_t rdma_backend_qpn(const RdmaBackendQP *qp)
44 {
45 return qp->ibqp ? qp->ibqp->qp_num : 1;
46 }
47
48 static inline uint32_t rdma_backend_mr_lkey(const RdmaBackendMR *mr)
49 {
50 return mr->ibmr ? mr->ibmr->lkey : 0;
51 }
52
53 static inline uint32_t rdma_backend_mr_rkey(const RdmaBackendMR *mr)
54 {
55 return mr->ibmr ? mr->ibmr->rkey : 0;
56 }
57
/* Backend device lifecycle: open/close the host ibv device and port */
int rdma_backend_init(RdmaBackendDev *backend_dev, PCIDevice *pdev,
                      RdmaDeviceResources *rdma_dev_res,
                      const char *backend_device_name, uint8_t port_num,
                      struct ibv_device_attr *dev_attr,
                      CharBackend *mad_chr_be);
void rdma_backend_fini(RdmaBackendDev *backend_dev);
/* GID table management for the backend port */
int rdma_backend_add_gid(RdmaBackendDev *backend_dev, const char *ifname,
                         union ibv_gid *gid);
int rdma_backend_del_gid(RdmaBackendDev *backend_dev, const char *ifname,
                         union ibv_gid *gid);
int rdma_backend_get_gid_index(RdmaBackendDev *backend_dev,
                               union ibv_gid *gid);
/* Start/stop backend event processing */
void rdma_backend_start(RdmaBackendDev *backend_dev);
void rdma_backend_stop(RdmaBackendDev *backend_dev);
/*
 * Register/unregister the global callback invoked for each completed
 * work request (receives the caller context and the ibv work completion).
 */
void rdma_backend_register_comp_handler(void (*handler)(void *ctx,
                                                        struct ibv_wc *wc));
void rdma_backend_unregister_comp_handler(void);
75
/* Query the backend port attributes */
int rdma_backend_query_port(RdmaBackendDev *backend_dev,
                            struct ibv_port_attr *port_attr);
/* Protection domain create/destroy */
int rdma_backend_create_pd(RdmaBackendDev *backend_dev, RdmaBackendPD *pd);
void rdma_backend_destroy_pd(RdmaBackendPD *pd);
80
/*
 * Memory-region registration.
 * When the host libibverbs supports ibv_reg_mr_iova, the MR is registered
 * with guest_start as the I/O virtual address so guest addresses can be
 * used directly in SGEs; LEGACY_RDMA_REG_MR selects the older ibv_reg_mr
 * variant without that parameter.
 * NOTE(review): semantics inferred from the ifdef and the patch subject -
 * confirm against rdma_backend.c.
 */
#ifdef LEGACY_RDMA_REG_MR
int rdma_backend_create_mr(RdmaBackendMR *mr, RdmaBackendPD *pd, void *addr,
                           size_t length, int access);
#else
int rdma_backend_create_mr(RdmaBackendMR *mr, RdmaBackendPD *pd, void *addr,
                           size_t length, uint64_t guest_start, int access);
#endif
void rdma_backend_destroy_mr(RdmaBackendMR *mr);
89
/* Completion queue create/destroy/poll (cqe = requested CQ depth) */
int rdma_backend_create_cq(RdmaBackendDev *backend_dev, RdmaBackendCQ *cq,
                           int cqe);
void rdma_backend_destroy_cq(RdmaBackendCQ *cq);
void rdma_backend_poll_cq(RdmaDeviceResources *rdma_dev_res, RdmaBackendCQ *cq);
94
/*
 * Queue-pair creation and state transitions.
 * srq may select SRQ-backed receive; the qp_type/qkey/use_qkey parameters
 * carry the UD-specific addressing where applicable.
 */
int rdma_backend_create_qp(RdmaBackendQP *qp, uint8_t qp_type,
                           RdmaBackendPD *pd, RdmaBackendCQ *scq,
                           RdmaBackendCQ *rcq, RdmaBackendSRQ *srq,
                           uint32_t max_send_wr, uint32_t max_recv_wr,
                           uint32_t max_send_sge, uint32_t max_recv_sge);
/* Transition the QP to INIT */
int rdma_backend_qp_state_init(RdmaBackendDev *backend_dev, RdmaBackendQP *qp,
                               uint8_t qp_type, uint32_t qkey);
/* Transition the QP to RTR (Ready To Receive), setting remote addressing */
int rdma_backend_qp_state_rtr(RdmaBackendDev *backend_dev, RdmaBackendQP *qp,
                              uint8_t qp_type, uint8_t sgid_idx,
                              union ibv_gid *dgid, uint32_t dqpn,
                              uint32_t rq_psn, uint32_t qkey, bool use_qkey);
/* Transition the QP to RTS (Ready To Send) */
int rdma_backend_qp_state_rts(RdmaBackendQP *qp, uint8_t qp_type,
                              uint32_t sq_psn, uint32_t qkey, bool use_qkey);
int rdma_backend_query_qp(RdmaBackendQP *qp, struct ibv_qp_attr *attr,
                          int attr_mask, struct ibv_qp_init_attr *init_attr);
void rdma_backend_destroy_qp(RdmaBackendQP *qp, RdmaDeviceResources *dev_res);
111
/*
 * Post a send work request; ctx is returned to the registered completion
 * handler when the WR completes. sgid/dgid/dqpn/dqkey supply UD addressing.
 */
void rdma_backend_post_send(RdmaBackendDev *backend_dev,
                            RdmaBackendQP *qp, uint8_t qp_type,
                            struct ibv_sge *sge, uint32_t num_sge,
                            uint8_t sgid_idx, union ibv_gid *sgid,
                            union ibv_gid *dgid, uint32_t dqpn, uint32_t dqkey,
                            void *ctx);
/* Post a receive work request on the QP's receive queue */
void rdma_backend_post_recv(RdmaBackendDev *backend_dev,
                            RdmaBackendQP *qp, uint8_t qp_type,
                            struct ibv_sge *sge, uint32_t num_sge, void *ctx);
121
/* Shared receive queue management and receive posting */
int rdma_backend_create_srq(RdmaBackendSRQ *srq, RdmaBackendPD *pd,
                            uint32_t max_wr, uint32_t max_sge,
                            uint32_t srq_limit);
int rdma_backend_query_srq(RdmaBackendSRQ *srq, struct ibv_srq_attr *srq_attr);
int rdma_backend_modify_srq(RdmaBackendSRQ *srq, struct ibv_srq_attr *srq_attr,
                            int srq_attr_mask);
void rdma_backend_destroy_srq(RdmaBackendSRQ *srq,
                              RdmaDeviceResources *dev_res);
/* Post a receive work request on the SRQ; ctx is echoed to the handler */
void rdma_backend_post_srq_recv(RdmaBackendDev *backend_dev,
                                RdmaBackendSRQ *srq, struct ibv_sge *sge,
                                uint32_t num_sge, void *ctx);
133
134 #endif