/*
 * vhost-vdpa: introduce vhost-vdpa backend
 * [qemu.git] / hw / net / vhost_net.c
 */
1 /*
2 * vhost-net support
3 *
4 * Copyright Red Hat, Inc. 2010
5 *
6 * Authors:
7 * Michael S. Tsirkin <mst@redhat.com>
8 *
9 * This work is licensed under the terms of the GNU GPL, version 2. See
10 * the COPYING file in the top-level directory.
11 *
12 * Contributions after 2012-01-13 are licensed under the terms of the
13 * GNU GPL, version 2 or (at your option) any later version.
14 */
15
16 #include "qemu/osdep.h"
17 #include "net/net.h"
18 #include "net/tap.h"
19 #include "net/vhost-user.h"
20 #include "net/vhost-vdpa.h"
21
22 #include "standard-headers/linux/vhost_types.h"
23 #include "hw/virtio/virtio-net.h"
24 #include "net/vhost_net.h"
25 #include "qemu/error-report.h"
26 #include "qemu/main-loop.h"
27
28 #include <sys/socket.h>
29 #include <net/if.h>
30 #include <netinet/in.h>
31
32
33 #include "standard-headers/linux/virtio_ring.h"
34 #include "hw/virtio/vhost.h"
35 #include "hw/virtio/virtio-bus.h"
36
37
/* Features supported by host kernel. */
static const int kernel_feature_bits[] = {
    VIRTIO_F_NOTIFY_ON_EMPTY,
    VIRTIO_RING_F_INDIRECT_DESC,
    VIRTIO_RING_F_EVENT_IDX,
    VIRTIO_NET_F_MRG_RXBUF,
    VIRTIO_F_VERSION_1,
    VIRTIO_NET_F_MTU,
    VIRTIO_F_IOMMU_PLATFORM,
    VIRTIO_F_RING_PACKED,
    /* Sentinel terminating the list; must stay last. */
    VHOST_INVALID_FEATURE_BIT
};
50
/* Features supported by others. */
static const int user_feature_bits[] = {
    VIRTIO_F_NOTIFY_ON_EMPTY,
    VIRTIO_RING_F_INDIRECT_DESC,
    VIRTIO_RING_F_EVENT_IDX,

    VIRTIO_F_ANY_LAYOUT,
    VIRTIO_F_VERSION_1,
    VIRTIO_NET_F_CSUM,
    VIRTIO_NET_F_GUEST_CSUM,
    VIRTIO_NET_F_GSO,
    VIRTIO_NET_F_GUEST_TSO4,
    VIRTIO_NET_F_GUEST_TSO6,
    VIRTIO_NET_F_GUEST_ECN,
    VIRTIO_NET_F_GUEST_UFO,
    VIRTIO_NET_F_HOST_TSO4,
    VIRTIO_NET_F_HOST_TSO6,
    VIRTIO_NET_F_HOST_ECN,
    VIRTIO_NET_F_HOST_UFO,
    VIRTIO_NET_F_MRG_RXBUF,
    VIRTIO_NET_F_MTU,
    VIRTIO_F_IOMMU_PLATFORM,
    VIRTIO_F_RING_PACKED,

    /* This bit implies RARP isn't sent by QEMU out of band */
    VIRTIO_NET_F_GUEST_ANNOUNCE,

    VIRTIO_NET_F_MQ,

    /* Sentinel terminating the list; must stay last. */
    VHOST_INVALID_FEATURE_BIT
};
82
83 static const int *vhost_net_get_feature_bits(struct vhost_net *net)
84 {
85 const int *feature_bits = 0;
86
87 switch (net->nc->info->type) {
88 case NET_CLIENT_DRIVER_TAP:
89 feature_bits = kernel_feature_bits;
90 break;
91 case NET_CLIENT_DRIVER_VHOST_USER:
92 feature_bits = user_feature_bits;
93 break;
94 #ifdef CONFIG_VHOST_NET_VDPA
95 case NET_CLIENT_DRIVER_VHOST_VDPA:
96 feature_bits = vdpa_feature_bits;
97 break;
98 #endif
99 default:
100 error_report("Feature bits not defined for this type: %d",
101 net->nc->info->type);
102 break;
103 }
104
105 return feature_bits;
106 }
107
/*
 * Filter @features through the backend-specific feature-bit table,
 * delegating to the generic vhost feature negotiation.
 */
uint64_t vhost_net_get_features(struct vhost_net *net, uint64_t features)
{
    return vhost_get_features(&net->dev, vhost_net_get_feature_bits(net),
            features);
}
/* Read up to @config_len bytes of device config from the vhost backend. */
int vhost_net_get_config(struct vhost_net *net,  uint8_t *config,
                         uint32_t config_len)
{
    return vhost_dev_get_config(&net->dev, config, config_len);
}
/* Write @size bytes of device config at @offset to the vhost backend. */
int vhost_net_set_config(struct vhost_net *net, const uint8_t *data,
                         uint32_t offset, uint32_t size, uint32_t flags)
{
    return vhost_dev_set_config(&net->dev, data, offset, size, flags);
}
123
/*
 * Record the guest-acked feature set.  Starts from the features the
 * backend itself requires, then folds in @features filtered through
 * the backend-specific bit table.
 */
void vhost_net_ack_features(struct vhost_net *net, uint64_t features)
{
    net->dev.acked_features = net->dev.backend_features;
    vhost_ack_features(&net->dev, vhost_net_get_feature_bits(net), features);
}
129
/* Maximum number of queue pairs the vhost device supports. */
uint64_t vhost_net_get_max_queues(VHostNetState *net)
{
    return net->dev.max_queues;
}
134
/* Feature set previously recorded by vhost_net_ack_features(). */
uint64_t vhost_net_get_acked_features(VHostNetState *net)
{
    return net->dev.acked_features;
}
139
140 static int vhost_net_get_fd(NetClientState *backend)
141 {
142 switch (backend->info->type) {
143 case NET_CLIENT_DRIVER_TAP:
144 return tap_get_fd(backend);
145 default:
146 fprintf(stderr, "vhost-net requires tap backend\n");
147 return -ENOSYS;
148 }
149 }
150
/*
 * Allocate and initialise a vhost_net instance for the net backend in
 * @options.  Returns the new instance, or NULL on failure (in which case
 * any partially initialised state has been cleaned up and freed).
 */
struct vhost_net *vhost_net_init(VhostNetOptions *options)
{
    int r;
    bool backend_kernel = options->backend_type == VHOST_BACKEND_TYPE_KERNEL;
    struct vhost_net *net = g_new0(struct vhost_net, 1);
    uint64_t features = 0;

    if (!options->net_backend) {
        fprintf(stderr, "vhost-net requires net backend to be setup\n");
        goto fail;
    }
    net->nc = options->net_backend;

    net->dev.max_queues = 1;
    /* Two virtqueues per queue pair: RX and TX. */
    net->dev.nvqs = 2;
    net->dev.vqs = net->vqs;

    if (backend_kernel) {
        /* Kernel vhost drives the tap fd directly. */
        r = vhost_net_get_fd(options->net_backend);
        if (r < 0) {
            goto fail;
        }
        /* If tap can't supply a vnet header, vhost must insert one. */
        net->dev.backend_features = qemu_has_vnet_hdr(options->net_backend)
            ? 0 : (1ULL << VHOST_NET_F_VIRTIO_NET_HDR);
        net->backend = r;
        net->dev.protocol_features = 0;
    } else {
        net->dev.backend_features = 0;
        net->dev.protocol_features = 0;
        net->backend = -1;

        /* vhost-user needs vq_index to initiate a specific queue pair */
        net->dev.vq_index = net->nc->queue_index * net->dev.nvqs;
    }

    r = vhost_dev_init(&net->dev, options->opaque,
                       options->backend_type, options->busyloop_timeout);
    if (r < 0) {
        goto fail;
    }
    if (backend_kernel) {
        /* Without a large enough vnet header, mergeable RX buffers
         * cannot be offered. */
        if (!qemu_has_vnet_hdr_len(options->net_backend,
                               sizeof(struct virtio_net_hdr_mrg_rxbuf))) {
            net->dev.features &= ~(1ULL << VIRTIO_NET_F_MRG_RXBUF);
        }
        /* Every feature the backend requires must be supported. */
        if (~net->dev.features & net->dev.backend_features) {
            fprintf(stderr, "vhost lacks feature mask %" PRIu64
                   " for backend\n",
                   (uint64_t)(~net->dev.features & net->dev.backend_features));
            goto fail;
        }
    }

    /* Set sane init value. Override when guest acks. */
#ifdef CONFIG_VHOST_NET_USER
    if (net->nc->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
        features = vhost_user_get_acked_features(net->nc);
        if (~net->dev.features & features) {
            fprintf(stderr, "vhost lacks feature mask %" PRIu64
                   " for backend\n",
                   (uint64_t)(~net->dev.features & features));
            goto fail;
        }
    }
#endif

    vhost_net_ack_features(net, features);

    return net;

fail:
    /* NOTE(review): also reached before vhost_dev_init(); assumes
     * vhost_dev_cleanup() is safe on the zero-initialised dev — confirm. */
    vhost_dev_cleanup(&net->dev);
    g_free(net);
    return NULL;
}
226
/* Record which global virtqueue index this device's queues start at. */
static void vhost_net_set_vq_index(struct vhost_net *net, int vq_index)
{
    net->dev.vq_index = vq_index;
}
231
/*
 * Start vhost for one queue pair: enable host notifiers, start the
 * vhost device, and (for tap) hand the backend fd to each enabled
 * virtqueue.  On any failure, everything done so far is unwound in
 * reverse order and the error is returned.
 */
static int vhost_net_start_one(struct vhost_net *net,
                               VirtIODevice *dev)
{
    struct vhost_vring_file file = { };
    int r;

    /* One RX and one TX virtqueue for this queue pair. */
    net->dev.nvqs = 2;
    net->dev.vqs = net->vqs;

    r = vhost_dev_enable_notifiers(&net->dev, dev);
    if (r < 0) {
        goto fail_notifiers;
    }

    r = vhost_dev_start(&net->dev, dev);
    if (r < 0) {
        goto fail_start;
    }

    /* QEMU stops polling the backend; vhost takes over the datapath. */
    if (net->nc->info->poll) {
        net->nc->info->poll(net->nc, false);
    }

    if (net->nc->info->type == NET_CLIENT_DRIVER_TAP) {
        qemu_set_fd_handler(net->backend, NULL, NULL, NULL);
        file.fd = net->backend;
        for (file.index = 0; file.index < net->dev.nvqs; ++file.index) {
            if (!virtio_queue_enabled(dev, net->dev.vq_index +
                                      file.index)) {
                /* Queue might not be ready for start */
                continue;
            }
            r = vhost_net_set_backend(&net->dev, &file);
            if (r < 0) {
                r = -errno;
                goto fail;
            }
        }
    }
    return 0;
fail:
    /* Detach the backend (fd = -1) from the queues already attached. */
    file.fd = -1;
    if (net->nc->info->type == NET_CLIENT_DRIVER_TAP) {
        while (file.index-- > 0) {
            if (!virtio_queue_enabled(dev, net->dev.vq_index +
                                      file.index)) {
                /* Queue might not be ready for start */
                continue;
            }
            int r = vhost_net_set_backend(&net->dev, &file);
            assert(r >= 0);
        }
    }
    /* Return the datapath to QEMU's own polling. */
    if (net->nc->info->poll) {
        net->nc->info->poll(net->nc, true);
    }
    vhost_dev_stop(&net->dev, dev);
fail_start:
    vhost_dev_disable_notifiers(&net->dev, dev);
fail_notifiers:
    return r;
}
294
/*
 * Stop vhost for one queue pair: detach the backend from every
 * virtqueue (tap only), resume QEMU-side polling, then stop the vhost
 * device and disable its host notifiers.
 */
static void vhost_net_stop_one(struct vhost_net *net,
                               VirtIODevice *dev)
{
    /* fd = -1 detaches the backend from a virtqueue. */
    struct vhost_vring_file file = { .fd = -1 };

    if (net->nc->info->type == NET_CLIENT_DRIVER_TAP) {
        for (file.index = 0; file.index < net->dev.nvqs; ++file.index) {
            int r = vhost_net_set_backend(&net->dev, &file);
            assert(r >= 0);
        }
    }
    /* Hand the datapath back to QEMU's own polling. */
    if (net->nc->info->poll) {
        net->nc->info->poll(net->nc, true);
    }
    vhost_dev_stop(&net->dev, dev);
    vhost_dev_disable_notifiers(&net->dev, dev);
}
312
/*
 * Start vhost for all @total_queues queue pairs of @dev.
 *
 * Assigns each peer's vq_index, binds guest notifiers for all queues,
 * then starts each queue pair (restoring its saved vring-enable state).
 * On failure, queue pairs already started are stopped and the guest
 * notifiers are unbound; returns 0 on success, negative errno on error.
 */
int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
                    int total_queues)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    struct vhost_net *net;
    int r, e, i;
    NetClientState *peer;

    if (!k->set_guest_notifiers) {
        error_report("binding does not support guest notifiers");
        return -ENOSYS;
    }

    for (i = 0; i < total_queues; i++) {

        peer = qemu_get_peer(ncs, i);
        net = get_vhost_net(peer);
        /* Each queue pair occupies two consecutive virtqueue indices. */
        vhost_net_set_vq_index(net, i * 2);

        /* Suppress the masking guest notifiers on vhost user
         * because vhost user doesn't interrupt masking/unmasking
         * properly.
         */
        if (net->nc->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
            dev->use_guest_notifier_mask = false;
        }
     }

    r = k->set_guest_notifiers(qbus->parent, total_queues * 2, true);
    if (r < 0) {
        error_report("Error binding guest notifier: %d", -r);
        goto err;
    }

    for (i = 0; i < total_queues; i++) {
        peer = qemu_get_peer(ncs, i);
        r = vhost_net_start_one(get_vhost_net(peer), dev);

        if (r < 0) {
            goto err_start;
        }

        if (peer->vring_enable) {
            /* restore vring enable state */
            r = vhost_set_vring_enable(peer, peer->vring_enable);

            if (r < 0) {
                goto err_start;
            }
        }
    }

    return 0;

err_start:
    /* Unwind only the queue pairs that started successfully. */
    while (--i >= 0) {
        peer = qemu_get_peer(ncs , i);
        vhost_net_stop_one(get_vhost_net(peer), dev);
    }
    e = k->set_guest_notifiers(qbus->parent, total_queues * 2, false);
    if (e < 0) {
        fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", e);
        fflush(stderr);
    }
err:
    return r;
}
382
/*
 * Stop vhost for all @total_queues queue pairs of @dev and unbind the
 * guest notifiers.  Notifier cleanup failure is fatal (assert) since
 * there is no way to recover a half-unbound device.
 */
void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs,
                    int total_queues)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    int i, r;

    for (i = 0; i < total_queues; i++) {
        vhost_net_stop_one(get_vhost_net(ncs[i].peer), dev);
    }

    r = k->set_guest_notifiers(qbus->parent, total_queues * 2, false);
    if (r < 0) {
        fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", r);
        fflush(stderr);
    }
    assert(r >= 0);
}
402
/* Release vhost device state; does not free @net itself. */
void vhost_net_cleanup(struct vhost_net *net)
{
    vhost_dev_cleanup(&net->dev);
}
407
/*
 * Tell a vhost-user backend that migration finished, passing the MAC
 * address so the backend can announce it.  Only valid for vhost-user
 * backends that implement vhost_migration_done (asserted).
 */
int vhost_net_notify_migration_done(struct vhost_net *net, char* mac_addr)
{
    const VhostOps *vhost_ops = net->dev.vhost_ops;

    assert(vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);
    assert(vhost_ops->vhost_migration_done);

    return vhost_ops->vhost_migration_done(&net->dev, mac_addr);
}
417
/* True if virtqueue @idx has a pending (unmasked) notification. */
bool vhost_net_virtqueue_pending(VHostNetState *net, int idx)
{
    return vhost_virtqueue_pending(&net->dev, idx);
}
422
/* Mask or unmask guest notifications for virtqueue @idx. */
void vhost_net_virtqueue_mask(VHostNetState *net, VirtIODevice *dev,
                              int idx, bool mask)
{
    vhost_virtqueue_mask(&net->dev, dev, idx, mask);
}
428
429 VHostNetState *get_vhost_net(NetClientState *nc)
430 {
431 VHostNetState *vhost_net = 0;
432
433 if (!nc) {
434 return 0;
435 }
436
437 switch (nc->info->type) {
438 case NET_CLIENT_DRIVER_TAP:
439 vhost_net = tap_get_vhost_net(nc);
440 break;
441 #ifdef CONFIG_VHOST_NET_USER
442 case NET_CLIENT_DRIVER_VHOST_USER:
443 vhost_net = vhost_user_get_vhost_net(nc);
444 assert(vhost_net);
445 break;
446 #endif
447 #ifdef CONFIG_VHOST_NET_VDPA
448 case NET_CLIENT_DRIVER_VHOST_VDPA:
449 vhost_net = vhost_vdpa_get_vhost_net(nc);
450 assert(vhost_net);
451 break;
452 #endif
453 default:
454 break;
455 }
456
457 return vhost_net;
458 }
459
460 int vhost_set_vring_enable(NetClientState *nc, int enable)
461 {
462 VHostNetState *net = get_vhost_net(nc);
463 const VhostOps *vhost_ops = net->dev.vhost_ops;
464
465 nc->vring_enable = enable;
466
467 if (vhost_ops && vhost_ops->vhost_set_vring_enable) {
468 return vhost_ops->vhost_set_vring_enable(&net->dev, enable);
469 }
470
471 return 0;
472 }
473
/*
 * Propagate an MTU change to the vhost backend.  Backends without a
 * vhost_net_set_mtu op silently accept any MTU (returns 0).
 */
int vhost_net_set_mtu(struct vhost_net *net, uint16_t mtu)
{
    const VhostOps *vhost_ops = net->dev.vhost_ops;

    if (!vhost_ops->vhost_net_set_mtu) {
        return 0;
    }

    return vhost_ops->vhost_net_set_mtu(&net->dev, mtu);
}