/* virtio-pci.c - pci interface for virtio interface
 *
 * (c) Copyright 2008 Bull S.A.S.
 *
 *  Author: Laurent Vivier <Laurent.Vivier@bull.net>
 *
 * some parts from Linux Virtio PCI driver
 *
 *  Copyright IBM Corp. 2007
 *  Authors: Anthony Liguori <aliguori@us.ibm.com>
 *
 */

#include "errno.h"
#include "byteswap.h"
#include "etherboot.h"
#include "assert.h"
#include "ipxe/io.h"
#include "ipxe/iomap.h"
#include "ipxe/pci.h"
#include "ipxe/reboot.h"
#include "ipxe/virtio-pci.h"
#include "ipxe/virtio-ring.h"

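/**
 * Find and activate a virtqueue (legacy virtio-pci interface)
 *
 * @v ioaddr       Legacy device I/O base address
 * @v queue_index  Index of the queue to set up
 * @v vq           Virtqueue structure to initialise
 * @ret num        Queue size on success, -1 on error
 */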
int vp_find_vq(unsigned int ioaddr, int queue_index,
               struct vring_virtqueue *vq)
{
    struct vring *vr = &vq->vring;
    u16 num;

    /* select the queue */
    outw(queue_index, ioaddr + VIRTIO_PCI_QUEUE_SEL);

    /* check if the queue is available */
    num = inw(ioaddr + VIRTIO_PCI_QUEUE_NUM);
    if (!num) {
        DBG("VIRTIO-PCI ERROR: queue size is 0\n");
        return -1;
    }

    if (num > MAX_QUEUE_NUM) {
        DBG("VIRTIO-PCI ERROR: queue size %d > %d\n", num, MAX_QUEUE_NUM);
        return -1;
    }

    /* check if the queue is already active */
    if (inl(ioaddr + VIRTIO_PCI_QUEUE_PFN)) {
        DBG("VIRTIO-PCI ERROR: queue already active\n");
        return -1;
    }

    vq->queue_index = queue_index;

    /* initialize the queue */
    vring_init(vr, num, (unsigned char *)&vq->queue);

    /* activate the queue
     *
     * NOTE: vr->desc is initialized by vring_init()
     */
    outl((unsigned long)virt_to_phys(vr->desc) >> PAGE_SHIFT,
         ioaddr + VIRTIO_PCI_QUEUE_PFN);

    return num;
}

#define CFG_POS(vdev, field) \
    (vdev->cfg_cap_pos + offsetof(struct virtio_pci_cfg_cap, field))

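/**
 * Point the PCI cfg capability window at a device region
 *
 * Programs bar, offset and length in the virtio_pci_cfg_cap structure
 * so that subsequent accesses to pci_cfg_data reach the given region.
 */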
static void prep_pci_cfg_cap(struct virtio_pci_modern_device *vdev,
                             struct virtio_pci_region *region,
                             size_t offset, u32 length)
{
    pci_write_config_byte(vdev->pci, CFG_POS(vdev, cap.bar), region->bar);
    pci_write_config_dword(vdev->pci, CFG_POS(vdev, cap.length), length);
    pci_write_config_dword(vdev->pci, CFG_POS(vdev, cap.offset),
                           (intptr_t)(region->base + offset));
}

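/**
 * Write an 8-bit value to a device region
 *
 * The region may be memory-mapped, in port I/O space, or reachable
 * only through the PCI cfg capability window.
 */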
void vpm_iowrite8(struct virtio_pci_modern_device *vdev,
                  struct virtio_pci_region *region, u8 data, size_t offset)
{
    switch (region->flags & VIRTIO_PCI_REGION_TYPE_MASK) {
    case VIRTIO_PCI_REGION_MEMORY:
        writeb(data, region->base + offset);
        break;
    case VIRTIO_PCI_REGION_PORT:
        outb(data, region->base + offset);
        break;
    case VIRTIO_PCI_REGION_PCI_CONFIG:
        prep_pci_cfg_cap(vdev, region, offset, 1);
        pci_write_config_byte(vdev->pci, CFG_POS(vdev, pci_cfg_data), data);
        break;
    default:
        assert(0);
        break;
    }
}

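/**
 * Write a 16-bit value to a device region (converted to little-endian)
 */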
void vpm_iowrite16(struct virtio_pci_modern_device *vdev,
                   struct virtio_pci_region *region, u16 data, size_t offset)
{
    data = cpu_to_le16(data);
    switch (region->flags & VIRTIO_PCI_REGION_TYPE_MASK) {
    case VIRTIO_PCI_REGION_MEMORY:
        writew(data, region->base + offset);
        break;
    case VIRTIO_PCI_REGION_PORT:
        outw(data, region->base + offset);
        break;
    case VIRTIO_PCI_REGION_PCI_CONFIG:
        prep_pci_cfg_cap(vdev, region, offset, 2);
        pci_write_config_word(vdev->pci, CFG_POS(vdev, pci_cfg_data), data);
        break;
    default:
        assert(0);
        break;
    }
}

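/**
 * Write a 32-bit value to a device region (converted to little-endian)
 */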
void vpm_iowrite32(struct virtio_pci_modern_device *vdev,
                   struct virtio_pci_region *region, u32 data, size_t offset)
{
    data = cpu_to_le32(data);
    switch (region->flags & VIRTIO_PCI_REGION_TYPE_MASK) {
    case VIRTIO_PCI_REGION_MEMORY:
        writel(data, region->base + offset);
        break;
    case VIRTIO_PCI_REGION_PORT:
        outl(data, region->base + offset);
        break;
    case VIRTIO_PCI_REGION_PCI_CONFIG:
        prep_pci_cfg_cap(vdev, region, offset, 4);
        pci_write_config_dword(vdev->pci, CFG_POS(vdev, pci_cfg_data), data);
        break;
    default:
        assert(0);
        break;
    }
}

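/**
 * Read an 8-bit value from a device region
 */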
u8 vpm_ioread8(struct virtio_pci_modern_device *vdev,
               struct virtio_pci_region *region, size_t offset)
{
    uint8_t data;
    switch (region->flags & VIRTIO_PCI_REGION_TYPE_MASK) {
    case VIRTIO_PCI_REGION_MEMORY:
        data = readb(region->base + offset);
        break;
    case VIRTIO_PCI_REGION_PORT:
        data = inb(region->base + offset);
        break;
    case VIRTIO_PCI_REGION_PCI_CONFIG:
        prep_pci_cfg_cap(vdev, region, offset, 1);
        pci_read_config_byte(vdev->pci, CFG_POS(vdev, pci_cfg_data), &data);
        break;
    default:
        assert(0);
        data = 0;
        break;
    }
    return data;
}

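/**
 * Read a little-endian 16-bit value from a device region
 */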
u16 vpm_ioread16(struct virtio_pci_modern_device *vdev,
                 struct virtio_pci_region *region, size_t offset)
{
    uint16_t data;
    switch (region->flags & VIRTIO_PCI_REGION_TYPE_MASK) {
    case VIRTIO_PCI_REGION_MEMORY:
        data = readw(region->base + offset);
        break;
    case VIRTIO_PCI_REGION_PORT:
        data = inw(region->base + offset);
        break;
    case VIRTIO_PCI_REGION_PCI_CONFIG:
        prep_pci_cfg_cap(vdev, region, offset, 2);
        pci_read_config_word(vdev->pci, CFG_POS(vdev, pci_cfg_data), &data);
        break;
    default:
        assert(0);
        data = 0;
        break;
    }
    return le16_to_cpu(data);
}

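/**
 * Read a little-endian 32-bit value from a device region
 */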
u32 vpm_ioread32(struct virtio_pci_modern_device *vdev,
                 struct virtio_pci_region *region, size_t offset)
{
    uint32_t data;
    switch (region->flags & VIRTIO_PCI_REGION_TYPE_MASK) {
    case VIRTIO_PCI_REGION_MEMORY:
        data = readl(region->base + offset);
        break;
    case VIRTIO_PCI_REGION_PORT:
        data = inl(region->base + offset);
        break;
    case VIRTIO_PCI_REGION_PCI_CONFIG:
        prep_pci_cfg_cap(vdev, region, offset, 4);
        pci_read_config_dword(vdev->pci, CFG_POS(vdev, pci_cfg_data), &data);
        break;
    default:
        assert(0);
        data = 0;
        break;
    }
    return le32_to_cpu(data);
}

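/**
 * Find a virtio vendor-specific capability of the given type
 *
 * @v pci       PCI device
 * @v cfg_type  Capability type to look for (VIRTIO_PCI_CAP_*)
 * @ret pos     Config space offset of the capability, or 0 if not found
 */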
int virtio_pci_find_capability(struct pci_device *pci, uint8_t cfg_type)
{
    int pos;
    uint8_t type, bar;

    for (pos = pci_find_capability(pci, PCI_CAP_ID_VNDR);
         pos > 0;
         pos = pci_find_next_capability(pci, pos, PCI_CAP_ID_VNDR)) {

        pci_read_config_byte(pci, pos + offsetof(struct virtio_pci_cap,
                             cfg_type), &type);
        pci_read_config_byte(pci, pos + offsetof(struct virtio_pci_cap,
                             bar), &bar);

        /* Ignore structures with reserved BAR values */
        if (bar > 0x5) {
            continue;
        }

        if (type == cfg_type) {
            return pos;
        }
    }
    return 0;
}

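/**
 * Map the device region described by a virtio capability
 *
 * @v pci      PCI device
 * @v cap      Config space offset of the capability
 * @v minlen   Minimum required length of the mapping
 * @v align    Required alignment of the mapped offset
 * @v start    Offset into the region described by the capability
 * @v size     Maximum length to map
 * @v region   Region descriptor to fill in
 * @ret rc     Return status code
 */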
int virtio_pci_map_capability(struct pci_device *pci, int cap, size_t minlen,
                              u32 align, u32 start, u32 size,
                              struct virtio_pci_region *region)
{
    u8 bar;
    u32 offset, length, base_raw;
    unsigned long base;

    pci_read_config_byte(pci, cap + offsetof(struct virtio_pci_cap, bar), &bar);
    pci_read_config_dword(pci, cap + offsetof(struct virtio_pci_cap, offset),
                          &offset);
    pci_read_config_dword(pci, cap + offsetof(struct virtio_pci_cap, length),
                          &length);

    if (length <= start) {
        DBG("VIRTIO-PCI bad capability len %u (>%u expected)\n", length, start);
        return -EINVAL;
    }
    if (length - start < minlen) {
        DBG("VIRTIO-PCI bad capability len %u (>=%zu expected)\n", length, minlen);
        return -EINVAL;
    }
    length -= start;
    if (start + offset < offset) {
        DBG("VIRTIO-PCI map wrap-around %u+%u\n", start, offset);
        return -EINVAL;
    }
    offset += start;
    if (offset & (align - 1)) {
        DBG("VIRTIO-PCI offset %u not aligned to %u\n", offset, align);
        return -EINVAL;
    }
    if (length > size) {
        length = size;
    }

    if (minlen + offset < minlen ||
        minlen + offset > pci_bar_size(pci, PCI_BASE_ADDRESS(bar))) {
        DBG("VIRTIO-PCI map virtio %zu@%u out of range on bar %i length %lu\n",
            minlen, offset,
            bar, pci_bar_size(pci, PCI_BASE_ADDRESS(bar)));
        return -EINVAL;
    }

    region->base = NULL;
    region->length = length;
    region->bar = bar;

    base = pci_bar_start(pci, PCI_BASE_ADDRESS(bar));
    if (base) {
        pci_read_config_dword(pci, PCI_BASE_ADDRESS(bar), &base_raw);

        if (base_raw & PCI_BASE_ADDRESS_SPACE_IO) {
            /* Region accessed using port I/O */
            region->base = (void *)(base + offset);
            region->flags = VIRTIO_PCI_REGION_PORT;
        } else {
            /* Region mapped into memory space */
            region->base = ioremap(base + offset, length);
            region->flags = VIRTIO_PCI_REGION_MEMORY;
        }
    }
    if (!region->base) {
        /* Region accessed via PCI config space window */
        region->base = (void *)(intptr_t)offset;
        region->flags = VIRTIO_PCI_REGION_PCI_CONFIG;
    }
    return 0;
}

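/**
 * Unmap a device region mapped by virtio_pci_map_capability
 *
 * Only memory-mapped regions need to be unmapped.
 */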
void virtio_pci_unmap_capability(struct virtio_pci_region *region)
{
    unsigned region_type = region->flags & VIRTIO_PCI_REGION_TYPE_MASK;
    if (region_type == VIRTIO_PCI_REGION_MEMORY) {
        iounmap(region->base);
    }
}

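/**
 * Notify the device about new buffers in a virtqueue
 *
 * Writes the queue index to the queue's notification region.
 */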
void vpm_notify(struct virtio_pci_modern_device *vdev,
                struct vring_virtqueue *vq)
{
    vpm_iowrite16(vdev, &vq->notification, (u16)vq->queue_index, 0);
}

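/**
 * Find, configure and enable the first nvqs virtqueues
 *
 * @v vdev  Virtio modern PCI device
 * @v nvqs  Number of virtqueues to set up
 * @v vqs   Array of nvqs virtqueue structures to initialise
 * @ret rc  Return status code
 */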
int vpm_find_vqs(struct virtio_pci_modern_device *vdev,
                 unsigned nvqs, struct vring_virtqueue *vqs)
{
    unsigned i;
    struct vring_virtqueue *vq;
    u16 size, off;
    u32 notify_offset_multiplier;
    int err;

    if (nvqs > vpm_ioread16(vdev, &vdev->common, COMMON_OFFSET(num_queues))) {
        return -ENOENT;
    }

    /* Read notify_off_multiplier from config space. */
    pci_read_config_dword(vdev->pci,
                          vdev->notify_cap_pos +
                          offsetof(struct virtio_pci_notify_cap,
                                   notify_off_multiplier),
                          &notify_offset_multiplier);

    for (i = 0; i < nvqs; i++) {
        /* Select the queue we're interested in */
        vpm_iowrite16(vdev, &vdev->common, (u16)i, COMMON_OFFSET(queue_select));

        /* Check if queue is either not available or already active. */
        size = vpm_ioread16(vdev, &vdev->common, COMMON_OFFSET(queue_size));
        /* QEMU has a bug where queues don't revert to inactive on device
         * reset. Skip checking the queue_enable field until it is fixed.
         */
        if (!size /*|| vpm_ioread16(vdev, &vdev->common.queue_enable)*/)
            return -ENOENT;

        if (size & (size - 1)) {
            DBG("VIRTIO-PCI %p: bad queue size %d\n", vdev, size);
            return -EINVAL;
        }

        vq = &vqs[i];
        vq->queue_index = i;

        /* get offset of notification word for this vq */
        off = vpm_ioread16(vdev, &vdev->common, COMMON_OFFSET(queue_notify_off));
        vq->vring.num = size;

        vring_init(&vq->vring, size, (unsigned char *)vq->queue);

        /* activate the queue */
        vpm_iowrite16(vdev, &vdev->common, size, COMMON_OFFSET(queue_size));

        vpm_iowrite64(vdev, &vdev->common, virt_to_phys(vq->vring.desc),
                      COMMON_OFFSET(queue_desc_lo),
                      COMMON_OFFSET(queue_desc_hi));
        vpm_iowrite64(vdev, &vdev->common, virt_to_phys(vq->vring.avail),
                      COMMON_OFFSET(queue_avail_lo),
                      COMMON_OFFSET(queue_avail_hi));
        vpm_iowrite64(vdev, &vdev->common, virt_to_phys(vq->vring.used),
                      COMMON_OFFSET(queue_used_lo),
                      COMMON_OFFSET(queue_used_hi));

        err = virtio_pci_map_capability(vdev->pci,
                                        vdev->notify_cap_pos, 2, 2,
                                        off * notify_offset_multiplier, 2,
                                        &vq->notification);
        if (err) {
            goto err_map_notify;
        }
    }

    /* Select and activate all queues. Has to be done last: once we do
     * this, there's no way to go back except reset.
     */
    for (i = 0; i < nvqs; i++) {
        vq = &vqs[i];
        vpm_iowrite16(vdev, &vdev->common, (u16)vq->queue_index,
                      COMMON_OFFSET(queue_select));
        vpm_iowrite16(vdev, &vdev->common, 1, COMMON_OFFSET(queue_enable));
    }
    return 0;

err_map_notify:
    /* Undo the virtio_pci_map_capability calls. */
    while (i-- > 0) {
        virtio_pci_unmap_capability(&vqs[i].notification);
    }
    return err;
}