/* virtio-pci.c - pci interface for virtio interface
 *
 * (c) Copyright 2008 Bull S.A.S.
 *
 *  Author: Laurent Vivier <Laurent.Vivier@bull.net>
 *
 * some parts from Linux Virtio PCI driver
 *
 *  Copyright IBM Corp. 2007
 *  Authors: Anthony Liguori <aliguori@us.ibm.com>
 *
 */

#include "etherboot.h"
#include "ipxe/io.h"
#include "ipxe/iomap.h"
#include "ipxe/pci.h"
#include "ipxe/reboot.h"
#include "ipxe/virtio-pci.h"
#include "ipxe/virtio-ring.h"

int vp_find_vq(unsigned int ioaddr, int queue_index,
               struct vring_virtqueue *vq)
{
    struct vring *vr = &vq->vring;
    u16 num;

    /* select the queue */

    outw(queue_index, ioaddr + VIRTIO_PCI_QUEUE_SEL);

    /* check if the queue is available */

    num = inw(ioaddr + VIRTIO_PCI_QUEUE_NUM);
    if (!num) {
        DBG("VIRTIO-PCI ERROR: queue size is 0\n");
        return -1;
    }

    if (num > MAX_QUEUE_NUM) {
        DBG("VIRTIO-PCI ERROR: queue size %d > %d\n", num, MAX_QUEUE_NUM);
        return -1;
    }

    /* check if the queue is already active */

    if (inl(ioaddr + VIRTIO_PCI_QUEUE_PFN)) {
        DBG("VIRTIO-PCI ERROR: queue already active\n");
        return -1;
    }

    vq->queue_index = queue_index;

    /* initialize the queue */

    vring_init(vr, num, (unsigned char *)&vq->queue);

    /* activate the queue
     *
     * NOTE: vr->desc is initialized by vring_init()
     */

    outl((unsigned long)virt_to_phys(vr->desc) >> PAGE_SHIFT,
         ioaddr + VIRTIO_PCI_QUEUE_PFN);

    return num;
}

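/* Illustrative sketch (not part of the driver): a legacy virtio driver
 * would typically probe its queues with vp_find_vq() right after
 * resetting the device, e.g. for a hypothetical two-queue NIC with
 * caller-provided `ioaddr`, `rx_vq` and `tx_vq`:
 *
 *     if (vp_find_vq(ioaddr, 0, &rx_vq) < 0 ||
 *         vp_find_vq(ioaddr, 1, &tx_vq) < 0) {
 *         ... bail out: the device lacks the expected queues ...
 *     }
 *
 * On success vp_find_vq() returns the ring size chosen by the device.
 */
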
#define CFG_POS(vdev, field) \
    (vdev->cfg_cap_pos + offsetof(struct virtio_pci_cfg_cap, field))

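/* The VIRTIO_PCI_CFG capability acts as a window into a device BAR:
 * writing cap.bar, cap.offset and cap.length selects a location, and a
 * subsequent access to pci_cfg_data is forwarded to that location by
 * the device. prep_pci_cfg_cap() below programs the window; the
 * vpm_io*() helpers then move the actual data through it.
 */
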
static void prep_pci_cfg_cap(struct virtio_pci_modern_device *vdev,
                             struct virtio_pci_region *region,
                             size_t offset, u32 length)
{
    pci_write_config_byte(vdev->pci, CFG_POS(vdev, cap.bar), region->bar);
    pci_write_config_dword(vdev->pci, CFG_POS(vdev, cap.length), length);
    pci_write_config_dword(vdev->pci, CFG_POS(vdev, cap.offset),
                           (intptr_t)(region->base + offset));
}

void vpm_iowrite8(struct virtio_pci_modern_device *vdev,
                  struct virtio_pci_region *region, u8 data, size_t offset)
{
    switch (region->flags & VIRTIO_PCI_REGION_TYPE_MASK) {
    case VIRTIO_PCI_REGION_MEMORY:
        writeb(data, region->base + offset);
        break;
    case VIRTIO_PCI_REGION_PORT:
        outb(data, region->base + offset);
        break;
    case VIRTIO_PCI_REGION_PCI_CONFIG:
        prep_pci_cfg_cap(vdev, region, offset, 1);
        pci_write_config_byte(vdev->pci, CFG_POS(vdev, pci_cfg_data), data);
        break;
    }
}

void vpm_iowrite16(struct virtio_pci_modern_device *vdev,
                   struct virtio_pci_region *region, u16 data, size_t offset)
{
    data = cpu_to_le16(data);
    switch (region->flags & VIRTIO_PCI_REGION_TYPE_MASK) {
    case VIRTIO_PCI_REGION_MEMORY:
        writew(data, region->base + offset);
        break;
    case VIRTIO_PCI_REGION_PORT:
        outw(data, region->base + offset);
        break;
    case VIRTIO_PCI_REGION_PCI_CONFIG:
        prep_pci_cfg_cap(vdev, region, offset, 2);
        pci_write_config_word(vdev->pci, CFG_POS(vdev, pci_cfg_data), data);
        break;
    }
}

void vpm_iowrite32(struct virtio_pci_modern_device *vdev,
                   struct virtio_pci_region *region, u32 data, size_t offset)
{
    data = cpu_to_le32(data);
    switch (region->flags & VIRTIO_PCI_REGION_TYPE_MASK) {
    case VIRTIO_PCI_REGION_MEMORY:
        writel(data, region->base + offset);
        break;
    case VIRTIO_PCI_REGION_PORT:
        outl(data, region->base + offset);
        break;
    case VIRTIO_PCI_REGION_PCI_CONFIG:
        prep_pci_cfg_cap(vdev, region, offset, 4);
        pci_write_config_dword(vdev->pci, CFG_POS(vdev, pci_cfg_data), data);
        break;
    }
}

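/* There is no 64-bit window in this transport; 64-bit fields such as
 * the queue addresses are written as two 32-bit halves. In iPXE this is
 * handled by the vpm_iowrite64() helper (declared alongside these
 * functions in ipxe/virtio-pci.h), which issues two vpm_iowrite32()
 * calls, e.g.:
 *
 *     vpm_iowrite64(vdev, &vdev->common, virt_to_phys(vq->vring.desc),
 *                   COMMON_OFFSET(queue_desc_lo),
 *                   COMMON_OFFSET(queue_desc_hi));
 */
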
u8 vpm_ioread8(struct virtio_pci_modern_device *vdev,
               struct virtio_pci_region *region, size_t offset)
{
    u8 data = 0;

    switch (region->flags & VIRTIO_PCI_REGION_TYPE_MASK) {
    case VIRTIO_PCI_REGION_MEMORY:
        data = readb(region->base + offset);
        break;
    case VIRTIO_PCI_REGION_PORT:
        data = inb(region->base + offset);
        break;
    case VIRTIO_PCI_REGION_PCI_CONFIG:
        prep_pci_cfg_cap(vdev, region, offset, 1);
        pci_read_config_byte(vdev->pci, CFG_POS(vdev, pci_cfg_data), &data);
        break;
    }
    return data;
}

u16 vpm_ioread16(struct virtio_pci_modern_device *vdev,
                 struct virtio_pci_region *region, size_t offset)
{
    u16 data = 0;

    switch (region->flags & VIRTIO_PCI_REGION_TYPE_MASK) {
    case VIRTIO_PCI_REGION_MEMORY:
        data = readw(region->base + offset);
        break;
    case VIRTIO_PCI_REGION_PORT:
        data = inw(region->base + offset);
        break;
    case VIRTIO_PCI_REGION_PCI_CONFIG:
        prep_pci_cfg_cap(vdev, region, offset, 2);
        pci_read_config_word(vdev->pci, CFG_POS(vdev, pci_cfg_data), &data);
        break;
    }
    return le16_to_cpu(data);
}

u32 vpm_ioread32(struct virtio_pci_modern_device *vdev,
                 struct virtio_pci_region *region, size_t offset)
{
    u32 data = 0;

    switch (region->flags & VIRTIO_PCI_REGION_TYPE_MASK) {
    case VIRTIO_PCI_REGION_MEMORY:
        data = readl(region->base + offset);
        break;
    case VIRTIO_PCI_REGION_PORT:
        data = inl(region->base + offset);
        break;
    case VIRTIO_PCI_REGION_PCI_CONFIG:
        prep_pci_cfg_cap(vdev, region, offset, 4);
        pci_read_config_dword(vdev->pci, CFG_POS(vdev, pci_cfg_data), &data);
        break;
    }
    return le32_to_cpu(data);
}

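/* Illustrative sketch (not part of the driver): reads wider than 32
 * bits are likewise composed from two 32-bit accesses, selecting each
 * half via the *_select register first. Assuming the standard
 * common-config layout, the 64-bit device feature word could be read
 * as:
 *
 *     vpm_iowrite32(vdev, &vdev->common, 0,
 *                   COMMON_OFFSET(device_feature_select));
 *     features = vpm_ioread32(vdev, &vdev->common,
 *                             COMMON_OFFSET(device_feature));
 *     vpm_iowrite32(vdev, &vdev->common, 1,
 *                   COMMON_OFFSET(device_feature_select));
 *     features |= (u64)vpm_ioread32(vdev, &vdev->common,
 *                                   COMMON_OFFSET(device_feature)) << 32;
 */
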
int virtio_pci_find_capability(struct pci_device *pci, uint8_t cfg_type)
{
    int pos;
    uint8_t type, bar;

    for (pos = pci_find_capability(pci, PCI_CAP_ID_VNDR);
         pos > 0;
         pos = pci_find_next_capability(pci, pos, PCI_CAP_ID_VNDR)) {
        pci_read_config_byte(pci, pos + offsetof(struct virtio_pci_cap,
                                                 cfg_type), &type);
        pci_read_config_byte(pci, pos + offsetof(struct virtio_pci_cap,
                                                 bar), &bar);

        /* Ignore structures with reserved BAR values */
        if (bar > 0x5) {
            continue;
        }

        if (type == cfg_type) {
            return pos;
        }
    }
    return 0;
}

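/* Illustrative sketch (not part of the driver): a modern-device probe
 * would look up the mandatory capability structures roughly like this,
 * using the cfg_type values from the virtio 1.0 spec:
 *
 *     common = virtio_pci_find_capability(pci, VIRTIO_PCI_CAP_COMMON_CFG);
 *     notify = virtio_pci_find_capability(pci, VIRTIO_PCI_CAP_NOTIFY_CFG);
 *     if (!common || !notify) {
 *         ... not a usable modern device ...
 *     }
 *
 * A return value of 0 means "not found", since a valid capability can
 * never live at config space offset 0.
 */
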
int virtio_pci_map_capability(struct pci_device *pci, int cap, size_t minlen,
                              u32 align, u32 start, u32 size,
                              struct virtio_pci_region *region)
{
    u8 bar;
    u32 offset, length, base_raw;
    unsigned long base;

    pci_read_config_byte(pci, cap + offsetof(struct virtio_pci_cap, bar), &bar);
    pci_read_config_dword(pci, cap + offsetof(struct virtio_pci_cap, offset),
                          &offset);
    pci_read_config_dword(pci, cap + offsetof(struct virtio_pci_cap, length),
                          &length);

    if (length <= start) {
        DBG("VIRTIO-PCI bad capability len %d (>%d expected)\n", length, start);
        return -EINVAL;
    }

    if (length - start < minlen) {
        DBG("VIRTIO-PCI bad capability len %d (>=%zd expected)\n", length, minlen);
        return -EINVAL;
    }

    length -= start;

    if (start + offset < offset) {
        DBG("VIRTIO-PCI map wrap-around %d+%d\n", start, offset);
        return -EINVAL;
    }

    offset += start;

    if (offset & (align - 1)) {
        DBG("VIRTIO-PCI offset %d not aligned to %d\n", offset, align);
        return -EINVAL;
    }

    if (length > size) {
        length = size;
    }

    if (minlen + offset < minlen ||
        minlen + offset > pci_bar_size(pci, PCI_BASE_ADDRESS(bar))) {
        DBG("VIRTIO-PCI map virtio %zd@%d out of range on bar %i length %ld\n",
            minlen, offset,
            bar, pci_bar_size(pci, PCI_BASE_ADDRESS(bar)));
        return -EINVAL;
    }

    region->length = length;
    region->bar = bar;

    base = pci_bar_start(pci, PCI_BASE_ADDRESS(bar));
    if (base) {
        pci_read_config_dword(pci, PCI_BASE_ADDRESS(bar), &base_raw);

        if (base_raw & PCI_BASE_ADDRESS_SPACE_IO) {
            /* Region accessed using port I/O */
            region->base = (void *)(base + offset);
            region->flags = VIRTIO_PCI_REGION_PORT;
        } else {
            /* Region mapped into memory space */
            region->base = ioremap(base + offset, length);
            region->flags = VIRTIO_PCI_REGION_MEMORY;
        }
    }

    if (!region->base) {
        /* Region accessed via PCI config space window */
        region->base = (void *)(intptr_t)offset;
        region->flags = VIRTIO_PCI_REGION_PCI_CONFIG;
    }

    return 0;
}

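/* Illustrative sketch (not part of the driver): mapping the common
 * config structure found above, assuming 4-byte alignment and no extra
 * offset into the capability:
 *
 *     err = virtio_pci_map_capability(pci, common,
 *                                     sizeof(struct virtio_pci_common_cfg),
 *                                     4, 0,
 *                                     sizeof(struct virtio_pci_common_cfg),
 *                                     &vdev->common);
 *
 * The resulting region is then accessed uniformly through vpm_ioread*()
 * and vpm_iowrite*() regardless of whether it ended up in memory space,
 * I/O port space or behind the PCI config window.
 */
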
void virtio_pci_unmap_capability(struct virtio_pci_region *region)
{
    unsigned region_type = region->flags & VIRTIO_PCI_REGION_TYPE_MASK;
    if (region_type == VIRTIO_PCI_REGION_MEMORY) {
        iounmap(region->base);
    }
}

void vpm_notify(struct virtio_pci_modern_device *vdev,
                struct vring_virtqueue *vq)
{
    vpm_iowrite16(vdev, &vq->notification, (u16)vq->queue_index, 0);
}

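/* The notification word for each queue lives at
 * queue_notify_off * notify_off_multiplier within the notify region;
 * vpm_find_vqs() below maps exactly that 2-byte word per queue, which
 * is why vpm_notify() can simply write the queue index at offset 0.
 */
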
int vpm_find_vqs(struct virtio_pci_modern_device *vdev,
                 unsigned nvqs, struct vring_virtqueue *vqs)
{
    unsigned i;
    struct vring_virtqueue *vq;
    u16 size, off;
    u32 notify_offset_multiplier;
    int err = 0;

    if (nvqs > vpm_ioread16(vdev, &vdev->common, COMMON_OFFSET(num_queues))) {
        return -ENOENT;
    }

    /* Read notify_off_multiplier from config space. */
    pci_read_config_dword(vdev->pci,
        vdev->notify_cap_pos + offsetof(struct virtio_pci_notify_cap,
                                        notify_off_multiplier),
        &notify_offset_multiplier);

    for (i = 0; i < nvqs; i++) {
        /* Select the queue we're interested in */
        vpm_iowrite16(vdev, &vdev->common, (u16)i, COMMON_OFFSET(queue_select));

        /* Check if queue is either not available or already active. */
        size = vpm_ioread16(vdev, &vdev->common, COMMON_OFFSET(queue_size));
        /* QEMU has a bug where queues don't revert to inactive on device
         * reset. Skip checking the queue_enable field until it is fixed.
         */
        if (!size /*|| vpm_ioread16(vdev, &vdev->common.queue_enable)*/) {
            return -ENOENT;
        }

        if (size & (size - 1)) {
            DBG("VIRTIO-PCI %p: bad queue size %d\n", vdev, size);
            return -EINVAL;
        }

        vq = &vqs[i];
        vq->queue_index = i;

        /* get offset of notification word for this vq */
        off = vpm_ioread16(vdev, &vdev->common, COMMON_OFFSET(queue_notify_off));
        vq->vring.num = size;

        vring_init(&vq->vring, size, (unsigned char *)vq->queue);

        /* activate the queue */
        vpm_iowrite16(vdev, &vdev->common, size, COMMON_OFFSET(queue_size));

        vpm_iowrite64(vdev, &vdev->common, virt_to_phys(vq->vring.desc),
                      COMMON_OFFSET(queue_desc_lo),
                      COMMON_OFFSET(queue_desc_hi));
        vpm_iowrite64(vdev, &vdev->common, virt_to_phys(vq->vring.avail),
                      COMMON_OFFSET(queue_avail_lo),
                      COMMON_OFFSET(queue_avail_hi));
        vpm_iowrite64(vdev, &vdev->common, virt_to_phys(vq->vring.used),
                      COMMON_OFFSET(queue_used_lo),
                      COMMON_OFFSET(queue_used_hi));

        err = virtio_pci_map_capability(vdev->pci,
                                        vdev->notify_cap_pos, 2, 2,
                                        off * notify_offset_multiplier, 2,
                                        &vq->notification);
        if (err) {
            goto err_map_notify;
        }
    }

    /* Select and activate all queues. Has to be done last: once we do
     * this, there's no way to go back except reset.
     */
    for (i = 0; i < nvqs; i++) {
        vq = &vqs[i];
        vpm_iowrite16(vdev, &vdev->common, (u16)vq->queue_index,
                      COMMON_OFFSET(queue_select));
        vpm_iowrite16(vdev, &vdev->common, 1, COMMON_OFFSET(queue_enable));
    }
    return 0;

err_map_notify:
    /* Undo the virtio_pci_map_capability calls. */
    while (i-- > 0) {
        virtio_pci_unmap_capability(&vqs[i].notification);
    }
    return err;
}
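
/* Illustrative sketch (not part of the driver): a modern virtio NIC
 * driver would typically call vpm_find_vqs() once during open, after
 * feature negotiation but before setting DRIVER_OK, e.g. with a
 * hypothetical caller-provided `virtnet` structure:
 *
 *     if (vpm_find_vqs(&virtnet->vdev, 2, virtnet->virtqueue)) {
 *         ... reset the device and fail the open ...
 *     }
 *
 * On error the function has already unmapped any notification regions
 * it mapped, so no extra cleanup is needed for the queues themselves.
 */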