mirror of
https://github.com/ipxe/ipxe
synced 2025-12-23 21:41:43 +03:00
[virtio] Add virtio 1.0 PCI support
This commit adds support for driving virtio 1.0 PCI devices. In addition to various helpers, a number of vpm_ functions are introduced to be used instead of their legacy vp_ counterparts when accessing virtio 1.0 (aka modern) devices. Signed-off-by: Ladi Prosek <lprosek@redhat.com> Reviewed-by: Michael S. Tsirkin <mst@redhat.com> Modified-by: Michael Brown <mcb30@ipxe.org> Signed-off-by: Michael Brown <mcb30@ipxe.org>
This commit is contained in:
committed by
Michael Brown
parent
7b499f849e
commit
8a055a2a70
@@ -11,10 +11,15 @@
|
||||
*
|
||||
*/
|
||||
|
||||
#include "errno.h"
|
||||
#include "byteswap.h"
|
||||
#include "etherboot.h"
|
||||
#include "ipxe/io.h"
|
||||
#include "ipxe/virtio-ring.h"
|
||||
#include "ipxe/iomap.h"
|
||||
#include "ipxe/pci.h"
|
||||
#include "ipxe/reboot.h"
|
||||
#include "ipxe/virtio-pci.h"
|
||||
#include "ipxe/virtio-ring.h"
|
||||
|
||||
int vp_find_vq(unsigned int ioaddr, int queue_index,
|
||||
struct vring_virtqueue *vq)
|
||||
@@ -30,19 +35,19 @@ int vp_find_vq(unsigned int ioaddr, int queue_index,
|
||||
|
||||
num = inw(ioaddr + VIRTIO_PCI_QUEUE_NUM);
|
||||
if (!num) {
|
||||
printf("ERROR: queue size is 0\n");
|
||||
DBG("VIRTIO-PCI ERROR: queue size is 0\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (num > MAX_QUEUE_NUM) {
|
||||
printf("ERROR: queue size %d > %d\n", num, MAX_QUEUE_NUM);
|
||||
DBG("VIRTIO-PCI ERROR: queue size %d > %d\n", num, MAX_QUEUE_NUM);
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* check if the queue is already active */
|
||||
|
||||
if (inl(ioaddr + VIRTIO_PCI_QUEUE_PFN)) {
|
||||
printf("ERROR: queue already active\n");
|
||||
DBG("VIRTIO-PCI ERROR: queue already active\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
@@ -62,3 +67,343 @@ int vp_find_vq(unsigned int ioaddr, int queue_index,
|
||||
|
||||
return num;
|
||||
}
|
||||
|
||||
#define CFG_POS(vdev, field) \
|
||||
(vdev->cfg_cap_pos + offsetof(struct virtio_pci_cfg_cap, field))
|
||||
|
||||
static void prep_pci_cfg_cap(struct virtio_pci_modern_device *vdev,
|
||||
struct virtio_pci_region *region,
|
||||
size_t offset, u32 length)
|
||||
{
|
||||
pci_write_config_byte(vdev->pci, CFG_POS(vdev, cap.bar), region->bar);
|
||||
pci_write_config_dword(vdev->pci, CFG_POS(vdev, cap.length), length);
|
||||
pci_write_config_dword(vdev->pci, CFG_POS(vdev, cap.offset),
|
||||
(intptr_t)(region->base + offset));
|
||||
}
|
||||
|
||||
/* Write an 8-bit value to a virtio 1.0 device region, dispatching on
 * how the region is accessed (MMIO, port I/O, or the PCI cfg window).
 */
void vpm_iowrite8(struct virtio_pci_modern_device *vdev,
                  struct virtio_pci_region *region, u8 data, size_t offset)
{
    unsigned type = region->flags & VIRTIO_PCI_REGION_TYPE_MASK;

    if (type == VIRTIO_PCI_REGION_MEMORY) {
        writeb(data, region->base + offset);
    } else if (type == VIRTIO_PCI_REGION_PORT) {
        outb(data, region->base + offset);
    } else if (type == VIRTIO_PCI_REGION_PCI_CONFIG) {
        prep_pci_cfg_cap(vdev, region, offset, 1);
        pci_write_config_byte(vdev->pci, CFG_POS(vdev, pci_cfg_data), data);
    } else {
        assert(0);
    }
}
|
||||
|
||||
/* Write a 16-bit value (converted to little-endian, per the virtio 1.0
 * spec) to a device region, dispatching on the region access type.
 */
void vpm_iowrite16(struct virtio_pci_modern_device *vdev,
                   struct virtio_pci_region *region, u16 data, size_t offset)
{
    unsigned type = region->flags & VIRTIO_PCI_REGION_TYPE_MASK;

    data = cpu_to_le16(data);
    if (type == VIRTIO_PCI_REGION_MEMORY) {
        writew(data, region->base + offset);
    } else if (type == VIRTIO_PCI_REGION_PORT) {
        outw(data, region->base + offset);
    } else if (type == VIRTIO_PCI_REGION_PCI_CONFIG) {
        prep_pci_cfg_cap(vdev, region, offset, 2);
        pci_write_config_word(vdev->pci, CFG_POS(vdev, pci_cfg_data), data);
    } else {
        assert(0);
    }
}
|
||||
|
||||
/* Write a 32-bit value (converted to little-endian, per the virtio 1.0
 * spec) to a device region, dispatching on the region access type.
 */
void vpm_iowrite32(struct virtio_pci_modern_device *vdev,
                   struct virtio_pci_region *region, u32 data, size_t offset)
{
    unsigned type = region->flags & VIRTIO_PCI_REGION_TYPE_MASK;

    data = cpu_to_le32(data);
    if (type == VIRTIO_PCI_REGION_MEMORY) {
        writel(data, region->base + offset);
    } else if (type == VIRTIO_PCI_REGION_PORT) {
        outl(data, region->base + offset);
    } else if (type == VIRTIO_PCI_REGION_PCI_CONFIG) {
        prep_pci_cfg_cap(vdev, region, offset, 4);
        pci_write_config_dword(vdev->pci, CFG_POS(vdev, pci_cfg_data), data);
    } else {
        assert(0);
    }
}
|
||||
|
||||
/* Read an 8-bit value from a virtio 1.0 device region, dispatching on
 * the region access type.  Returns 0 for an unrecognized region type.
 */
u8 vpm_ioread8(struct virtio_pci_modern_device *vdev,
               struct virtio_pci_region *region, size_t offset)
{
    unsigned type = region->flags & VIRTIO_PCI_REGION_TYPE_MASK;
    uint8_t data = 0;

    if (type == VIRTIO_PCI_REGION_MEMORY) {
        data = readb(region->base + offset);
    } else if (type == VIRTIO_PCI_REGION_PORT) {
        data = inb(region->base + offset);
    } else if (type == VIRTIO_PCI_REGION_PCI_CONFIG) {
        prep_pci_cfg_cap(vdev, region, offset, 1);
        pci_read_config_byte(vdev->pci, CFG_POS(vdev, pci_cfg_data), &data);
    } else {
        assert(0);
    }
    return data;
}
|
||||
|
||||
/* Read a 16-bit little-endian value from a virtio 1.0 device region and
 * convert it to CPU byte order.  Returns 0 for an unrecognized region type.
 */
u16 vpm_ioread16(struct virtio_pci_modern_device *vdev,
                 struct virtio_pci_region *region, size_t offset)
{
    unsigned type = region->flags & VIRTIO_PCI_REGION_TYPE_MASK;
    uint16_t data = 0;

    if (type == VIRTIO_PCI_REGION_MEMORY) {
        data = readw(region->base + offset);
    } else if (type == VIRTIO_PCI_REGION_PORT) {
        data = inw(region->base + offset);
    } else if (type == VIRTIO_PCI_REGION_PCI_CONFIG) {
        prep_pci_cfg_cap(vdev, region, offset, 2);
        pci_read_config_word(vdev->pci, CFG_POS(vdev, pci_cfg_data), &data);
    } else {
        assert(0);
    }
    return le16_to_cpu(data);
}
|
||||
|
||||
/* Read a 32-bit little-endian value from a virtio 1.0 device region and
 * convert it to CPU byte order.  Returns 0 for an unrecognized region type.
 *
 * Fix: the MEMORY and PORT cases used the 16-bit accessors readw()/inw(),
 * which truncated every 32-bit read to its low 16 bits.  A 32-bit read
 * must use readl()/inl(), mirroring vpm_iowrite32.
 */
u32 vpm_ioread32(struct virtio_pci_modern_device *vdev,
                 struct virtio_pci_region *region, size_t offset)
{
    uint32_t data;
    switch (region->flags & VIRTIO_PCI_REGION_TYPE_MASK) {
    case VIRTIO_PCI_REGION_MEMORY:
        data = readl(region->base + offset);
        break;
    case VIRTIO_PCI_REGION_PORT:
        data = inl(region->base + offset);
        break;
    case VIRTIO_PCI_REGION_PCI_CONFIG:
        prep_pci_cfg_cap(vdev, region, offset, 4);
        pci_read_config_dword(vdev->pci, CFG_POS(vdev, pci_cfg_data), &data);
        break;
    default:
        assert(0);
        data = 0;
        break;
    }
    return le32_to_cpu(data);
}
|
||||
|
||||
/* Walk the PCI vendor-specific capability list looking for a virtio
 * capability structure of the requested cfg_type.  Structures whose BAR
 * field holds a reserved value (> 5) are ignored.  Returns the config
 * space position of the matching capability, or 0 if none is found.
 */
int virtio_pci_find_capability(struct pci_device *pci, uint8_t cfg_type)
{
    uint8_t type, bar;
    int pos = pci_find_capability(pci, PCI_CAP_ID_VNDR);

    while (pos > 0) {
        pci_read_config_byte(pci, pos + offsetof(struct virtio_pci_cap,
                                                 cfg_type), &type);
        pci_read_config_byte(pci, pos + offsetof(struct virtio_pci_cap,
                                                 bar), &bar);

        /* Accept only structures with a valid BAR and matching type */
        if (bar <= 0x5 && type == cfg_type) {
            return pos;
        }

        pos = pci_find_next_capability(pci, pos, PCI_CAP_ID_VNDR);
    }
    return 0;
}
|
||||
|
||||
/**
 * Map the device region described by a virtio PCI capability structure.
 *
 * Reads the BAR index, offset and length fields of the capability at
 * config-space position @cap, validates them against @minlen/@align/
 * @start/@size, and fills in @region so that the vpm_io* helpers can
 * access it.  The region is accessed via MMIO (ioremap), via port I/O,
 * or — as a last resort — through the PCI cfg capability window.
 *
 * @param pci     PCI device
 * @param cap     config-space position of the virtio capability
 * @param minlen  minimum usable length required by the caller
 * @param align   required alignment of the final offset (power of two)
 * @param start   byte offset into the structure at which mapping starts
 * @param size    maximum length to map
 * @param region  region descriptor to fill in
 * @return        0 on success, -EINVAL on a malformed capability
 */
int virtio_pci_map_capability(struct pci_device *pci, int cap, size_t minlen,
                              u32 align, u32 start, u32 size,
                              struct virtio_pci_region *region)
{
    u8 bar;
    u32 offset, length, base_raw;
    unsigned long base;

    /* Read BAR index, structure offset and structure length from the
     * capability in PCI config space. */
    pci_read_config_byte(pci, cap + offsetof(struct virtio_pci_cap, bar), &bar);
    pci_read_config_dword(pci, cap + offsetof(struct virtio_pci_cap, offset),
                          &offset);
    pci_read_config_dword(pci, cap + offsetof(struct virtio_pci_cap, length),
                          &length);

    /* The structure must extend past the requested start... */
    if (length <= start) {
        DBG("VIRTIO-PCI bad capability len %u (>%u expected)\n", length, start);
        return -EINVAL;
    }
    /* ...and leave at least minlen usable bytes after it. */
    if (length - start < minlen) {
        DBG("VIRTIO-PCI bad capability len %u (>=%zu expected)\n", length, minlen);
        return -EINVAL;
    }
    length -= start;
    /* Guard against u32 wrap-around when advancing the offset. */
    if (start + offset < offset) {
        DBG("VIRTIO-PCI map wrap-around %u+%u\n", start, offset);
        return -EINVAL;
    }
    offset += start;
    /* Alignment check; assumes align is a power of two. */
    if (offset & (align - 1)) {
        DBG("VIRTIO-PCI offset %u not aligned to %u\n", offset, align);
        return -EINVAL;
    }
    /* Clamp the mapped length to what the caller asked for. */
    if (length > size) {
        length = size;
    }

    /* The usable window must fit inside the BAR (with overflow check). */
    if (minlen + offset < minlen ||
        minlen + offset > pci_bar_size(pci, PCI_BASE_ADDRESS(bar))) {
        DBG("VIRTIO-PCI map virtio %zu@%u out of range on bar %i length %lu\n",
            minlen, offset,
            bar, (unsigned long)pci_bar_size(pci, PCI_BASE_ADDRESS(bar)));
        return -EINVAL;
    }

    region->base = NULL;
    region->length = length;
    region->bar = bar;

    base = pci_bar_start(pci, PCI_BASE_ADDRESS(bar));
    if (base) {
        /* Re-read the raw BAR to learn whether it is I/O or memory. */
        pci_read_config_dword(pci, PCI_BASE_ADDRESS(bar), &base_raw);

        if (base_raw & PCI_BASE_ADDRESS_SPACE_IO) {
            /* Region accessed using port I/O */
            region->base = (void *)(base + offset);
            region->flags = VIRTIO_PCI_REGION_PORT;
        } else {
            /* Region mapped into memory space */
            region->base = ioremap(base + offset, length);
            region->flags = VIRTIO_PCI_REGION_MEMORY;
        }
    }
    if (!region->base) {
        /* Region accessed via PCI config space window; base stores the
         * in-BAR offset rather than a real pointer. */
        region->base = (void *)(intptr_t)offset;
        region->flags = VIRTIO_PCI_REGION_PCI_CONFIG;
    }
    return 0;
}
|
||||
|
||||
void virtio_pci_unmap_capability(struct virtio_pci_region *region)
|
||||
{
|
||||
unsigned region_type = region->flags & VIRTIO_PCI_REGION_TYPE_MASK;
|
||||
if (region_type == VIRTIO_PCI_REGION_MEMORY) {
|
||||
iounmap(region->base);
|
||||
}
|
||||
}
|
||||
|
||||
void vpm_notify(struct virtio_pci_modern_device *vdev,
|
||||
struct vring_virtqueue *vq)
|
||||
{
|
||||
vpm_iowrite16(vdev, &vq->notification, (u16)vq->queue_index, 0);
|
||||
}
|
||||
|
||||
int vpm_find_vqs(struct virtio_pci_modern_device *vdev,
|
||||
unsigned nvqs, struct vring_virtqueue *vqs)
|
||||
{
|
||||
unsigned i;
|
||||
struct vring_virtqueue *vq;
|
||||
u16 size, off;
|
||||
u32 notify_offset_multiplier;
|
||||
int err;
|
||||
|
||||
if (nvqs > vpm_ioread16(vdev, &vdev->common, COMMON_OFFSET(num_queues))) {
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
/* Read notify_off_multiplier from config space. */
|
||||
pci_read_config_dword(vdev->pci,
|
||||
vdev->notify_cap_pos + offsetof(struct virtio_pci_notify_cap,
|
||||
notify_off_multiplier),
|
||||
¬ify_offset_multiplier);
|
||||
|
||||
for (i = 0; i < nvqs; i++) {
|
||||
/* Select the queue we're interested in */
|
||||
vpm_iowrite16(vdev, &vdev->common, (u16)i, COMMON_OFFSET(queue_select));
|
||||
|
||||
/* Check if queue is either not available or already active. */
|
||||
size = vpm_ioread16(vdev, &vdev->common, COMMON_OFFSET(queue_size));
|
||||
/* QEMU has a bug where queues don't revert to inactive on device
|
||||
* reset. Skip checking the queue_enable field until it is fixed.
|
||||
*/
|
||||
if (!size /*|| vpm_ioread16(vdev, &vdev->common.queue_enable)*/)
|
||||
return -ENOENT;
|
||||
|
||||
if (size & (size - 1)) {
|
||||
DBG("VIRTIO-PCI %p: bad queue size %u", vdev, size);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
vq = &vqs[i];
|
||||
vq->queue_index = i;
|
||||
|
||||
/* get offset of notification word for this vq */
|
||||
off = vpm_ioread16(vdev, &vdev->common, COMMON_OFFSET(queue_notify_off));
|
||||
vq->vring.num = size;
|
||||
|
||||
vring_init(&vq->vring, size, (unsigned char *)vq->queue);
|
||||
|
||||
/* activate the queue */
|
||||
vpm_iowrite16(vdev, &vdev->common, size, COMMON_OFFSET(queue_size));
|
||||
|
||||
vpm_iowrite64(vdev, &vdev->common, virt_to_phys(vq->vring.desc),
|
||||
COMMON_OFFSET(queue_desc_lo),
|
||||
COMMON_OFFSET(queue_desc_hi));
|
||||
vpm_iowrite64(vdev, &vdev->common, virt_to_phys(vq->vring.avail),
|
||||
COMMON_OFFSET(queue_avail_lo),
|
||||
COMMON_OFFSET(queue_avail_hi));
|
||||
vpm_iowrite64(vdev, &vdev->common, virt_to_phys(vq->vring.used),
|
||||
COMMON_OFFSET(queue_used_lo),
|
||||
COMMON_OFFSET(queue_used_hi));
|
||||
|
||||
err = virtio_pci_map_capability(vdev->pci,
|
||||
vdev->notify_cap_pos, 2, 2,
|
||||
off * notify_offset_multiplier, 2,
|
||||
&vq->notification);
|
||||
if (err) {
|
||||
goto err_map_notify;
|
||||
}
|
||||
}
|
||||
|
||||
/* Select and activate all queues. Has to be done last: once we do
|
||||
* this, there's no way to go back except reset.
|
||||
*/
|
||||
for (i = 0; i < nvqs; i++) {
|
||||
vq = &vqs[i];
|
||||
vpm_iowrite16(vdev, &vdev->common, (u16)vq->queue_index,
|
||||
COMMON_OFFSET(queue_select));
|
||||
vpm_iowrite16(vdev, &vdev->common, 1, COMMON_OFFSET(queue_enable));
|
||||
}
|
||||
return 0;
|
||||
|
||||
err_map_notify:
|
||||
/* Undo the virtio_pci_map_capability calls. */
|
||||
while (i-- > 0) {
|
||||
virtio_pci_unmap_capability(&vqs[i].notification);
|
||||
}
|
||||
return err;
|
||||
}
|
||||
|
||||
@@ -18,8 +18,8 @@ FILE_LICENCE ( GPL2_OR_LATER );
|
||||
|
||||
#include "etherboot.h"
|
||||
#include "ipxe/io.h"
|
||||
#include "ipxe/virtio-ring.h"
|
||||
#include "ipxe/virtio-pci.h"
|
||||
#include "ipxe/virtio-ring.h"
|
||||
|
||||
#define BUG() do { \
|
||||
printf("BUG: failure at %s:%d/%s()!\n", \
|
||||
@@ -122,7 +122,8 @@ void vring_add_buf(struct vring_virtqueue *vq,
|
||||
wmb();
|
||||
}
|
||||
|
||||
void vring_kick(unsigned int ioaddr, struct vring_virtqueue *vq, int num_added)
|
||||
void vring_kick(struct virtio_pci_modern_device *vdev, unsigned int ioaddr,
|
||||
struct vring_virtqueue *vq, int num_added)
|
||||
{
|
||||
struct vring *vr = &vq->vring;
|
||||
|
||||
@@ -130,7 +131,13 @@ void vring_kick(unsigned int ioaddr, struct vring_virtqueue *vq, int num_added)
|
||||
vr->avail->idx += num_added;
|
||||
|
||||
mb();
|
||||
if (!(vr->used->flags & VRING_USED_F_NO_NOTIFY))
|
||||
vp_notify(ioaddr, vq->queue_index);
|
||||
if (!(vr->used->flags & VRING_USED_F_NO_NOTIFY)) {
|
||||
if (vdev) {
|
||||
/* virtio 1.0 */
|
||||
vpm_notify(vdev, vq);
|
||||
} else {
|
||||
/* legacy virtio */
|
||||
vp_notify(ioaddr, vq->queue_index);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
Reference in New Issue
Block a user