From 5b3adfa31a46c27d9719bd67af4206a03904ca4d Mon Sep 17 00:00:00 2001
From: Paolo Bonzini
Date: Tue, 13 Mar 2012 15:39:10 +0100
Subject: [PATCH] force-enable VIRTIO_BLK_F_SCSI if present on migration source

RH-Author: Paolo Bonzini
Message-id: <1331653150-28921-1-git-send-email-pbonzini@redhat.com>
Patchwork-id: 38490
O-Subject: [RHEL 6.3 qemu-kvm PATCH] force-enable VIRTIO_BLK_F_SCSI if present on migration source
Bugzilla: 800536
RH-Acked-by: Kevin Wolf
RH-Acked-by: Markus Armbruster
RH-Acked-by: Orit Wasserman

Bugzilla: 800536
Upstream status: different patch needed there, see below.
Brew build: 4147215

After the kernel SG_IO vulnerability was found, libvirt switched SG_IO
support in virtio-blk from the QEMU default of "on" to opt-in; libvirt
now looks at the domain XML and passes scsi=on/scsi=off explicitly.
Unfortunately this caused problems with migration.  If the destination
uses a newer libvirt, the same domain XML will result in a different
feature set.

In retrospect, the right thing to do would have been to disable SG_IO
*in QEMU* for new machine types, and have libvirt pass only scsi=on
(otherwise relying on the default).  Unfortunately it is too late to
change that now, and we need to deal with the problem.

Upstream, mst asked to always turn on VIRTIO_BLK_F_SCSI and reject
SG_IO requests later.  This is possible because the feature bit does
not need to mean "do SG_IO requests work?", only "are SG_IO requests
understood?".  SG_IO requests can and will be rejected anyway if the
storage is a file, a partition or an LVM volume.  This is quite clean,
but unfortunately it creates another migration breakage: old QEMU with
scsi=off will refuse migration from new QEMU (also with scsi=off).
Upstream does not support new->old migration, but we do, so we need
something else.  This patch simply overrides scsi=off to scsi=on if
the migration source had scsi=on.

Since we are deviating from upstream, we need to consider what to do
for RHEL 7.  There are two possibilities:

1) RHEL7->RHEL6 migration will not be supported, and the upstream
   solution will be okay for RHEL6->RHEL7 migration even with
   rhel6.x.0 machine types.

2) RHEL7->RHEL6 migration will be supported, and we will have to
   re-add the coupling between scsi=on/off and VIRTIO_BLK_F_SCSI for
   the rhel6.x.0 machine types.

Tested here.  After migrating a "scsi=on" guest to "scsi=off" with the
patch installed, migration works and "info qtree" shows:

    dev-prop: scsi = on

Please hold your nose, review and ack.

Signed-off-by: Paolo Bonzini
---
 hw/virtio-blk.c |   12 +++++++++++-
 hw/virtio-pci.c |    7 +++++++
 hw/virtio.c     |   15 ++++++++++++---
 hw/virtio.h     |    3 +++
 4 files changed, 33 insertions(+), 4 deletions(-)

Signed-off-by: Michal Novotny
---
 hw/virtio-blk.c |   13 ++++++++++++-
 hw/virtio-pci.c |    7 +++++++
 hw/virtio.c     |   15 ++++++++++++---
 hw/virtio.h     |    3 +++
 4 files changed, 34 insertions(+), 4 deletions(-)

diff --git a/hw/virtio-blk.c b/hw/virtio-blk.c
index 278e886..05209c6 100644
--- a/hw/virtio-blk.c
+++ b/hw/virtio-blk.c
@@ -545,7 +545,18 @@ static int virtio_blk_load(QEMUFile *f, void *opaque, int version_id)
     if (version_id != 2)
         return -EINVAL;
 
-    ret = virtio_load(&s->vdev, f);
+    /* RHEL only.  Upstream we will fix this by ensuring that VIRTIO_BLK_F_SCSI
+     * is always set.  This however will cause migration from new QEMU to old
+     * QEMU to fail if both have scsi=off.  We cannot use compatibility
+     * properties because the scsi property is being overridden by management
+     * (libvirt).
+     *
+     * If RHEL7->RHEL6 migration will be supported, we'll have to disable
+     * VIRTIO_BLK_F_SCSI in the migration stream when running on a rhel6.x.0
+     * machine type with scsi=off.  Otherwise SG_IO will magically start
+     * working on the destination.
+     */
+    ret = virtio_load_with_features(&s->vdev, f, 1 << VIRTIO_BLK_F_SCSI);
     if (ret) {
         return ret;
     }
diff --git a/hw/virtio-pci.c b/hw/virtio-pci.c
index 3e7bc71..b2a558d 100644
--- a/hw/virtio-pci.c
+++ b/hw/virtio-pci.c
@@ -557,6 +557,12 @@ static unsigned virtio_pci_get_features(void *opaque)
     return proxy->host_features;
 }
 
+static void virtio_pci_force_features(void *opaque, uint32_t extra_features)
+{
+    VirtIOPCIProxy *proxy = opaque;
+    proxy->host_features |= extra_features;
+}
+
 static void virtio_pci_guest_notifier_read(void *opaque)
 {
     VirtQueue *vq = opaque;
@@ -743,6 +749,7 @@ static const VirtIOBindings virtio_pci_bindings = {
     .save_queue = virtio_pci_save_queue,
     .load_queue = virtio_pci_load_queue,
     .get_features = virtio_pci_get_features,
+    .force_features = virtio_pci_force_features,
     .query_guest_notifiers = virtio_pci_query_guest_notifiers,
     .set_host_notifier = virtio_pci_set_host_notifier,
     .set_guest_notifiers = virtio_pci_set_guest_notifiers,
diff --git a/hw/virtio.c b/hw/virtio.c
index 445db6e..c7f39f2 100644
--- a/hw/virtio.c
+++ b/hw/virtio.c
@@ -761,12 +761,12 @@ void virtio_save(VirtIODevice *vdev, QEMUFile *f)
     }
 }
 
-int virtio_load(VirtIODevice *vdev, QEMUFile *f)
+int virtio_load_with_features(VirtIODevice *vdev, QEMUFile *f,
+                              uint32_t extra_features)
 {
     int num, i, ret;
     uint32_t features;
-    uint32_t supported_features =
-        vdev->binding->get_features(vdev->binding_opaque);
+    uint32_t supported_features;
 
     if (vdev->binding->load_config) {
         ret = vdev->binding->load_config(vdev->binding_opaque, f);
@@ -778,6 +778,10 @@ int virtio_load(VirtIODevice *vdev, QEMUFile *f)
     qemu_get_8s(f, &vdev->isr);
     qemu_get_be16s(f, &vdev->queue_sel);
     qemu_get_be32s(f, &features);
+
+    vdev->binding->force_features(vdev->binding_opaque,
+                                  features & extra_features);
+    supported_features = vdev->binding->get_features(vdev->binding_opaque);
     if (features & ~supported_features) {
         fprintf(stderr, "Features 0x%x unsupported. Allowed features: 0x%x\n",
                 features, supported_features);
@@ -812,6 +816,11 @@ int virtio_load(VirtIODevice *vdev, QEMUFile *f)
     return 0;
 }
 
+int virtio_load(VirtIODevice *vdev, QEMUFile *f)
+{
+    return virtio_load_with_features(vdev, f, 0);
+}
+
 void virtio_cleanup(VirtIODevice *vdev)
 {
     qemu_del_vm_change_state_handler(vdev->vmstate);
diff --git a/hw/virtio.h b/hw/virtio.h
index 7ce62c9..d0ddb44 100644
--- a/hw/virtio.h
+++ b/hw/virtio.h
@@ -95,6 +95,7 @@ typedef struct {
     int (*load_config)(void * opaque, QEMUFile *f);
     int (*load_queue)(void * opaque, int n, QEMUFile *f);
     unsigned (*get_features)(void * opaque);
+    void (*force_features)(void * opaque, uint32_t features);
    bool (*query_guest_notifiers)(void * opaque);
    int (*set_guest_notifiers)(void * opaque, bool assigned);
    int (*set_host_notifier)(void * opaque, int n, bool assigned);
@@ -159,6 +160,8 @@ void virtio_notify(VirtIODevice *vdev, VirtQueue *vq);
 void virtio_save(VirtIODevice *vdev, QEMUFile *f);
 
 int virtio_load(VirtIODevice *vdev, QEMUFile *f);
+int virtio_load_with_features(VirtIODevice *vdev, QEMUFile *f,
+                              uint32_t supported_features);
 
 void virtio_cleanup(VirtIODevice *vdev);
 
-- 
1.7.7.6
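
Editor's note: below is a minimal standalone C sketch of the feature-forcing
logic, for readers who want to see the mechanism outside QEMU.  It is not part
of the patch: the QEMU binding and device types are replaced by a plain
uint32_t, and the helpers force_features()/load_with_features() and the main()
harness are illustrative names, not QEMU functions.  Only the step taken from
the patch is real: mask the incoming feature bits with the bits the device is
willing to force (1 << VIRTIO_BLK_F_SCSI), OR them into the host features, and
only then run the compatibility check.

/*
 * Standalone sketch, NOT part of the patch: models how
 * virtio_load_with_features() lets a scsi=off destination adopt the
 * VIRTIO_BLK_F_SCSI bit that the migration source had negotiated.
 */
#include <stdint.h>
#include <stdio.h>

#define VIRTIO_BLK_F_SCSI 7       /* same feature bit number as virtio-blk */

static uint32_t host_features;    /* destination's advertised features */

/* Mirrors virtio_pci_force_features(): turn on the requested extra bits. */
static void force_features(uint32_t extra_features)
{
    host_features |= extra_features;
}

/*
 * Mirrors the load path: 'features' comes from the migration stream,
 * 'extra_features' is the mask of bits the device allows to be forced
 * (1 << VIRTIO_BLK_F_SCSI for virtio-blk).  Returns -1 on a feature
 * mismatch, 0 on success.
 */
static int load_with_features(uint32_t features, uint32_t extra_features)
{
    force_features(features & extra_features);
    if (features & ~host_features) {
        fprintf(stderr, "Features 0x%x unsupported. Allowed features: 0x%x\n",
                features, host_features);
        return -1;
    }
    return 0;
}

int main(void)
{
    uint32_t incoming = 1u << VIRTIO_BLK_F_SCSI; /* source had scsi=on */
    int ret;

    host_features = 0;                           /* destination: scsi=off */
    ret = load_with_features(incoming, 1u << VIRTIO_BLK_F_SCSI);
    printf("load %s, scsi is now %s\n",
           ret == 0 ? "succeeded" : "failed",
           (host_features >> VIRTIO_BLK_F_SCSI) & 1 ? "on" : "off");
    return ret;
}

Running the sketch prints "load succeeded, scsi is now on", mirroring the
"dev-prop: scsi = on" output quoted in the commit message.  With
extra_features set to 0 (the plain virtio_load() wrapper added by the patch)
the same input would fail the check, which is the old behaviour.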