From 00d7e0e572c0fbe3521a2d1ffb6cc704e1abd2eb Mon Sep 17 00:00:00 2001
Message-Id: <00d7e0e572c0fbe3521a2d1ffb6cc704e1abd2eb.1376495567.git.minovotn@redhat.com>
From: Kevin Wolf <kwolf@redhat.com>
Date: Wed, 14 Aug 2013 08:52:45 +0200
Subject: [PATCH 1/6] ceph/rbd block driver for qemu-kvm

RH-Author: Kevin Wolf <kwolf@redhat.com>
Message-id: <1376470370-32441-2-git-send-email-kwolf@redhat.com>
Patchwork-id: 53329
O-Subject: [RHEL-6.5 qemu-kvm PATCH 1/6] ceph/rbd block driver for qemu-kvm
Bugzilla: 988079
RH-Acked-by: Laszlo Ersek <lersek@redhat.com>
RH-Acked-by: Jeffrey Cody <jcody@redhat.com>
RH-Acked-by: Stefan Hajnoczi <stefanha@redhat.com>

From: Christian Brunner <chb@muc.de>

Bugzilla: 988079

This imports the upstream rbd driver as of today, with patches reverted
and small changes applied that should make it build in the RHEL 6 tree.
(This is not actually tested, because the driver won't be built anyway:
the check in configure will disable it.)

The closest upstream state should be that of commit dc7588c1.

A few usage and implementation notes follow after the patch.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 Makefile.objs |    1 +
 block/rbd.c   | 1003 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 configure     |   52 +++
 3 files changed, 1056 insertions(+)
 create mode 100644 block/rbd.c

Signed-off-by: Michal Novotny <minovotn@redhat.com>
---
 Makefile.objs |    1 +
 block/rbd.c   | 1003 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 configure     |   52 +++
 3 files changed, 1056 insertions(+)
 create mode 100644 block/rbd.c

diff --git a/Makefile.objs b/Makefile.objs
index 2e9bdc3..d6815ef 100644
--- a/Makefile.objs
+++ b/Makefile.objs
@@ -32,6 +32,7 @@ block-nested-$(CONFIG_WIN32) += raw-win32.o
 block-nested-$(CONFIG_POSIX) += raw-posix.o
 block-nested-$(CONFIG_CURL) += curl.o
 block-nested-$(CONFIG_GLUSTERFS) += gluster.o
+block-nested-$(CONFIG_RBD) += rbd.o
 
 block-obj-y += $(addprefix block/, $(block-nested-y))
 
diff --git a/block/rbd.c b/block/rbd.c
new file mode 100644
index 0000000..448bed1
--- /dev/null
+++ b/block/rbd.c
@@ -0,0 +1,1003 @@
+/*
+ * QEMU Block driver for RADOS (Ceph)
+ *
+ * Copyright (C) 2010-2011 Christian Brunner <chb@muc.de>,
+ *                         Josh Durgin <josh.durgin@dreamhost.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ * Contributions after 2012-01-13 are licensed under the terms of the
+ * GNU GPL, version 2 or (at your option) any later version.
+ */
+
+#include <inttypes.h>
+
+#include "qemu-common.h"
+#include "qemu-error.h"
+#include "block_int.h"
+
+#include <rbd/librbd.h>
+
+/*
+ * When specifying the image filename use:
+ *
+ * rbd:poolname/devicename[@snapshotname][:option1=value1[:option2=value2...]]
+ *
+ * poolname must be the name of an existing rados pool.
+ *
+ * devicename is the name of the rbd image.
+ *
+ * Each option given is used to configure rados, and may be any valid
+ * Ceph option, "id", or "conf".
+ *
+ * The "id" option indicates what user we should authenticate as to
+ * the Ceph cluster. If it is excluded we will use the Ceph default
+ * (normally 'admin').
+ *
+ * The "conf" option specifies a Ceph configuration file to read. If
+ * it is not specified, we will read from the default Ceph locations
+ * (e.g., /etc/ceph/ceph.conf). To avoid reading _any_ configuration
+ * file, specify conf=/dev/null.
+ *
+ * Configuration values containing :, @, or = can be escaped with a
+ * leading "\".
+ */ + +/* rbd_aio_discard added in 0.1.2 */ +#if LIBRBD_VERSION_CODE >= LIBRBD_VERSION(0, 1, 2) +#define LIBRBD_SUPPORTS_DISCARD +#else +#undef LIBRBD_SUPPORTS_DISCARD +#endif + +#define OBJ_MAX_SIZE (1UL << OBJ_DEFAULT_OBJ_ORDER) + +#define RBD_MAX_CONF_NAME_SIZE 128 +#define RBD_MAX_CONF_VAL_SIZE 512 +#define RBD_MAX_CONF_SIZE 1024 +#define RBD_MAX_POOL_NAME_SIZE 128 +#define RBD_MAX_SNAP_NAME_SIZE 128 +#define RBD_MAX_SNAPS 100 + +typedef enum { + RBD_AIO_READ, + RBD_AIO_WRITE, + RBD_AIO_DISCARD, + RBD_AIO_FLUSH +} RBDAIOCmd; + +typedef struct RBDAIOCB { + BlockDriverAIOCB common; + QEMUBH *bh; + int64_t ret; + QEMUIOVector *qiov; + char *bounce; + RBDAIOCmd cmd; + int64_t sector_num; + int error; + struct BDRVRBDState *s; + int cancelled; + int status; +} RBDAIOCB; + +typedef struct RADOSCB { + int rcbid; + RBDAIOCB *acb; + struct BDRVRBDState *s; + int done; + int64_t size; + char *buf; + int64_t ret; +} RADOSCB; + +#define RBD_FD_READ 0 +#define RBD_FD_WRITE 1 + +typedef struct BDRVRBDState { + int fds[2]; + rados_t cluster; + rados_ioctx_t io_ctx; + rbd_image_t image; + char name[RBD_MAX_IMAGE_NAME_SIZE]; + int qemu_aio_count; + char *snap; + int event_reader_pos; + RADOSCB *event_rcb; +} BDRVRBDState; + +static void rbd_aio_bh_cb(void *opaque); + +static int qemu_rbd_next_tok(char *dst, int dst_len, + char *src, char delim, + const char *name, + char **p) +{ + int l; + char *end; + + *p = NULL; + + if (delim != '\0') { + for (end = src; *end; ++end) { + if (*end == delim) { + break; + } + if (*end == '\\' && end[1] != '\0') { + end++; + } + } + if (*end == delim) { + *p = end + 1; + *end = '\0'; + } + } + l = strlen(src); + if (l >= dst_len) { + error_report("%s too long", name); + return -EINVAL; + } else if (l == 0) { + error_report("%s too short", name); + return -EINVAL; + } + + pstrcpy(dst, dst_len, src); + + return 0; +} + +static void qemu_rbd_unescape(char *src) +{ + char *p; + + for (p = src; *src; ++src, ++p) { + if (*src == '\\' && src[1] != '\0') { + src++; + } + *p = *src; + } + *p = '\0'; +} + +static int qemu_rbd_parsename(const char *filename, + char *pool, int pool_len, + char *snap, int snap_len, + char *name, int name_len, + char *conf, int conf_len) +{ + const char *start; + char *p, *buf; + int ret; + + if (!strstart(filename, "rbd:", &start)) { + return -EINVAL; + } + + buf = g_strdup(start); + p = buf; + *snap = '\0'; + *conf = '\0'; + + ret = qemu_rbd_next_tok(pool, pool_len, p, '/', "pool name", &p); + if (ret < 0 || !p) { + ret = -EINVAL; + goto done; + } + qemu_rbd_unescape(pool); + + if (strchr(p, '@')) { + ret = qemu_rbd_next_tok(name, name_len, p, '@', "object name", &p); + if (ret < 0) { + goto done; + } + ret = qemu_rbd_next_tok(snap, snap_len, p, ':', "snap name", &p); + qemu_rbd_unescape(snap); + } else { + ret = qemu_rbd_next_tok(name, name_len, p, ':', "object name", &p); + } + qemu_rbd_unescape(name); + if (ret < 0 || !p) { + goto done; + } + + ret = qemu_rbd_next_tok(conf, conf_len, p, '\0', "configuration", &p); + +done: + g_free(buf); + return ret; +} + +static char *qemu_rbd_parse_clientname(const char *conf, char *clientname) +{ + const char *p = conf; + + while (*p) { + int len; + const char *end = strchr(p, ':'); + + if (end) { + len = end - p; + } else { + len = strlen(p); + } + + if (strncmp(p, "id=", 3) == 0) { + len -= 3; + strncpy(clientname, p + 3, len); + clientname[len] = '\0'; + return clientname; + } + if (end == NULL) { + break; + } + p = end + 1; + } + return NULL; +} + +static int qemu_rbd_set_conf(rados_t cluster, const 
char *conf) +{ + char *p, *buf; + char name[RBD_MAX_CONF_NAME_SIZE]; + char value[RBD_MAX_CONF_VAL_SIZE]; + int ret = 0; + + buf = g_strdup(conf); + p = buf; + + while (p) { + ret = qemu_rbd_next_tok(name, sizeof(name), p, + '=', "conf option name", &p); + if (ret < 0) { + break; + } + qemu_rbd_unescape(name); + + if (!p) { + error_report("conf option %s has no value", name); + ret = -EINVAL; + break; + } + + ret = qemu_rbd_next_tok(value, sizeof(value), p, + ':', "conf option value", &p); + if (ret < 0) { + break; + } + qemu_rbd_unescape(value); + + if (strcmp(name, "conf") == 0) { + ret = rados_conf_read_file(cluster, value); + if (ret < 0) { + error_report("error reading conf file %s", value); + break; + } + } else if (strcmp(name, "id") == 0) { + /* ignore, this is parsed by qemu_rbd_parse_clientname() */ + } else { + ret = rados_conf_set(cluster, name, value); + if (ret < 0) { + error_report("invalid conf option %s", name); + ret = -EINVAL; + break; + } + } + } + + g_free(buf); + return ret; +} + +static int qemu_rbd_create(const char *filename, QEMUOptionParameter *options) +{ + int64_t bytes = 0; + int64_t objsize; + int obj_order = 0; + char pool[RBD_MAX_POOL_NAME_SIZE]; + char name[RBD_MAX_IMAGE_NAME_SIZE]; + char snap_buf[RBD_MAX_SNAP_NAME_SIZE]; + char conf[RBD_MAX_CONF_SIZE]; + char clientname_buf[RBD_MAX_CONF_SIZE]; + char *clientname; + rados_t cluster; + rados_ioctx_t io_ctx; + int ret; + + if (qemu_rbd_parsename(filename, pool, sizeof(pool), + snap_buf, sizeof(snap_buf), + name, sizeof(name), + conf, sizeof(conf)) < 0) { + return -EINVAL; + } + + /* Read out options */ + while (options && options->name) { + if (!strcmp(options->name, BLOCK_OPT_SIZE)) { + bytes = options->value.n; + } else if (!strcmp(options->name, BLOCK_OPT_CLUSTER_SIZE)) { + if (options->value.n) { + objsize = options->value.n; + if ((objsize - 1) & objsize) { /* not a power of 2? */ + error_report("obj size needs to be power of 2"); + return -EINVAL; + } + if (objsize < 4096) { + error_report("obj size too small"); + return -EINVAL; + } + obj_order = ffs(objsize) - 1; + } + } + options++; + } + + clientname = qemu_rbd_parse_clientname(conf, clientname_buf); + if (rados_create(&cluster, clientname) < 0) { + error_report("error initializing"); + return -EIO; + } + + if (strstr(conf, "conf=") == NULL) { + /* try default location, but ignore failure */ + rados_conf_read_file(cluster, NULL); + } + + if (conf[0] != '\0' && + qemu_rbd_set_conf(cluster, conf) < 0) { + error_report("error setting config options"); + rados_shutdown(cluster); + return -EIO; + } + + if (rados_connect(cluster) < 0) { + error_report("error connecting"); + rados_shutdown(cluster); + return -EIO; + } + + if (rados_ioctx_create(cluster, pool, &io_ctx) < 0) { + error_report("error opening pool %s", pool); + rados_shutdown(cluster); + return -EIO; + } + + ret = rbd_create(io_ctx, name, bytes, &obj_order); + rados_ioctx_destroy(io_ctx); + rados_shutdown(cluster); + + return ret; +} + +/* + * This aio completion is being called from qemu_rbd_aio_event_reader() + * and runs in qemu context. It schedules a bh, but just in case the aio + * was not cancelled before. 
+ */ +static void qemu_rbd_complete_aio(RADOSCB *rcb) +{ + RBDAIOCB *acb = rcb->acb; + int64_t r; + + r = rcb->ret; + + if (acb->cmd != RBD_AIO_READ) { + if (r < 0) { + acb->ret = r; + acb->error = 1; + } else if (!acb->error) { + acb->ret = rcb->size; + } + } else { + if (r < 0) { + memset(rcb->buf, 0, rcb->size); + acb->ret = r; + acb->error = 1; + } else if (r < rcb->size) { + memset(rcb->buf + r, 0, rcb->size - r); + if (!acb->error) { + acb->ret = rcb->size; + } + } else if (!acb->error) { + acb->ret = r; + } + } + /* Note that acb->bh can be NULL in case where the aio was cancelled */ + acb->bh = qemu_bh_new(rbd_aio_bh_cb, acb); + qemu_bh_schedule(acb->bh); + g_free(rcb); +} + +/* + * aio fd read handler. It runs in the qemu context and calls the + * completion handling of completed rados aio operations. + */ +static void qemu_rbd_aio_event_reader(void *opaque) +{ + BDRVRBDState *s = opaque; + + ssize_t ret; + + do { + char *p = (char *)&s->event_rcb; + + /* now read the rcb pointer that was sent from a non qemu thread */ + ret = read(s->fds[RBD_FD_READ], p + s->event_reader_pos, + sizeof(s->event_rcb) - s->event_reader_pos); + if (ret > 0) { + s->event_reader_pos += ret; + if (s->event_reader_pos == sizeof(s->event_rcb)) { + s->event_reader_pos = 0; + qemu_rbd_complete_aio(s->event_rcb); + s->qemu_aio_count--; + } + } + } while (ret < 0 && errno == EINTR); +} + +static int qemu_rbd_aio_flush_cb(void *opaque) +{ + BDRVRBDState *s = opaque; + + return (s->qemu_aio_count > 0); +} + +static int qemu_rbd_open(BlockDriverState *bs, const char *filename, int flags) +{ + BDRVRBDState *s = bs->opaque; + char pool[RBD_MAX_POOL_NAME_SIZE]; + char snap_buf[RBD_MAX_SNAP_NAME_SIZE]; + char conf[RBD_MAX_CONF_SIZE]; + char clientname_buf[RBD_MAX_CONF_SIZE]; + char *clientname; + int r; + + if (qemu_rbd_parsename(filename, pool, sizeof(pool), + snap_buf, sizeof(snap_buf), + s->name, sizeof(s->name), + conf, sizeof(conf)) < 0) { + return -EINVAL; + } + + clientname = qemu_rbd_parse_clientname(conf, clientname_buf); + r = rados_create(&s->cluster, clientname); + if (r < 0) { + error_report("error initializing"); + return r; + } + + s->snap = NULL; + if (snap_buf[0] != '\0') { + s->snap = g_strdup(snap_buf); + } + + /* + * Fallback to more conservative semantics if setting cache + * options fails. Ignore errors from setting rbd_cache because the + * only possible error is that the option does not exist, and + * librbd defaults to no caching. If write through caching cannot + * be set up, fall back to no caching. 
+ */ + if (flags & BDRV_O_NOCACHE) { + rados_conf_set(s->cluster, "rbd_cache", "false"); + } else { + rados_conf_set(s->cluster, "rbd_cache", "true"); + if (!(flags & BDRV_O_CACHE_WB)) { + r = rados_conf_set(s->cluster, "rbd_cache_max_dirty", "0"); + if (r < 0) { + rados_conf_set(s->cluster, "rbd_cache", "false"); + } + } + } + + if (strstr(conf, "conf=") == NULL) { + /* try default location, but ignore failure */ + rados_conf_read_file(s->cluster, NULL); + } + + if (conf[0] != '\0') { + r = qemu_rbd_set_conf(s->cluster, conf); + if (r < 0) { + error_report("error setting config options"); + goto failed_shutdown; + } + } + + r = rados_connect(s->cluster); + if (r < 0) { + error_report("error connecting"); + goto failed_shutdown; + } + + r = rados_ioctx_create(s->cluster, pool, &s->io_ctx); + if (r < 0) { + error_report("error opening pool %s", pool); + goto failed_shutdown; + } + + r = rbd_open(s->io_ctx, s->name, &s->image, s->snap); + if (r < 0) { + error_report("error reading header from %s", s->name); + goto failed_open; + } + + bs->read_only = (s->snap != NULL); + + s->event_reader_pos = 0; + r = qemu_pipe(s->fds); + if (r < 0) { + error_report("error opening eventfd"); + goto failed; + } + fcntl(s->fds[0], F_SETFL, O_NONBLOCK); + fcntl(s->fds[1], F_SETFL, O_NONBLOCK); + qemu_aio_set_fd_handler(s->fds[RBD_FD_READ], qemu_rbd_aio_event_reader, + NULL, qemu_rbd_aio_flush_cb, NULL, s); + + + return 0; + +failed: + rbd_close(s->image); +failed_open: + rados_ioctx_destroy(s->io_ctx); +failed_shutdown: + rados_shutdown(s->cluster); + g_free(s->snap); + return r; +} + +static void qemu_rbd_close(BlockDriverState *bs) +{ + BDRVRBDState *s = bs->opaque; + + close(s->fds[0]); + close(s->fds[1]); + qemu_aio_set_fd_handler(s->fds[RBD_FD_READ], NULL, NULL, NULL, NULL, NULL); + + rbd_close(s->image); + rados_ioctx_destroy(s->io_ctx); + g_free(s->snap); + rados_shutdown(s->cluster); +} + +/* + * Cancel aio. Since we don't reference acb in a non qemu threads, + * it is safe to access it here. + */ +static void qemu_rbd_aio_cancel(BlockDriverAIOCB *blockacb) +{ + RBDAIOCB *acb = (RBDAIOCB *) blockacb; + acb->cancelled = 1; + + while (acb->status == -EINPROGRESS) { + qemu_aio_wait(); + } + + qemu_aio_release(acb); +} + +static AIOPool rbd_aiocb_info = { + .aiocb_size = sizeof(RBDAIOCB), + .cancel = qemu_rbd_aio_cancel, +}; + +static int qemu_rbd_send_pipe(BDRVRBDState *s, RADOSCB *rcb) +{ + int ret = 0; + while (1) { + fd_set wfd; + int fd = s->fds[RBD_FD_WRITE]; + + /* send the op pointer to the qemu thread that is responsible + for the aio/op completion. Must do it in a qemu thread context */ + ret = write(fd, (void *)&rcb, sizeof(rcb)); + if (ret >= 0) { + break; + } + if (errno == EINTR) { + continue; + } + if (errno != EAGAIN) { + break; + } + + FD_ZERO(&wfd); + FD_SET(fd, &wfd); + do { + ret = select(fd + 1, NULL, &wfd, NULL, NULL); + } while (ret < 0 && errno == EINTR); + } + + return ret; +} + +/* + * This is the callback function for rbd_aio_read and _write + * + * Note: this function is being called from a non qemu thread so + * we need to be careful about what we do here. Generally we only + * write to the block notification pipe, and do the rest of the + * io completion handling from qemu_rbd_aio_event_reader() which + * runs in a qemu context. 
+ */ +static void rbd_finish_aiocb(rbd_completion_t c, RADOSCB *rcb) +{ + int ret; + rcb->ret = rbd_aio_get_return_value(c); + rbd_aio_release(c); + ret = qemu_rbd_send_pipe(rcb->s, rcb); + if (ret < 0) { + error_report("failed writing to acb->s->fds"); + g_free(rcb); + } +} + +/* Callback when all queued rbd_aio requests are complete */ + +static void rbd_aio_bh_cb(void *opaque) +{ + RBDAIOCB *acb = opaque; + + if (acb->cmd == RBD_AIO_READ) { + qemu_iovec_from_buffer(acb->qiov, acb->bounce, acb->qiov->size); + } + qemu_vfree(acb->bounce); + acb->common.cb(acb->common.opaque, (acb->ret > 0 ? 0 : acb->ret)); + qemu_bh_delete(acb->bh); + acb->bh = NULL; + acb->status = 0; + + if (!acb->cancelled) { + qemu_aio_release(acb); + } +} + +static int rbd_aio_discard_wrapper(rbd_image_t image, + uint64_t off, + uint64_t len, + rbd_completion_t comp) +{ +#ifdef LIBRBD_SUPPORTS_DISCARD + return rbd_aio_discard(image, off, len, comp); +#else + return -ENOTSUP; +#endif +} + +static int rbd_aio_flush_wrapper(rbd_image_t image, + rbd_completion_t comp) +{ +#ifdef LIBRBD_SUPPORTS_AIO_FLUSH + return rbd_aio_flush(image, comp); +#else + return -ENOTSUP; +#endif +} + +static BlockDriverAIOCB *rbd_start_aio(BlockDriverState *bs, + int64_t sector_num, + QEMUIOVector *qiov, + int nb_sectors, + BlockDriverCompletionFunc *cb, + void *opaque, + RBDAIOCmd cmd) +{ + RBDAIOCB *acb; + RADOSCB *rcb; + rbd_completion_t c; + int64_t off, size; + char *buf; + int r; + + BDRVRBDState *s = bs->opaque; + + acb = qemu_aio_get(&rbd_aiocb_info, bs, cb, opaque); + acb->cmd = cmd; + acb->qiov = qiov; + if (cmd == RBD_AIO_DISCARD || cmd == RBD_AIO_FLUSH) { + acb->bounce = NULL; + } else { + acb->bounce = qemu_blockalign(bs, qiov->size); + } + acb->ret = 0; + acb->error = 0; + acb->s = s; + acb->cancelled = 0; + acb->bh = NULL; + acb->status = -EINPROGRESS; + + if (cmd == RBD_AIO_WRITE) { + qemu_iovec_to_buffer(acb->qiov, acb->bounce); + } + + buf = acb->bounce; + + off = sector_num * BDRV_SECTOR_SIZE; + size = nb_sectors * BDRV_SECTOR_SIZE; + + s->qemu_aio_count++; /* All the RADOSCB */ + + rcb = g_malloc(sizeof(RADOSCB)); + rcb->done = 0; + rcb->acb = acb; + rcb->buf = buf; + rcb->s = acb->s; + rcb->size = size; + r = rbd_aio_create_completion(rcb, (rbd_callback_t) rbd_finish_aiocb, &c); + if (r < 0) { + goto failed; + } + + switch (cmd) { + case RBD_AIO_WRITE: + r = rbd_aio_write(s->image, off, size, buf, c); + break; + case RBD_AIO_READ: + r = rbd_aio_read(s->image, off, size, buf, c); + break; + case RBD_AIO_DISCARD: + r = rbd_aio_discard_wrapper(s->image, off, size, c); + break; + case RBD_AIO_FLUSH: + r = rbd_aio_flush_wrapper(s->image, c); + break; + default: + r = -EINVAL; + } + + if (r < 0) { + goto failed; + } + + return &acb->common; + +failed: + g_free(rcb); + s->qemu_aio_count--; + qemu_aio_release(acb); + return NULL; +} + +static BlockDriverAIOCB *qemu_rbd_aio_readv(BlockDriverState *bs, + int64_t sector_num, + QEMUIOVector *qiov, + int nb_sectors, + BlockDriverCompletionFunc *cb, + void *opaque) +{ + return rbd_start_aio(bs, sector_num, qiov, nb_sectors, cb, opaque, + RBD_AIO_READ); +} + +static BlockDriverAIOCB *qemu_rbd_aio_writev(BlockDriverState *bs, + int64_t sector_num, + QEMUIOVector *qiov, + int nb_sectors, + BlockDriverCompletionFunc *cb, + void *opaque) +{ + return rbd_start_aio(bs, sector_num, qiov, nb_sectors, cb, opaque, + RBD_AIO_WRITE); +} + +#ifdef LIBRBD_SUPPORTS_AIO_FLUSH +static BlockDriverAIOCB *qemu_rbd_aio_flush(BlockDriverState *bs, + BlockDriverCompletionFunc *cb, + void *opaque) +{ + return 
rbd_start_aio(bs, 0, NULL, 0, cb, opaque, RBD_AIO_FLUSH);
+}
+
+#else
+
+static int qemu_rbd_co_flush(BlockDriverState *bs)
+{
+#if LIBRBD_VERSION_CODE >= LIBRBD_VERSION(0, 1, 1)
+    /* rbd_flush added in 0.1.1 */
+    BDRVRBDState *s = bs->opaque;
+    return rbd_flush(s->image);
+#else
+    return 0;
+#endif
+}
+#endif
+
+static int qemu_rbd_getinfo(BlockDriverState *bs, BlockDriverInfo *bdi)
+{
+    BDRVRBDState *s = bs->opaque;
+    rbd_image_info_t info;
+    int r;
+
+    r = rbd_stat(s->image, &info, sizeof(info));
+    if (r < 0) {
+        return r;
+    }
+
+    bdi->cluster_size = info.obj_size;
+    return 0;
+}
+
+static int64_t qemu_rbd_getlength(BlockDriverState *bs)
+{
+    BDRVRBDState *s = bs->opaque;
+    rbd_image_info_t info;
+    int r;
+
+    r = rbd_stat(s->image, &info, sizeof(info));
+    if (r < 0) {
+        return r;
+    }
+
+    return info.size;
+}
+
+static int qemu_rbd_truncate(BlockDriverState *bs, int64_t offset)
+{
+    BDRVRBDState *s = bs->opaque;
+    int r;
+
+    r = rbd_resize(s->image, offset);
+    if (r < 0) {
+        return r;
+    }
+
+    return 0;
+}
+
+static int qemu_rbd_snap_create(BlockDriverState *bs,
+                                QEMUSnapshotInfo *sn_info)
+{
+    BDRVRBDState *s = bs->opaque;
+    int r;
+
+    if (sn_info->name[0] == '\0') {
+        return -EINVAL; /* we need a name for rbd snapshots */
+    }
+
+    /*
+     * rbd snapshots use the name as the user-controlled unique
+     * identifier; we can't use the rbd snapid for that purpose, as it
+     * can't be set.
+     */
+    if (sn_info->id_str[0] != '\0' &&
+        strcmp(sn_info->id_str, sn_info->name) != 0) {
+        return -EINVAL;
+    }
+
+    if (strlen(sn_info->name) >= sizeof(sn_info->id_str)) {
+        return -ERANGE;
+    }
+
+    r = rbd_snap_create(s->image, sn_info->name);
+    if (r < 0) {
+        error_report("failed to create snap: %s", strerror(-r));
+        return r;
+    }
+
+    return 0;
+}
+
+static int qemu_rbd_snap_remove(BlockDriverState *bs,
+                                const char *snapshot_name)
+{
+    BDRVRBDState *s = bs->opaque;
+    int r;
+
+    r = rbd_snap_remove(s->image, snapshot_name);
+    return r;
+}
+
+static int qemu_rbd_snap_rollback(BlockDriverState *bs,
+                                  const char *snapshot_name)
+{
+    BDRVRBDState *s = bs->opaque;
+    int r;
+
+    r = rbd_snap_rollback(s->image, snapshot_name);
+    return r;
+}
+
+static int qemu_rbd_snap_list(BlockDriverState *bs,
+                              QEMUSnapshotInfo **psn_tab)
+{
+    BDRVRBDState *s = bs->opaque;
+    QEMUSnapshotInfo *sn_info, *sn_tab = NULL;
+    int i, snap_count;
+    rbd_snap_info_t *snaps;
+    int max_snaps = RBD_MAX_SNAPS;
+
+    do {
+        snaps = g_malloc(sizeof(*snaps) * max_snaps);
+        snap_count = rbd_snap_list(s->image, snaps, &max_snaps);
+        if (snap_count < 0) {
+            g_free(snaps);
+        }
+    } while (snap_count == -ERANGE);
+
+    if (snap_count <= 0) {
+        goto done;
+    }
+
+    sn_tab = g_malloc0(snap_count * sizeof(QEMUSnapshotInfo));
+
+    for (i = 0; i < snap_count; i++) {
+        const char *snap_name = snaps[i].name;
+
+        sn_info = sn_tab + i;
+        pstrcpy(sn_info->id_str, sizeof(sn_info->id_str), snap_name);
+        pstrcpy(sn_info->name, sizeof(sn_info->name), snap_name);
+
+        sn_info->vm_state_size = snaps[i].size;
+        sn_info->date_sec = 0;
+        sn_info->date_nsec = 0;
+        sn_info->vm_clock_nsec = 0;
+    }
+    rbd_snap_list_end(snaps);
+
+ done:
+    *psn_tab = sn_tab;
+    return snap_count;
+}
+
+#ifdef LIBRBD_SUPPORTS_DISCARD
+static BlockDriverAIOCB* qemu_rbd_aio_discard(BlockDriverState *bs,
+                                              int64_t sector_num,
+                                              int nb_sectors,
+                                              BlockDriverCompletionFunc *cb,
+                                              void *opaque)
+{
+    return rbd_start_aio(bs, sector_num, NULL, nb_sectors, cb, opaque,
+                         RBD_AIO_DISCARD);
+}
+#endif
+
+static QEMUOptionParameter qemu_rbd_create_options[] = {
+    {
+        .name = BLOCK_OPT_SIZE,
+        .type = OPT_SIZE,
+        .help = "Virtual disk size"
+    },
+    {
+        .name = BLOCK_OPT_CLUSTER_SIZE,
+        .type = OPT_SIZE,
+        .help = "RBD object size"
+    },
+    {NULL}
+};
+
+static BlockDriver bdrv_rbd = {
+    .format_name        = "rbd",
+    .instance_size      = sizeof(BDRVRBDState),
+    .bdrv_file_open     = qemu_rbd_open,
+    .bdrv_close         = qemu_rbd_close,
+    .bdrv_create        = qemu_rbd_create,
+    .bdrv_get_info      = qemu_rbd_getinfo,
+    .create_options     = qemu_rbd_create_options,
+    .bdrv_getlength     = qemu_rbd_getlength,
+    .bdrv_truncate      = qemu_rbd_truncate,
+    .protocol_name      = "rbd",
+
+    .bdrv_aio_readv     = qemu_rbd_aio_readv,
+    .bdrv_aio_writev    = qemu_rbd_aio_writev,
+
+#ifdef LIBRBD_SUPPORTS_AIO_FLUSH
+    .bdrv_aio_flush     = qemu_rbd_aio_flush,
+#else
+    .bdrv_co_flush      = qemu_rbd_co_flush,
+#endif
+
+#ifdef LIBRBD_SUPPORTS_DISCARD
+    .bdrv_aio_discard   = qemu_rbd_aio_discard,
+#endif
+
+    .bdrv_snapshot_create = qemu_rbd_snap_create,
+    .bdrv_snapshot_delete = qemu_rbd_snap_remove,
+    .bdrv_snapshot_list   = qemu_rbd_snap_list,
+    .bdrv_snapshot_goto   = qemu_rbd_snap_rollback,
+};
+
+static void bdrv_rbd_init(void)
+{
+    bdrv_register(&bdrv_rbd);
+}
+
+block_init(bdrv_rbd_init);
diff --git a/configure b/configure
index d9987f9..1f932d8 100755
--- a/configure
+++ b/configure
@@ -284,6 +284,7 @@ zero_malloc=""
 trace_backend="nop"
 fake_machine="no"
 spice=""
+rbd=""
 smartcard=""
 smartcard_nss=""
 live_snapshots="yes"
@@ -723,6 +724,10 @@ for opt do
   ;;
   --enable-glusterfs) glusterfs="yes"
   ;;
+  --disable-rbd) rbd="no"
+  ;;
+  --enable-rbd) rbd="yes"
+  ;;
   *) echo "ERROR: unknown option $opt"; show_help="yes"
   ;;
   esac
@@ -895,6 +900,7 @@ echo "  --disable-fake-machine   disable -fake-machine option"
 echo "  --enable-fake-machine    enable -fake-machine option"
 echo "  --disable-spice          disable spice"
 echo "  --enable-spice           enable spice"
+echo "  --enable-rbd             enable building the rados block device (rbd)"
 echo "  --disable-smartcard      disable smartcard support"
 echo "  --enable-smartcard       enable smartcard support"
 echo "  --disable-smartcard-nss  disable smartcard nss support"
@@ -1735,6 +1741,48 @@ if test "$mingw32" != yes -a "$pthread" = no; then
 fi
 
 ##########################################
+# rbd probe
+if test "$rbd" != "no" ; then
+  cat > $TMPC <<EOF
+#include <stdio.h>
+#include <rados/librados.h>
+int main(void) { rados_initialize(0, NULL); return 0; }
+EOF
+  rbd_libs="-lrados -lcrypto"
+  if compile_prog "" "$rbd_libs" ; then
+    librados_too_old=no
+    cat > $TMPC <<EOF
+#include <stdio.h>
+#include <rados/librados.h>
+#ifndef CEPH_OSD_TMAP_SET
+#error missing CEPH_OSD_TMAP_SET
+#endif
+int main(void) {
+    int (*func)(const rados_pool_t pool, uint64_t *snapid) = rados_selfmanaged_snap_create;
+    rados_initialize(0, NULL);
+    return 0;
+}
+EOF
+    if compile_prog "" "$rbd_libs" ; then
+      rbd=yes
+      libs_tools="$rbd_libs $libs_tools"
+      libs_softmmu="$rbd_libs $libs_softmmu"
+    else
+      rbd=no
+      librados_too_old=yes
+    fi
+  else
+    if test "$rbd" = "yes" ; then
+      feature_not_found "rados block device"
+    fi
+    rbd=no
+  fi
+  if test "$librados_too_old" = "yes" ; then
+    echo "-> Your librados version is too old - upgrade needed to have rbd support"
+  fi
+fi
+
+##########################################
 # linux-aio probe
 
 if test "$linux_aio" != "no" ; then
@@ -2375,6 +2423,7 @@ echo "vhost-net support $vhost_net"
 echo "-fake-machine     $fake_machine"
 echo "Trace backend     $trace_backend"
 echo "spice support     $spice ($spice_protocol_version/$spice_server_version)"
+echo "rbd support       $rbd"
 echo "nss used          $smartcard_nss"
 echo "Live snapshots    $live_snapshots"
 echo "Block streaming   $block_stream"
@@ -2666,6 +2715,9 @@ echo "CONFIG_UNAME_RELEASE=\"$uname_release\"" >> $config_host_mak
 if test "$zero_malloc" = "yes" ;
then echo "CONFIG_ZERO_MALLOC=y" >> $config_host_mak fi +if test "$rbd" = "yes" ; then + echo "CONFIG_RBD=y" >> $config_host_mak +fi if test "$has_environ" = "yes" ; then echo "CONFIG_HAS_ENVIRON=y" >> $config_host_mak -- 1.7.11.7
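
A few notes to go with the patch; none of this is part of the commit
itself, and the pool/image/snapshot names below are made up.

In practice the image syntax documented at the top of block/rbd.c is
used like this (the pool must already exist in the cluster):

  qemu-img create -f rbd rbd:rbd/myimage 10G
  qemu-img info rbd:rbd/myimage@mysnap
  qemu-kvm -drive file=rbd:rbd/myimage:id=admin:conf=/etc/ceph/ceph.conf,format=raw,if=virtio

Since ':' separates the options, a name or value containing ':', '@' or
'=' needs a leading backslash, e.g. an image called "my:image" is
written as rbd:rbd/my\:image.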
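On the cluster_size creation option: qemu_rbd_create() maps it onto
RBD's object order. The size must be a power of two (the test
"(objsize - 1) & objsize" is zero exactly for powers of two) and at
least 4096 bytes, and is then converted with obj_order = ffs(objsize) - 1.
For example, a 4 MiB object size (1 << 22) becomes obj_order = 22; if
the option is not given, obj_order stays 0 and rbd_create() uses
librbd's default order.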
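The threading model is the subtle part of the driver: librbd invokes
completion callbacks on its own threads, so rbd_finish_aiocb() must not
touch qemu state and only pushes the RADOSCB pointer through a pipe,
while qemu_rbd_aio_event_reader() reads it back from the fd handler and
completes the request in qemu context. The following is a minimal,
self-contained sketch of that self-pipe handoff (plain POSIX, not
driver code):

  /* build: cc -pthread sketch.c */
  #include <pthread.h>
  #include <stdio.h>
  #include <stdlib.h>
  #include <unistd.h>

  struct completion {
      int ret;                    /* result of the fake I/O */
  };

  static int fds[2];              /* fds[0] read end, fds[1] write end */

  /* Worker thread, in the role of rbd_finish_aiocb(): do nothing here
   * except hand the pointer over through the pipe. */
  static void *worker(void *arg)
  {
      struct completion *c = malloc(sizeof(*c));

      c->ret = 42;
      if (write(fds[1], &c, sizeof(c)) != sizeof(c)) {
          perror("write");
      }
      return NULL;
  }

  int main(void)
  {
      pthread_t thr;
      struct completion *c;

      if (pipe(fds) < 0) {
          perror("pipe");
          return 1;
      }
      pthread_create(&thr, NULL, worker, NULL);

      /* Event-loop thread, in the role of qemu_rbd_aio_event_reader():
       * read the pointer back and complete the request here. */
      if (read(fds[0], &c, sizeof(c)) == sizeof(c)) {
          printf("aio completed, ret=%d\n", c->ret);
          free(c);
      }
      pthread_join(thr, NULL);
      return 0;
  }

The real driver additionally has to cope with short reads (the
event_reader_pos counter reassembles a partially read pointer) and with
EAGAIN on the write side (the select() loop in qemu_rbd_send_pipe()),
which the sketch leaves out.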
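On the build side: the configure probe first links a trivial
rados_initialize() program against "-lrados -lcrypto". If that works,
a second program referencing CEPH_OSD_TMAP_SET and
rados_selfmanaged_snap_create rejects a librados that is too old. Note
that --enable-rbd only turns a completely missing librados into a hard
feature_not_found error; an outdated librados merely prints the
"-> Your librados version is too old" warning and the driver is
disabled. On success the summary at the end of configure shows it:

  $ ./configure --enable-rbd
  ...
  rbd support       yes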