From a0f232a8ffa067781ae61b96732733a438372d19 Mon Sep 17 00:00:00 2001
From: Fam Zheng
Date: Fri, 21 Jun 2013 06:19:59 +0200
Subject: [PATCH 03/21] block: add I/O throttling algorithm

RH-Author: Fam Zheng
Message-id: <1371795611-7208-4-git-send-email-famz@redhat.com>
Patchwork-id: 52077
O-Subject: [PATCH RHEL-6.5 qemu-kvm v3 03/15] block: add I/O throttling algorithm
Bugzilla: 956825
RH-Acked-by: Stefan Hajnoczi
RH-Acked-by: Paolo Bonzini
RH-Acked-by: Laszlo Ersek

From: Zhi Yong Wu

Signed-off-by: Zhi Yong Wu
Signed-off-by: Stefan Hajnoczi
Signed-off-by: Kevin Wolf
(cherry picked from commit 98f90dba5ee56f699b28509a6cc7a9a8a57636eb)

The timer function name is different downstream; use qemu_new_timer
(the resolution is the same).

Signed-off-by: Fam Zheng

Conflicts:
	block.c
---
 block.c     | 234 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 block.h     |   1 +
 block_int.h |   1 +
 3 files changed, 236 insertions(+)

Signed-off-by: Miroslav Rezanina
---
 block.c     | 234 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 block.h     |   1 +
 block_int.h |   1 +
 3 files changed, 236 insertions(+), 0 deletions(-)

diff --git a/block.c b/block.c
index 523290d..c1eda27 100644
--- a/block.c
+++ b/block.c
@@ -81,6 +81,13 @@ static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                                bool is_write);
 static void coroutine_fn bdrv_co_do_rw(void *opaque);
 
+static bool bdrv_exceed_bps_limits(BlockDriverState *bs, int nb_sectors,
+        bool is_write, double elapsed_time, uint64_t *wait);
+static bool bdrv_exceed_iops_limits(BlockDriverState *bs, bool is_write,
+        double elapsed_time, uint64_t *wait);
+static bool bdrv_exceed_io_limits(BlockDriverState *bs, int nb_sectors,
+        bool is_write, int64_t *wait);
+
 static QTAILQ_HEAD(, BlockDriverState) bdrv_states =
     QTAILQ_HEAD_INITIALIZER(bdrv_states);
 
@@ -114,6 +121,24 @@ int is_windows_drive(const char *filename)
 #endif
 
 /* throttling disk I/O limits */
+void bdrv_io_limits_disable(BlockDriverState *bs)
+{
+    bs->io_limits_enabled = false;
+
+    while (qemu_co_queue_next(&bs->throttled_reqs));
+
+    if (bs->block_timer) {
+        qemu_del_timer(bs->block_timer);
+        qemu_free_timer(bs->block_timer);
+        bs->block_timer = NULL;
+    }
+
+    bs->slice_start = 0;
+    bs->slice_end = 0;
+    bs->slice_time = 0;
+    memset(&bs->io_base, 0, sizeof(bs->io_base));
+}
+
 static void bdrv_block_timer(void *opaque)
 {
     BlockDriverState *bs = opaque;
@@ -143,6 +168,31 @@ bool bdrv_io_limits_enabled(BlockDriverState *bs)
          || io_limits->iops[BLOCK_IO_LIMIT_TOTAL];
 }
 
+static void bdrv_io_limits_intercept(BlockDriverState *bs,
+                                     bool is_write, int nb_sectors)
+{
+    int64_t wait_time = -1;
+
+    if (!qemu_co_queue_empty(&bs->throttled_reqs)) {
+        qemu_co_queue_wait(&bs->throttled_reqs);
+    }
+
+    /* Keep each request's timing in FIFO order.  The next throttled
+     * request will not be dequeued until the current request has been
+     * allowed to be serviced.  So if the current request still exceeds
+     * the limits, it is re-inserted at the head of the queue, and all
+     * requests that follow it remain in the throttled_reqs queue.
+     */
+
+    while (bdrv_exceed_io_limits(bs, nb_sectors, is_write, &wait_time)) {
+        qemu_mod_timer(bs->block_timer,
+                       wait_time + qemu_get_clock(vm_clock));
+        qemu_co_queue_wait_insert_head(&bs->throttled_reqs);
+    }
+
+    qemu_co_queue_next(&bs->throttled_reqs);
+}
+
 /* check if the path starts with ":" */
 int path_has_protocol(const char *path)
 {
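The FIFO discipline described in the comment above is the heart of
bdrv_io_limits_intercept().  As a rough illustration, the following
standalone C sketch (not QEMU code: a fixed-size ring buffer stands in for
throttled_reqs, and a plain byte budget stands in for bdrv_exceed_io_limits()
plus the slice timer) shows why re-inserting an over-limit request at the
head of the queue keeps the requests behind it from overtaking it:

#include <stdio.h>

#define MAX_REQS 8

typedef struct {
    int id;
    int bytes;
} Req;

/* Fixed-size ring buffer used as a deque: pop from the head, push at
 * either end.  A toy stand-in for QEMU's CoQueue. */
typedef struct {
    Req reqs[MAX_REQS];
    int head, len;
} ReqQueue;

static void push_tail(ReqQueue *q, Req r)
{
    q->reqs[(q->head + q->len) % MAX_REQS] = r;
    q->len++;
}

static void push_head(ReqQueue *q, Req r)
{
    q->head = (q->head + MAX_REQS - 1) % MAX_REQS;
    q->reqs[q->head] = r;
    q->len++;
}

static Req pop_head(ReqQueue *q)
{
    Req r = q->reqs[q->head];
    q->head = (q->head + 1) % MAX_REQS;
    q->len--;
    return r;
}

int main(void)
{
    ReqQueue q = { .head = 0, .len = 0 };
    int budget = 1000;          /* bytes allowed in the current slice */
    int i;

    for (i = 0; i < 4; i++) {
        Req r = { i, 400 };     /* each request fits in one full budget */
        push_tail(&q, r);
    }

    /* Like the while () loop in bdrv_io_limits_intercept(): a request
     * that exceeds the remaining budget goes back to the *head* of the
     * queue, so the requests queued behind it cannot overtake it. */
    while (q.len > 0) {
        Req r = pop_head(&q);
        if (r.bytes > budget) {
            push_head(&q, r);   /* cf. qemu_co_queue_wait_insert_head() */
            printf("req %d throttled, waiting for a new slice\n", r.id);
            budget = 1000;      /* stand-in for the timer opening a new slice */
            continue;
        }
        budget -= r.bytes;
        printf("req %d dispatched, %d bytes left in slice\n", r.id, budget);
    }
    return 0;
}

Building and running this dispatches requests 0 and 1, throttles request 2
once, then dispatches 2 and 3 in order, mirroring how throttled coroutines
are woken one at a time via qemu_co_queue_next().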
@@ -738,6 +788,11 @@ int bdrv_open(BlockDriverState *bs, const char *filename, int flags,
         }
     }
 
+    /* throttling disk I/O limits */
+    if (bs->io_limits_enabled) {
+        bdrv_io_limits_enable(bs);
+    }
+
     return 0;
 
 unlink_and_fail:
@@ -1012,6 +1067,11 @@ void bdrv_close(BlockDriverState *bs)
             bdrv_dev_change_media_cb(bs, false);
         }
     }
+
+    /* throttling disk I/O limits */
+    if (bs->io_limits_enabled) {
+        bdrv_io_limits_disable(bs);
+    }
 }
 
 void bdrv_close_all(void)
@@ -2012,6 +2072,11 @@ static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
         return -EIO;
     }
 
+    /* throttling disk read I/O */
+    if (bs->io_limits_enabled) {
+        bdrv_io_limits_intercept(bs, false, nb_sectors);
+    }
+
     if (bs->copy_on_read) {
         flags |= BDRV_REQ_COPY_ON_READ;
     }
@@ -2114,6 +2179,11 @@ static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
         return -EIO;
     }
 
+    /* throttling disk write I/O */
+    if (bs->io_limits_enabled) {
+        bdrv_io_limits_intercept(bs, true, nb_sectors);
+    }
+
     if (bs->copy_on_read_in_flight) {
         wait_for_overlapping_requests(bs, sector_num, nb_sectors);
     }
@@ -3439,6 +3509,170 @@ void bdrv_aio_cancel(BlockDriverAIOCB *acb)
     acb->pool->cancel(acb);
 }
 
+/* block I/O throttling */
+static bool bdrv_exceed_bps_limits(BlockDriverState *bs, int nb_sectors,
+        bool is_write, double elapsed_time, uint64_t *wait)
+{
+    uint64_t bps_limit = 0;
+    double bytes_limit, bytes_base, bytes_res;
+    double slice_time, wait_time;
+
+    if (bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL]) {
+        bps_limit = bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL];
+    } else if (bs->io_limits.bps[is_write]) {
+        bps_limit = bs->io_limits.bps[is_write];
+    } else {
+        if (wait) {
+            *wait = 0;
+        }
+
+        return false;
+    }
+
+    slice_time = bs->slice_end - bs->slice_start;
+    slice_time /= (NANOSECONDS_PER_SECOND);
+    bytes_limit = bps_limit * slice_time;
+    bytes_base = bs->nr_bytes[is_write] - bs->io_base.bytes[is_write];
+    if (bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL]) {
+        bytes_base += bs->nr_bytes[!is_write] - bs->io_base.bytes[!is_write];
+    }
+
+    /* bytes_base: the bytes of data that have already been read/written,
+     * obtained from the history statistics.
+     * bytes_res: the remaining bytes of data to be read/written.
+     * (bytes_base + bytes_res) / bps_limit: the total time needed to
+     * complete reading/writing all of the data.
+     */
+    bytes_res = (unsigned) nb_sectors * BDRV_SECTOR_SIZE;
+
+    if (bytes_base + bytes_res <= bytes_limit) {
+        if (wait) {
+            *wait = 0;
+        }
+
+        return false;
+    }
+
+    /* Calc approx time to dispatch */
+    wait_time = (bytes_base + bytes_res) / bps_limit - elapsed_time;
+
+    /* When the I/O rate at runtime exceeds the limits, bs->slice_end
+     * needs to be extended so that the current statistics are still
+     * valid when the timer fires; the amount of the extension was
+     * tuned experimentally.
+     */
+    bs->slice_time = wait_time * BLOCK_IO_SLICE_TIME * 10;
+    bs->slice_end += bs->slice_time - 3 * BLOCK_IO_SLICE_TIME;
+    if (wait) {
+        *wait = wait_time * BLOCK_IO_SLICE_TIME * 10;
+    }
+
+    return true;
+}
+
+static bool bdrv_exceed_iops_limits(BlockDriverState *bs, bool is_write,
+        double elapsed_time, uint64_t *wait)
+{
+    uint64_t iops_limit = 0;
+    double ios_limit, ios_base;
+    double slice_time, wait_time;
+
+    if (bs->io_limits.iops[BLOCK_IO_LIMIT_TOTAL]) {
+        iops_limit = bs->io_limits.iops[BLOCK_IO_LIMIT_TOTAL];
+    } else if (bs->io_limits.iops[is_write]) {
+        iops_limit = bs->io_limits.iops[is_write];
+    } else {
+        if (wait) {
+            *wait = 0;
+        }
+
+        return false;
+    }
+
+    slice_time = bs->slice_end - bs->slice_start;
+    slice_time /= (NANOSECONDS_PER_SECOND);
+    ios_limit = iops_limit * slice_time;
+    ios_base = bs->nr_ops[is_write] - bs->io_base.ios[is_write];
+    if (bs->io_limits.iops[BLOCK_IO_LIMIT_TOTAL]) {
+        ios_base += bs->nr_ops[!is_write] - bs->io_base.ios[!is_write];
+    }
+
+    if (ios_base + 1 <= ios_limit) {
+        if (wait) {
+            *wait = 0;
+        }
+
+        return false;
+    }
+
+    /* Calc approx time to dispatch */
+    wait_time = (ios_base + 1) / iops_limit;
+    if (wait_time > elapsed_time) {
+        wait_time = wait_time - elapsed_time;
+    } else {
+        wait_time = 0;
+    }
+
+    bs->slice_time = wait_time * BLOCK_IO_SLICE_TIME * 10;
+    bs->slice_end += bs->slice_time - 3 * BLOCK_IO_SLICE_TIME;
+    if (wait) {
+        *wait = wait_time * BLOCK_IO_SLICE_TIME * 10;
+    }
+
+    return true;
+}
+
+static bool bdrv_exceed_io_limits(BlockDriverState *bs, int nb_sectors,
+        bool is_write, int64_t *wait)
+{
+    int64_t now, max_wait;
+    uint64_t bps_wait = 0, iops_wait = 0;
+    double elapsed_time;
+    int bps_ret, iops_ret;
+
+    now = qemu_get_clock(vm_clock);
+    if ((bs->slice_start < now)
+        && (bs->slice_end > now)) {
+        bs->slice_end = now + bs->slice_time;
+    } else {
+        bs->slice_time = 5 * BLOCK_IO_SLICE_TIME;
+        bs->slice_start = now;
+        bs->slice_end = now + bs->slice_time;
+
+        bs->io_base.bytes[is_write] = bs->nr_bytes[is_write];
+        bs->io_base.bytes[!is_write] = bs->nr_bytes[!is_write];
+
+        bs->io_base.ios[is_write] = bs->nr_ops[is_write];
+        bs->io_base.ios[!is_write] = bs->nr_ops[!is_write];
+    }
+
+    elapsed_time = now - bs->slice_start;
+    elapsed_time /= (NANOSECONDS_PER_SECOND);
+
+    bps_ret = bdrv_exceed_bps_limits(bs, nb_sectors,
+                                     is_write, elapsed_time, &bps_wait);
+    iops_ret = bdrv_exceed_iops_limits(bs, is_write,
+                                       elapsed_time, &iops_wait);
+    if (bps_ret || iops_ret) {
+        max_wait = bps_wait > iops_wait ? bps_wait : iops_wait;
+        if (wait) {
+            *wait = max_wait;
+        }
+
+        now = qemu_get_clock(vm_clock);
+        if (bs->slice_end < now + max_wait) {
+            bs->slice_end = now + max_wait;
+        }
+
+        return true;
+    }
+
+    if (wait) {
+        *wait = 0;
+    }
+
+    return false;
+}
 
 /**************************************************************/
 /* async block device emulation */
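The bps math in bdrv_exceed_bps_limits() above is easier to check with
concrete numbers.  The sketch below (standalone, not QEMU code; every value
is made up for illustration) reproduces the slice-budget test and the
wait-time formula for a 1 MiB/s limit:

#include <stdio.h>

int main(void)
{
    double bps_limit    = 1024.0 * 1024.0; /* 1 MiB/s limit */
    double slice_time   = 0.5;             /* length of the current slice, s */
    double elapsed_time = 0.3;             /* time since slice_start, s */

    double bytes_limit = bps_limit * slice_time;  /* budget for this slice */
    double bytes_base  = 400.0 * 1024.0;          /* already transferred */
    double bytes_res   = 256.0 * 1024.0;          /* size of this request */

    if (bytes_base + bytes_res <= bytes_limit) {
        printf("within the slice budget, dispatch immediately\n");
        return 0;
    }

    /* The instant at which the accumulated bytes become legal at
     * bps_limit, minus the time already spent in this slice. */
    double wait_time = (bytes_base + bytes_res) / bps_limit - elapsed_time;
    printf("throttled: wait %.3f s before dispatching\n", wait_time);
    return 0;
}

Here the slice budget is 1 MiB/s * 0.5 s = 512 KiB; the 400 KiB already
transferred plus the 256 KiB request exceed it, so the request must wait
(656 KiB / 1 MiB/s) - 0.3 s, i.e. about 0.341 s.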
diff --git a/block.h b/block.h
index ae1f346..9fbd072 100644
--- a/block.h
+++ b/block.h
@@ -124,6 +124,7 @@ void bdrv_info_stats(Monitor *mon, QObject **ret_data);
 
 /* disk I/O throttling */
 void bdrv_io_limits_enable(BlockDriverState *bs);
+void bdrv_io_limits_disable(BlockDriverState *bs);
 bool bdrv_io_limits_enabled(BlockDriverState *bs);
 
 void bdrv_init(void);
diff --git a/block_int.h b/block_int.h
index da52d7e..0e405f5 100644
--- a/block_int.h
+++ b/block_int.h
@@ -39,6 +39,7 @@
 #define BLOCK_IO_LIMIT_TOTAL 2
 
 #define BLOCK_IO_SLICE_TIME 100000000
+#define NANOSECONDS_PER_SECOND 1000000000.0
 
 #define BLOCK_OPT_SIZE "size"
 #define BLOCK_OPT_ENCRYPT "encryption"
-- 
1.7.1
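A final note on the slice bookkeeping shared by the three functions in
block.c: within an active slice, bdrv_exceed_io_limits() keeps sliding
slice_end forward so the statistics accumulated so far remain usable, and
only once the slice has expired does it open a fresh window of
5 * BLOCK_IO_SLICE_TIME and rebase the counters.  A standalone sketch of
just that decision (not QEMU code; now_ns() is a hypothetical
monotonic-clock helper, and the io_base rebasing is reduced to a comment):

#define _POSIX_C_SOURCE 199309L
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define BLOCK_IO_SLICE_TIME    100000000   /* 100 ms, in ns */
#define NANOSECONDS_PER_SECOND 1000000000.0

typedef struct {
    int64_t slice_start, slice_end, slice_time;
} SliceState;

static int64_t now_ns(void)   /* hypothetical stand-in for qemu_get_clock() */
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (int64_t)ts.tv_sec * 1000000000 + ts.tv_nsec;
}

/* If 'now' still falls inside the current slice, slide slice_end forward
 * so the statistics accumulated so far stay valid; otherwise open a fresh
 * slice (at this point QEMU also rebases the io_base counters). */
static void update_slice(SliceState *s, int64_t now)
{
    if (s->slice_start < now && s->slice_end > now) {
        s->slice_end = now + s->slice_time;
    } else {
        s->slice_time = 5 * BLOCK_IO_SLICE_TIME;
        s->slice_start = now;
        s->slice_end = now + s->slice_time;
    }
}

int main(void)
{
    SliceState s = { 0, 0, 0 };
    int64_t now = now_ns();

    update_slice(&s, now);  /* first call opens a fresh 500 ms slice */
    printf("slice of %.0f ms opened, elapsed %.3f s\n",
           s.slice_time / 1e6,
           (now - s.slice_start) / NANOSECONDS_PER_SECOND);
    return 0;
}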