// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * raid1.c : Multiple Devices driver for Linux
 *
 * Copyright (C) 1999, 2000, 2001 Ingo Molnar, Red Hat
 *
 * Copyright (C) 1996, 1997, 1998 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *
 * RAID-1 management functions.
 *
 * Better read-balancing code written by Mika Kuoppala <miku@iki.fi>, 2000
 *
 * Fixes to reconstruction by Jakob Østergaard <jakob@ostenfeld.dk>
 * Various fixes by Neil Brown <neilb@cse.unsw.edu.au>
 *
 * Changes by Peter T. Breuer <ptb@it.uc3m.es> 31/1/2003 to support
 * bitmapped intelligence in resync:
 *
 * - bitmap marked during normal i/o
 * - bitmap used to skip nondirty blocks during sync
 *
 * Additions to bitmap code, (C) 2003-2004 Paul Clements, SteelEye Technology:
 * - persistent bitmap code
 */

include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit slab.h inclusion from percpu.h

percpu.h is included by sched.h and module.h and thus ends up being included when building most .c files. percpu.h includes slab.h, which in turn includes gfp.h, making everything defined by the two files universally available and complicating inclusion dependencies.

The percpu.h -> slab.h dependency is about to be removed. Prepare for this change by updating users of gfp and slab facilities to include those headers directly instead of assuming availability. As this conversion needs to touch a large number of source files, the following script was used as the basis of the conversion: http://userweb.kernel.org/~tj/misc/slabh-sweep.py

The script does the following:

* Scan files for gfp and slab usages and update includes so that only the necessary includes are there, i.e. if only gfp is used, gfp.h; if slab is used, slab.h.
* When the script inserts a new include, it looks at the include blocks and tries to place the new include so that its order conforms to its surroundings. It is put in the include block which contains core kernel includes, in the same order that the rest are ordered (alphabetical, Christmas tree, rev-Xmas-tree), or at the end if there doesn't seem to be any matching order.
* If the script can't find a place to put a new include (mostly because the file doesn't have a fitting include block), it prints an error message indicating which .h file needs to be added to the file.

The conversion was done in the following steps:

1. The initial automatic conversion of all .c files updated slightly over 4000 files, deleting around 700 includes and adding ~480 gfp.h and ~3000 slab.h inclusions. The script emitted errors for ~400 files.
2. Each error was manually checked. Some didn't need the inclusion; some needed manual addition, while adding it to an implementation .h or embedding .c file was more appropriate for others. This step added inclusions to around 150 files.
3. The script was run again and the output was compared to the edits from #2 to make sure no file was left behind.
4. Several build tests were done and a couple of problems were fixed, e.g. lib/decompress_*.c used malloc/free() wrappers around slab APIs, requiring slab.h to be added manually.
5. The script was run on all .h files, but without automatically editing them, as sprinkling gfp.h and slab.h inclusions around .h files could easily lead to inclusion dependency hell. Most gfp.h inclusion directives were ignored, as stuff from gfp.h was usually widely available and often used in preprocessor macros. Each slab.h inclusion directive was examined and added manually as necessary.
6. percpu.h was updated not to include slab.h.
7. Build tests were done on the following configurations and failures were fixed. CONFIG_GCOV_KERNEL was turned off for all tests (as my distributed build env didn't work with gcov compiles) and a few more options had to be turned off depending on archs to make things build (like ipr on powerpc/64, which failed due to missing writeq).
   * x86 and x86_64 UP and SMP allmodconfig and a custom test config
   * powerpc and powerpc64 SMP allmodconfig
   * sparc and sparc64 SMP allmodconfig
   * ia64 SMP allmodconfig
   * s390 SMP allmodconfig
   * alpha SMP allmodconfig
   * um on x86_64 SMP allmodconfig
8. percpu.h modifications were reverted so that they could be applied as a separate patch and serve as a bisection point.

Given that I had only a couple of failures from the build tests in step 7, I'm fairly confident about the coverage of this conversion patch. If there is a breakage, it's likely to be something in one of the arch headers, which should be easily discoverable on most builds of the specific arch.

Signed-off-by: Tejun Heo <tj@kernel.org>
Guess-its-ok-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
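
The per-file shape of the fix is small. A hypothetical before/after for a file like this one (illustrative only; the exact headers each file needed varied):

/* before the sweep: kzalloc() compiled only because
 * module.h -> percpu.h -> slab.h was pulled in implicitly */
#include <linux/module.h>

/* after the sweep: the slab dependency is stated explicitly */
#include <linux/module.h>
#include <linux/slab.h>         /* kzalloc(), kfree() */
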
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
#include <linux/interval_tree_generic.h>

#include <trace/events/block.h>

#include "md.h"
#include "raid1.h"
#include "md-bitmap.h"

#define UNSUPPORTED_MDDEV_FLAGS         \
        ((1L << MD_HAS_JOURNAL) |       \
         (1L << MD_JOURNAL_CLEAN) |     \
         (1L << MD_HAS_PPL) |           \
         (1L << MD_HAS_MULTIPLE_PPLS))
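
These flags mark metadata features (journal, PPL) that the raid1 personality does not support. A minimal sketch of how the mask is consumed, assuming the mddev_clear_unsupported_flags() helper from md.h (the surrounding raid5-to-raid1 takeover logic is abbreviated):

        /* inside raid1_takeover(), once a new r1conf has been set up,
         * drop feature flags raid1 cannot honour */
        mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);
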
RAID1: a new I/O barrier implementation to remove the resync window

Commit 79ef3a8aa1cb ("raid1: Rewrite the implementation of iobarrier.") introduced a sliding resync window for the raid1 I/O barrier. The idea limits I/O barriers to happen only inside the sliding resync window, so regular I/Os outside the resync window don't need to wait for a barrier any more. On a large raid1 device, it helps a lot to improve parallel write throughput when background resync I/Os are running at the same time.

The idea of the sliding resync window is awesome, but the code complexity is a challenge. The sliding resync window requires several variables to work collectively; this is complex and very hard to make work correctly. Just grep "Fixes: 79ef3a8aa1" in the kernel git log: there are 8 more patches to fix the original resync window patch. This is not the end; any further related modification may easily introduce more regressions.

Therefore I decided to implement a much simpler raid1 I/O barrier by removing the resync window code; I believe life will be much easier.

The brief idea of the simpler barrier is:
- Do not maintain a globally unique resync window.
- Use multiple hash buckets to reduce I/O barrier conflicts: regular I/O only has to wait for a resync I/O when both have the same barrier bucket index, and vice versa.
- I/O barrier waits can be reduced to an acceptable number if there are enough barrier buckets.

Here is how the barrier buckets are designed:

- BARRIER_UNIT_SECTOR_SIZE
  The whole LBA address space of a raid1 device is divided into multiple barrier units, each of size BARRIER_UNIT_SECTOR_SIZE. Bio requests won't cross a barrier unit boundary, which means the maximum bio size is BARRIER_UNIT_SECTOR_SIZE<<9 (64MB) in bytes. For random I/O, 64MB is large enough for both read and write requests; for sequential I/O, considering that the underlying block layer may merge them into larger requests, 64MB is still good enough.
  Neil also points out that for the resync operation, "we want the resync to move from region to region fairly quickly so that the slowness caused by having to synchronize with the resync is averaged out over a fairly small time frame". For a full-speed resync, 64MB should take less than 1 second. When resync is competing with other I/O, it could take up to a few minutes. Therefore 64MB is a fairly good range for resync.

- BARRIER_BUCKETS_NR
  There are BARRIER_BUCKETS_NR buckets in total, defined by:
        #define BARRIER_BUCKETS_NR_BITS (PAGE_SHIFT - 2)
        #define BARRIER_BUCKETS_NR      (1<<BARRIER_BUCKETS_NR_BITS)
  This patch changes the members below of struct r1conf from integers to arrays of integers:
        - int nr_pending;
        - int nr_waiting;
        - int nr_queued;
        - int barrier;
        + int *nr_pending;
        + int *nr_waiting;
        + int *nr_queued;
        + int *barrier;
  The number of array elements is defined as BARRIER_BUCKETS_NR. For a 4KB kernel page size, (PAGE_SHIFT - 2) indicates there are 1024 I/O barrier buckets, and each array of integers occupies a single memory page. With 1024 buckets, a request smaller than the I/O barrier unit size has a ~0.1% chance of waiting for resync to pause, which is a small enough fraction. Also, requesting a single memory page is friendlier to the kernel page allocator than a larger allocation.

- The I/O barrier bucket is indexed by the bio start sector
  If multiple I/O requests hit different I/O barrier units, they only need to compete for the I/O barrier with other I/Os that hit the same barrier bucket index. The index of the barrier bucket a bio should look for is calculated by sector_to_idx(), defined in raid1.h as an inline function:
        static inline int sector_to_idx(sector_t sector)
        {
                return hash_long(sector >> BARRIER_UNIT_SECTOR_BITS,
                                 BARRIER_BUCKETS_NR_BITS);
        }
  Here 'sector' is the start sector number of a bio.

- A single bio won't cross the boundary of an I/O barrier unit
  If a request crosses a barrier unit boundary, it will be split. A bio may be split in raid1_make_request() or raid1_sync_request() if the sector count returned by align_to_barrier_unit_end() is smaller than the original bio size.

Compared to a single sliding resync window:
- Currently resync I/O grows linearly, so regular and resync I/O conflict within a single barrier unit; the I/O behavior is therefore similar to a single sliding resync window.
- But a barrier bucket is shared by all barrier units with an identical barrier bucket index, so the probability of conflict might be higher than with a single sliding resync window, in the case where writing I/Os always hit barrier units with barrier bucket indexes identical to the resync I/Os. This is a very rare condition in real I/O workloads; I cannot imagine how it could happen in practice.
- Therefore we can achieve a low enough conflict rate with a much simpler barrier algorithm and implementation.

A few changes should be noticed:
- In raid1d(), I changed the code that decreases conf->nr_pending[idx] into a single loop; it looks like this:
        spin_lock_irqsave(&conf->device_lock, flags);
        conf->nr_queued[idx]--;
        spin_unlock_irqrestore(&conf->device_lock, flags);
  This change generates more spin lock operations, but in the next patch of this patch set it will be replaced by a single line of code:
        atomic_dec(&conf->nr_queued[idx]);
  So we don't need to worry about the spin lock cost here.
- Mainline raid1 code split the original raid1_make_request() into raid1_read_request() and raid1_write_request(). If the original bio crosses an I/O barrier unit boundary, the bio is split before calling raid1_read_request() or raid1_write_request(); this makes the code logic simpler and clearer.
- In this patch, wait_barrier() is moved from raid1_make_request() to raid1_write_request(). In raid1_read_request(), the original wait_barrier() is replaced by wait_read_barrier(). The difference is that wait_read_barrier() only waits if the array is frozen; using a different barrier function in each code path makes the code cleaner and easier to read.

Changelog
V4:
- Add alloc_r1bio() to remove redundant r1bio memory allocation code.
- Fix many typos in patch comments.
- Use (PAGE_SHIFT - ilog2(sizeof(int))) to define BARRIER_BUCKETS_NR_BITS.
V3:
- Rebase the patch against the latest upstream kernel code.
- Many fixes from Neil's review comments:
  - Back to using pointers instead of arrays in struct r1conf.
  - Remove total_barriers from struct r1conf.
  - Add more patch comments to explain how/why the values of BARRIER_UNIT_SECTOR_SIZE and BARRIER_BUCKETS_NR were decided.
  - Use get_unqueued_pending() to replace get_all_pendings() and get_all_queued().
  - Increase bucket number from 512 to 1024.
- Change code comment format per review from Shaohua.
V2:
- Use bio_split() to split the original bio if it crosses a barrier unit boundary, to make the code simpler, by suggestion from Shaohua and Neil.
- Use hash_long() to replace the original linear hash, to avoid a possible conflict between resync I/O and sequential write I/O, by suggestion from Shaohua.
- Add conf->total_barriers to record barrier depth, which is used to control the number of parallel sync I/O barriers, by suggestion from Shaohua.
- In the V1 patch, the barrier-bucket members below of r1conf were allocated in a memory page. To make the code simpler, the V2 patch moves the memory into struct r1conf, like this:
        - int nr_pending;
        - int nr_waiting;
        - int nr_queued;
        - int barrier;
        + int nr_pending[BARRIER_BUCKETS_NR];
        + int nr_waiting[BARRIER_BUCKETS_NR];
        + int nr_queued[BARRIER_BUCKETS_NR];
        + int barrier[BARRIER_BUCKETS_NR];
  This change is by suggestion from Shaohua.
- Remove some irrelevant code comments, by suggestion from Guoqing.
- Add a missing wait_barrier() before jumping to retry_write in raid1_make_write_request().
V1:
- Original RFC patch for comments.

Signed-off-by: Coly Li <colyli@suse.de>
Cc: Johannes Thumshirn <jthumshirn@suse.de>
Cc: Guoqing Jiang <gqjiang@suse.com>
Reviewed-by: Neil Brown <neilb@suse.de>
Signed-off-by: Shaohua Li <shli@fb.com>
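
The splitting described above hinges on one small helper. A sketch that closely follows the shape of the kernel function (treat it as an illustration rather than the verbatim source):

static sector_t align_to_barrier_unit_end(sector_t start_sector,
                                          sector_t sectors)
{
        sector_t len;

        WARN_ON(sectors == 0);
        /*
         * len is the number of sectors from start_sector to the next
         * BARRIER_UNIT_SECTOR_SIZE boundary; the caller never issues
         * a bio longer than that.
         */
        len = round_up(start_sector + 1,
                       BARRIER_UNIT_SECTOR_SIZE) - start_sector;

        return min_t(sector_t, sectors, len);
}
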
static void allow_barrier(struct r1conf *conf, sector_t sector_nr);
static void lower_barrier(struct r1conf *conf, sector_t sector_nr);

#define raid1_log(md, fmt, args...) \
        do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid1 " fmt, ##args); } while (0)

#include "raid1-10.c"
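
The macro is a no-op on arrays without a request queue; otherwise the message lands in the blktrace stream for the array. A typical (illustrative) call site:

        /* emits "raid1 wait freeze" into blktrace for this array */
        raid1_log(mddev, "wait freeze");
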
#define START(node) ((node)->start)
#define LAST(node) ((node)->last)
INTERVAL_TREE_DEFINE(struct serial_info, node, sector_t, _subtree_last,
                     START, LAST, static inline, raid1_rb);
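
INTERVAL_TREE_DEFINE() expands at compile time into a family of static inline interval-tree operations named after the raid1_rb prefix, keyed on the start/last fields selected above. The ones used below have roughly these signatures (as generated by include/linux/interval_tree_generic.h):

static inline void raid1_rb_insert(struct serial_info *node,
                                   struct rb_root_cached *root);
static inline void raid1_rb_remove(struct serial_info *node,
                                   struct rb_root_cached *root);
static inline struct serial_info *
raid1_rb_iter_first(struct rb_root_cached *root, sector_t start, sector_t last);
static inline struct serial_info *
raid1_rb_iter_next(struct serial_info *node, sector_t start, sector_t last);
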
static int check_and_add_serial(struct md_rdev *rdev, struct r1bio *r1_bio,
                                struct serial_info *si, int idx)
{
        unsigned long flags;
        int ret = 0;
        sector_t lo = r1_bio->sector;
        sector_t hi = lo + r1_bio->sectors;
        struct serial_in_rdev *serial = &rdev->serial[idx];

        spin_lock_irqsave(&serial->serial_lock, flags);
        /* collision happened */
        if (raid1_rb_iter_first(&serial->serial_rb, lo, hi))
                ret = -EBUSY;
        else {
                si->start = lo;
                si->last = hi;
                raid1_rb_insert(si, &serial->serial_rb);
        }
        spin_unlock_irqrestore(&serial->serial_lock, flags);

        return ret;
}

static void wait_for_serialization(struct md_rdev *rdev, struct r1bio *r1_bio)
{
        struct mddev *mddev = rdev->mddev;
        struct serial_info *si;
        int idx = sector_to_idx(r1_bio->sector);
        struct serial_in_rdev *serial = &rdev->serial[idx];

        if (WARN_ON(!mddev->serial_info_pool))
                return;
        si = mempool_alloc(mddev->serial_info_pool, GFP_NOIO);
        wait_event(serial->serial_io_wait,
                   check_and_add_serial(rdev, r1_bio, si, idx) == 0);
}

static void remove_serial(struct md_rdev *rdev, sector_t lo, sector_t hi)
{
        struct serial_info *si;
        unsigned long flags;
        int found = 0;
        struct mddev *mddev = rdev->mddev;
        int idx = sector_to_idx(lo);
        struct serial_in_rdev *serial = &rdev->serial[idx];

        spin_lock_irqsave(&serial->serial_lock, flags);
        for (si = raid1_rb_iter_first(&serial->serial_rb, lo, hi);
             si; si = raid1_rb_iter_next(si, lo, hi)) {
                if (si->start == lo && si->last == hi) {
                        raid1_rb_remove(si, &serial->serial_rb);
                        mempool_free(si, mddev->serial_info_pool);
                        found = 1;
                        break;
                }
        }
        if (!found)
                WARN(1, "The write IO is not recorded for serialization\n");
        spin_unlock_irqrestore(&serial->serial_lock, flags);
        wake_up(&serial->serial_io_wait);
}
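
A condensed sketch of where this pair is exercised: write-behind completion removes the interval it inserted before issuing the write. This assumes the CollisionCheck rdev flag from the serialization patch set gates the call, as in this era of the driver:

        /* in raid1_end_write_request(), behind-write completion path */
        if (behind && test_bit(CollisionCheck, &rdev->flags))
                remove_serial(rdev, lo, hi);
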

/*
 * for resync bio, r1bio pointer can be retrieved from the per-bio
 * 'struct resync_pages'.
 */
static inline struct r1bio *get_resync_r1bio(struct bio *bio)
{
        return get_resync_pages(bio)->raid_bio;
}

static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
{
        struct pool_info *pi = data;
        int size = offsetof(struct r1bio, bios[pi->raid_disks]);

        /* allocate a r1bio with room for raid_disks entries in the bios array */
        return kzalloc(size, gfp_flags);
}
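
The allocation side above is raid1-specific (it sizes the bios[] array from pi->raid_disks), while the free side is shared with raid10 and lives in raid1-10.c. A sketch of that helper, which this file calls as rbio_pool_free() further down:

static void rbio_pool_free(void *rbio, void *data)
{
        kfree(rbio);
}
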
#define RESYNC_DEPTH 32
#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
#define RESYNC_WINDOW (RESYNC_BLOCK_SIZE * RESYNC_DEPTH)
#define RESYNC_WINDOW_SECTORS (RESYNC_WINDOW >> 9)
#define CLUSTER_RESYNC_WINDOW (16 * RESYNC_WINDOW)
#define CLUSTER_RESYNC_WINDOW_SECTORS (CLUSTER_RESYNC_WINDOW >> 9)
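
With RESYNC_BLOCK_SIZE of 64KiB (it comes from raid1-10.c, included above) and 512-byte sectors, these expand to the following worked values:

/*
 * RESYNC_SECTORS                =  64KiB >> 9 =   128 sectors
 * RESYNC_WINDOW                 =  64KiB * 32 =  2MiB
 * RESYNC_WINDOW_SECTORS         =   2MiB >> 9 =  4096 sectors
 * CLUSTER_RESYNC_WINDOW         = 16 * 2MiB   = 32MiB
 * CLUSTER_RESYNC_WINDOW_SECTORS =  32MiB >> 9 = 65536 sectors
 */
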
static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
{
        struct pool_info *pi = data;
        struct r1bio *r1_bio;
        struct bio *bio;
        int need_pages;
        int j;
        struct resync_pages *rps;

        r1_bio = r1bio_pool_alloc(gfp_flags, pi);
        if (!r1_bio)
                return NULL;

treewide: kmalloc() -> kmalloc_array()
The kmalloc() function has a 2-factor argument form, kmalloc_array(). This
patch replaces cases of:
kmalloc(a * b, gfp)
with:
kmalloc_array(a * b, gfp)
as well as handling cases of:
kmalloc(a * b * c, gfp)
with:
kmalloc(array3_size(a, b, c), gfp)
as it's slightly less ugly than:
kmalloc_array(array_size(a, b), c, gfp)
This does, however, attempt to ignore constant size factors like:
kmalloc(4 * 1024, gfp)
though any constants defined via macros get caught up in the conversion.
Any factors with a sizeof() of "unsigned char", "char", and "u8" were
dropped, since they're redundant.
The tools/ directory was manually excluded, since it has its own
implementation of kmalloc().
The Coccinelle script used for this was:
// Fix redundant parens around sizeof().
@@
type TYPE;
expression THING, E;
@@
(
kmalloc(
- (sizeof(TYPE)) * E
+ sizeof(TYPE) * E
, ...)
|
kmalloc(
- (sizeof(THING)) * E
+ sizeof(THING) * E
, ...)
)
// Drop single-byte sizes and redundant parens.
@@
expression COUNT;
typedef u8;
typedef __u8;
@@
(
kmalloc(
- sizeof(u8) * (COUNT)
+ COUNT
, ...)
|
kmalloc(
- sizeof(__u8) * (COUNT)
+ COUNT
, ...)
|
kmalloc(
- sizeof(char) * (COUNT)
+ COUNT
, ...)
|
kmalloc(
- sizeof(unsigned char) * (COUNT)
+ COUNT
, ...)
|
kmalloc(
- sizeof(u8) * COUNT
+ COUNT
, ...)
|
kmalloc(
- sizeof(__u8) * COUNT
+ COUNT
, ...)
|
kmalloc(
- sizeof(char) * COUNT
+ COUNT
, ...)
|
kmalloc(
- sizeof(unsigned char) * COUNT
+ COUNT
, ...)
)
// 2-factor product with sizeof(type/expression) and identifier or constant.
@@
type TYPE;
expression THING;
identifier COUNT_ID;
constant COUNT_CONST;
@@
(
- kmalloc
+ kmalloc_array
(
- sizeof(TYPE) * (COUNT_ID)
+ COUNT_ID, sizeof(TYPE)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(TYPE) * COUNT_ID
+ COUNT_ID, sizeof(TYPE)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(TYPE) * (COUNT_CONST)
+ COUNT_CONST, sizeof(TYPE)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(TYPE) * COUNT_CONST
+ COUNT_CONST, sizeof(TYPE)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(THING) * (COUNT_ID)
+ COUNT_ID, sizeof(THING)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(THING) * COUNT_ID
+ COUNT_ID, sizeof(THING)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(THING) * (COUNT_CONST)
+ COUNT_CONST, sizeof(THING)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(THING) * COUNT_CONST
+ COUNT_CONST, sizeof(THING)
, ...)
)
// 2-factor product, only identifiers.
@@
identifier SIZE, COUNT;
@@
- kmalloc
+ kmalloc_array
(
- SIZE * COUNT
+ COUNT, SIZE
, ...)
// 3-factor product with 1 sizeof(type) or sizeof(expression), with
// redundant parens removed.
@@
expression THING;
identifier STRIDE, COUNT;
type TYPE;
@@
(
kmalloc(
- sizeof(TYPE) * (COUNT) * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kmalloc(
- sizeof(TYPE) * (COUNT) * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kmalloc(
- sizeof(TYPE) * COUNT * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kmalloc(
- sizeof(TYPE) * COUNT * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kmalloc(
- sizeof(THING) * (COUNT) * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kmalloc(
- sizeof(THING) * (COUNT) * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kmalloc(
- sizeof(THING) * COUNT * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kmalloc(
- sizeof(THING) * COUNT * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
)
// 3-factor product with 2 sizeof(variable), with redundant parens removed.
@@
expression THING1, THING2;
identifier COUNT;
type TYPE1, TYPE2;
@@
(
kmalloc(
- sizeof(TYPE1) * sizeof(TYPE2) * COUNT
+ array3_size(COUNT, sizeof(TYPE1), sizeof(TYPE2))
, ...)
|
kmalloc(
- sizeof(TYPE1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(TYPE1), sizeof(TYPE2))
, ...)
|
kmalloc(
- sizeof(THING1) * sizeof(THING2) * COUNT
+ array3_size(COUNT, sizeof(THING1), sizeof(THING2))
, ...)
|
kmalloc(
- sizeof(THING1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(THING1), sizeof(THING2))
, ...)
|
kmalloc(
- sizeof(TYPE1) * sizeof(THING2) * COUNT
+ array3_size(COUNT, sizeof(TYPE1), sizeof(THING2))
, ...)
|
kmalloc(
- sizeof(TYPE1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(TYPE1), sizeof(THING2))
, ...)
)
// 3-factor product, only identifiers, with redundant parens removed.
@@
identifier STRIDE, SIZE, COUNT;
@@
(
kmalloc(
- (COUNT) * STRIDE * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- COUNT * (STRIDE) * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- COUNT * STRIDE * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- (COUNT) * (STRIDE) * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- COUNT * (STRIDE) * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- (COUNT) * STRIDE * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- (COUNT) * (STRIDE) * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- COUNT * STRIDE * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
)
// Any remaining multi-factor products, first at least 3-factor products,
// when they're not all constants...
@@
expression E1, E2, E3;
constant C1, C2, C3;
@@
(
kmalloc(C1 * C2 * C3, ...)
|
kmalloc(
- (E1) * E2 * E3
+ array3_size(E1, E2, E3)
, ...)
|
kmalloc(
- (E1) * (E2) * E3
+ array3_size(E1, E2, E3)
, ...)
|
kmalloc(
- (E1) * (E2) * (E3)
+ array3_size(E1, E2, E3)
, ...)
|
kmalloc(
- E1 * E2 * E3
+ array3_size(E1, E2, E3)
, ...)
)
// And then all remaining 2 factors products when they're not all constants,
// keeping sizeof() as the second factor argument.
@@
expression THING, E1, E2;
type TYPE;
constant C1, C2, C3;
@@
(
kmalloc(sizeof(THING) * C2, ...)
|
kmalloc(sizeof(TYPE) * C2, ...)
|
kmalloc(C1 * C2 * C3, ...)
|
kmalloc(C1 * C2, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(TYPE) * (E2)
+ E2, sizeof(TYPE)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(TYPE) * E2
+ E2, sizeof(TYPE)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(THING) * (E2)
+ E2, sizeof(THING)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(THING) * E2
+ E2, sizeof(THING)
, ...)
|
- kmalloc
+ kmalloc_array
(
- (E1) * E2
+ E1, E2
, ...)
|
- kmalloc
+ kmalloc_array
(
- (E1) * (E2)
+ E1, E2
, ...)
|
- kmalloc
+ kmalloc_array
(
- E1 * E2
+ E1, E2
, ...)
)
Signed-off-by: Kees Cook <keescook@chromium.org>
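
For this file, the transformation lands a few lines below; the before/after shape was (an illustration of the script's output, not a quote from the patch):

        /* before: open-coded 2-factor multiply */
        rps = kmalloc(sizeof(struct resync_pages) * pi->raid_disks,
                      gfp_flags);
        /* after: overflow-checked form */
        rps = kmalloc_array(pi->raid_disks, sizeof(struct resync_pages),
                            gfp_flags);
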
2018-06-12 20:55:00 +00:00
|
|
|
rps = kmalloc_array(pi->raid_disks, sizeof(struct resync_pages),
|
|
|
|
gfp_flags);
|
2017-03-16 16:12:26 +00:00
|
|
|
if (!rps)
|
|
|
|
goto out_free_r1bio;
|
|
|
|
|

        /*
         * Allocate bios : 1 for reading, n-1 for writing
         */
        for (j = pi->raid_disks ; j-- ; ) {
                bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
                if (!bio)
                        goto out_free_bio;
                r1_bio->bios[j] = bio;
        }
        /*
         * Allocate RESYNC_PAGES data pages and attach them to
         * the first bio.
         * If this is a user-requested check/repair, allocate
         * RESYNC_PAGES for each bio.
         */
        if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
                need_pages = pi->raid_disks;
        else
                need_pages = 1;
        for (j = 0; j < pi->raid_disks; j++) {
                struct resync_pages *rp = &rps[j];

                bio = r1_bio->bios[j];

                if (j < need_pages) {
                        if (resync_alloc_pages(rp, gfp_flags))
                                goto out_free_pages;
                } else {
                        memcpy(rp, &rps[0], sizeof(*rp));
                        resync_get_all_pages(rp);
                }

                rp->raid_bio = r1_bio;
                bio->bi_private = rp;
        }

        r1_bio->master_bio = NULL;

        return r1_bio;

out_free_pages:
        while (--j >= 0)
                resync_free_pages(&rps[j]);

out_free_bio:
        while (++j < pi->raid_disks)
                bio_put(r1_bio->bios[j]);
        kfree(rps);

out_free_r1bio:
        rbio_pool_free(r1_bio, data);
        return NULL;
}

static void r1buf_pool_free(void *__r1_bio, void *data)
{
        struct pool_info *pi = data;
        int i;
        struct r1bio *r1bio = __r1_bio;
        struct resync_pages *rp = NULL;

        for (i = pi->raid_disks; i--; ) {
                rp = get_resync_pages(r1bio->bios[i]);
                resync_free_pages(rp);
                bio_put(r1bio->bios[i]);
        }

        /* resync pages array stored in the 1st bio's .bi_private */
        kfree(rp);

        rbio_pool_free(r1bio, data);
}
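
Both pool callbacks are wired up when a resync starts. A minimal sketch of that setup, assuming the init_resync() helper found later in this driver:

static int init_resync(struct r1conf *conf)
{
        int buffs;

        buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
        BUG_ON(mempool_initialized(&conf->r1buf_pool));

        return mempool_init(&conf->r1buf_pool, buffs, r1buf_pool_alloc,
                            r1buf_pool_free, conf->poolinfo);
}
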
static void put_all_bios(struct r1conf *conf, struct r1bio *r1_bio)
{
        int i;

        for (i = 0; i < conf->raid_disks * 2; i++) {
                struct bio **bio = r1_bio->bios + i;
                if (!BIO_SPECIAL(*bio))
                        bio_put(*bio);
                *bio = NULL;
        }
}

static void free_r1bio(struct r1bio *r1_bio)
{
        struct r1conf *conf = r1_bio->mddev->private;

        put_all_bios(conf, r1_bio);
        mempool_free(r1_bio, &conf->r1bio_pool);
}

static void put_buf(struct r1bio *r1_bio)
{
        struct r1conf *conf = r1_bio->mddev->private;
        sector_t sect = r1_bio->sector;
        int i;

        for (i = 0; i < conf->raid_disks * 2; i++) {
                struct bio *bio = r1_bio->bios[i];
                if (bio->bi_end_io)
                        rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
        }

        mempool_free(r1_bio, &conf->r1buf_pool);

        lower_barrier(conf, sect);
}

static void reschedule_retry(struct r1bio *r1_bio)
{
        unsigned long flags;
        struct mddev *mddev = r1_bio->mddev;
        struct r1conf *conf = mddev->private;
        int idx;

        idx = sector_to_idx(r1_bio->sector);
        spin_lock_irqsave(&conf->device_lock, flags);
        list_add(&r1_bio->retry_list, &conf->retry_list);

RAID1: avoid unnecessary spin locks in the I/O barrier code

When I ran a parallel read performance test on an md raid1 device backed by two NVMe SSDs, I observed surprisingly bad throughput: with fio at 64KB block size, 40 sequential-read I/O jobs and 128 iodepth, overall throughput was only 2.7GB/s, around 50% of the ideal number. perf reported lock contention in allow_barrier() and wait_barrier():

- 41.41%  fio [kernel.kallsyms]  [k] _raw_spin_lock_irqsave
   - _raw_spin_lock_irqsave
      + 89.92% allow_barrier
      + 9.34% __wake_up
- 37.30%  fio [kernel.kallsyms]  [k] _raw_spin_lock_irq
   - _raw_spin_lock_irq
      - 100.00% wait_barrier

The reason is that these I/O barrier functions:
- raise_barrier()
- lower_barrier()
- wait_barrier()
- allow_barrier()
always take conf->resync_lock first, even when there is only regular read I/O and no resync I/O at all. This is a huge performance penalty.

The solution is a lockless-like algorithm in the I/O barrier code that takes conf->resync_lock only when it has to. The original idea is from Hannes Reinecke, and Neil Brown provided comments to improve it. I continued to work on it and brought the patch into its current form.

In the new, simpler raid1 I/O barrier implementation there are two wait-barrier functions:
- wait_barrier(), which calls _wait_barrier(), is used for regular write I/O. If there is resync I/O happening on the same barrier bucket, or the whole array is frozen, the task waits until there is no barrier on the same bucket and the whole array is unfrozen.
- wait_read_barrier(): since regular read I/O won't interfere with resync I/O (read_balance() makes sure only up-to-date data is read out), it is unnecessary to wait for a barrier in regular reads; waiting is only necessary when the whole array is frozen.

The operations on conf->nr_pending[idx], conf->nr_waiting[idx] and conf->barrier[idx] are very carefully designed in raise_barrier(), lower_barrier(), _wait_barrier() and wait_read_barrier() to avoid unnecessary spin locks in these functions. Once conf->nr_pending[idx] is increased, a resync I/O with the same barrier bucket index has to wait in raise_barrier(). Then, in _wait_barrier(), if no barrier is raised on the same bucket index and the array is not frozen, regular I/O doesn't need to hold conf->resync_lock; it can just increase conf->nr_pending[idx] and return to its caller. wait_read_barrier() is very similar to _wait_barrier(); the only difference is that it waits only when the array is frozen. For heavily parallel read I/O, the lockless barrier code gets rid of almost all spin lock cost. (A condensed sketch of _wait_barrier() appears after reschedule_retry() below.)

This patch significantly improves raid1 read performance. In my testing, a raid1 device built from two NVMe SSDs, running fio with 64KB block size, 40 sequential-read jobs and 128 iodepth, improved overall throughput from 2.7GB/s to 4.6GB/s (+70%).

Changelog
V4:
- Change conf->nr_queued[] to atomic_t.
- Define BARRIER_BUCKETS_NR_BITS by (PAGE_SHIFT - ilog2(sizeof(atomic_t))).
V3:
- Add smp_mb__after_atomic() as Shaohua and Neil suggested.
- Change conf->nr_queued[] from atomic_t to int.
- Change conf->array_frozen from atomic_t back to int, and use READ_ONCE(conf->array_frozen) to check the value of conf->array_frozen in _wait_barrier() and wait_read_barrier().
- In _wait_barrier() and wait_read_barrier(), add a call to wake_up(&conf->wait_barrier) after atomic_dec(&conf->nr_pending[idx]), to fix a deadlock between _wait_barrier()/wait_read_barrier() and freeze_array().
V2:
- Remove a spin_lock/unlock pair in raid1d().
- Add more code comments to explain why there is no race when checking two atomic_t variables at the same time.
V1:
- Original RFC patch for comments.

Signed-off-by: Coly Li <colyli@suse.de>
Cc: Shaohua Li <shli@fb.com>
Cc: Hannes Reinecke <hare@suse.com>
Cc: Johannes Thumshirn <jthumshirn@suse.de>
Cc: Guoqing Jiang <gqjiang@suse.com>
Reviewed-by: Neil Brown <neilb@suse.de>
Signed-off-by: Shaohua Li <shli@fb.com>

        atomic_inc(&conf->nr_queued[idx]);
        spin_unlock_irqrestore(&conf->device_lock, flags);

        wake_up(&conf->wait_barrier);
        md_wakeup_thread(mddev->thread);
}
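
As promised in the commit message above, here is a condensed sketch of the lockless fast path in _wait_barrier() (simplified from the kernel code; frozen-array bookkeeping is abbreviated):

static void _wait_barrier(struct r1conf *conf, int idx)
{
        /* fast path: publish ourselves, then check for a barrier */
        atomic_inc(&conf->nr_pending[idx]);
        smp_mb__after_atomic();
        if (!READ_ONCE(conf->array_frozen) &&
            !atomic_read(&conf->barrier[idx]))
                return;         /* no resync in this bucket: no lock taken */

        /* slow path: back out and wait under conf->resync_lock */
        atomic_dec(&conf->nr_pending[idx]);
        atomic_inc(&conf->nr_waiting[idx]);
        wake_up(&conf->wait_barrier);

        spin_lock_irq(&conf->resync_lock);
        wait_event_lock_irq(conf->wait_barrier,
                            !conf->array_frozen &&
                            !atomic_read(&conf->barrier[idx]),
                            conf->resync_lock);
        atomic_inc(&conf->nr_pending[idx]);
        atomic_dec(&conf->nr_waiting[idx]);
        spin_unlock_irq(&conf->resync_lock);
}
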
/*
 * raid_end_bio_io() is called when we have finished servicing a mirrored
 * operation and are ready to return a success/failure code to the buffer
 * cache layer.
 */
static void call_bio_endio(struct r1bio *r1_bio)
{
        struct bio *bio = r1_bio->master_bio;

        if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
                bio->bi_status = BLK_STS_IOERR;

        bio_endio(bio);
}

static void raid_end_bio_io(struct r1bio *r1_bio)
{
        struct bio *bio = r1_bio->master_bio;
        struct r1conf *conf = r1_bio->mddev->private;

        /* if nobody has done the final endio yet, do it now */
        if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
                pr_debug("raid1: sync end %s on sectors %llu-%llu\n",
                         (bio_data_dir(bio) == WRITE) ? "write" : "read",
                         (unsigned long long) bio->bi_iter.bi_sector,
                         (unsigned long long) bio_end_sector(bio) - 1);

                call_bio_endio(r1_bio);
        }
        /*
         * Wake up any possible resync thread that waits for the device
         * to go idle. All I/Os, even write-behind writes, are done.
         */
        allow_barrier(conf, r1_bio->sector);

        free_r1bio(r1_bio);
}

/*
 * Update disk head position estimator based on IRQ completion info.
 */
static inline void update_head_pos(int disk, struct r1bio *r1_bio)
{
        struct r1conf *conf = r1_bio->mddev->private;

        conf->mirrors[disk].head_position =
                r1_bio->sector + (r1_bio->sectors);
}

/*
 * Find the disk number which triggered given bio
 */
static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio)
{
        int mirror;
        struct r1conf *conf = r1_bio->mddev->private;
        int raid_disks = conf->raid_disks;

        for (mirror = 0; mirror < raid_disks * 2; mirror++)
                if (r1_bio->bios[mirror] == bio)
                        break;

        BUG_ON(mirror == raid_disks * 2);
        update_head_pos(mirror, r1_bio);

        return mirror;
}

static void raid1_end_read_request(struct bio *bio)
{
        int uptodate = !bio->bi_status;
        struct r1bio *r1_bio = bio->bi_private;
        struct r1conf *conf = r1_bio->mddev->private;
        struct md_rdev *rdev = conf->mirrors[r1_bio->read_disk].rdev;

        /*
         * this branch is our 'one mirror IO has finished' event handler:
         */
        update_head_pos(r1_bio->read_disk, r1_bio);

        if (uptodate)
                set_bit(R1BIO_Uptodate, &r1_bio->state);
        else if (test_bit(FailFast, &rdev->flags) &&
                 test_bit(R1BIO_FailFast, &r1_bio->state))
                /* This was a fail-fast read so we definitely
                 * want to retry */
                ;
        else {
                /* If all other devices have failed, we want to return
                 * the error upwards rather than fail the last device.
                 * Here we redefine "uptodate" to mean "Don't want to retry"
                 */
                unsigned long flags;
                spin_lock_irqsave(&conf->device_lock, flags);
                if (r1_bio->mddev->degraded == conf->raid_disks ||
                    (r1_bio->mddev->degraded == conf->raid_disks-1 &&
                     test_bit(In_sync, &rdev->flags)))
                        uptodate = 1;
                spin_unlock_irqrestore(&conf->device_lock, flags);
        }

        if (uptodate) {
                raid_end_bio_io(r1_bio);
                rdev_dec_pending(rdev, conf->mddev);
        } else {
                /*
                 * oops, read error:
                 */
                char b[BDEVNAME_SIZE];
                pr_err_ratelimited("md/raid1:%s: %s: rescheduling sector %llu\n",
                                   mdname(conf->mddev),
                                   bdevname(rdev->bdev, b),
                                   (unsigned long long)r1_bio->sector);
                set_bit(R1BIO_ReadError, &r1_bio->state);
                reschedule_retry(r1_bio);
                /* don't drop the reference on read_disk yet */
        }
}

static void close_write(struct r1bio *r1_bio)
{
	/* it really is the end of this request */
	if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
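		/*
		 * Write-behind kept a private copy of the data pages in
		 * behind_master_bio; every device now has the data, so
		 * the copy can be freed.
		 */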
		bio_free_pages(r1_bio->behind_master_bio);
		bio_put(r1_bio->behind_master_bio);
		r1_bio->behind_master_bio = NULL;
	}
	/* clear the bitmap if all writes complete successfully */
	md_bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
			   r1_bio->sectors,
			   !test_bit(R1BIO_Degraded, &r1_bio->state),
			   test_bit(R1BIO_BehindIO, &r1_bio->state));
	md_write_end(r1_bio->mddev);
}

static void r1_bio_write_done(struct r1bio *r1_bio)
{
	if (!atomic_dec_and_test(&r1_bio->remaining))
		return;

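	/*
	 * That was the last reference: every mirror write for this r1_bio
	 * has completed, so finish the request, retrying first if any leg
	 * hit a write error or made a bad block good.
	 */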
	if (test_bit(R1BIO_WriteError, &r1_bio->state))
		reschedule_retry(r1_bio);
	else {
		close_write(r1_bio);
		if (test_bit(R1BIO_MadeGood, &r1_bio->state))
			reschedule_retry(r1_bio);
		else
			raid_end_bio_io(r1_bio);
	}
}

static void raid1_end_write_request(struct bio *bio)
{
	struct r1bio *r1_bio = bio->bi_private;
	int behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
	struct r1conf *conf = r1_bio->mddev->private;
	struct bio *to_put = NULL;
	int mirror = find_bio_disk(r1_bio, bio);
	struct md_rdev *rdev = conf->mirrors[mirror].rdev;
	bool discard_error;
	sector_t lo = r1_bio->sector;
	sector_t hi = r1_bio->sector + r1_bio->sectors;

	discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD;

	/*
	 * 'one mirror IO has finished' event handler:
	 */
	if (bio->bi_status && !discard_error) {
		set_bit(WriteErrorSeen, &rdev->flags);
		if (!test_and_set_bit(WantReplacement, &rdev->flags))
			set_bit(MD_RECOVERY_NEEDED, &conf->mddev->recovery);

		if (test_bit(FailFast, &rdev->flags) &&
		    (bio->bi_opf & MD_FAILFAST) &&
		    /* We never try FailFast to WriteMostly devices */
		    !test_bit(WriteMostly, &rdev->flags)) {
			md_error(r1_bio->mddev, rdev);
		}

		/*
		 * When the device is faulty, it is not necessary to
		 * handle the write error. For failfast, this was the
		 * only remaining device, so we need to retry the write
		 * without FailFast.
		 */
		if (!test_bit(Faulty, &rdev->flags))
			set_bit(R1BIO_WriteError, &r1_bio->state);
		else {
			/* Finished with this branch */
			r1_bio->bios[mirror] = NULL;
			to_put = bio;
		}
	} else {
		/*
		 * Set R1BIO_Uptodate in our master bio, so that we
		 * will return a good error code to the higher
		 * levels even if IO on some other mirrored buffer
		 * fails.
		 *
		 * The 'master' represents the composite IO operation
		 * to user-side. So if something waits for IO, then it
		 * will wait for the 'master' bio.
		 */
		sector_t first_bad;
		int bad_sectors;

		r1_bio->bios[mirror] = NULL;
		to_put = bio;
		/*
		 * Do not set R1BIO_Uptodate if the current device is
		 * rebuilding or Faulty. This is because we cannot use
		 * such a device for properly reading the data back (we
		 * could potentially use it, if the current write landed
		 * before rdev->recovery_offset, but for simplicity we
		 * don't check this here).
		 */
		if (test_bit(In_sync, &rdev->flags) &&
		    !test_bit(Faulty, &rdev->flags))
			set_bit(R1BIO_Uptodate, &r1_bio->state);

		/* Maybe we can clear some bad blocks. */
		if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
				&first_bad, &bad_sectors) && !discard_error) {
			r1_bio->bios[mirror] = IO_MADE_GOOD;
			set_bit(R1BIO_MadeGood, &r1_bio->state);
		}
	}

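	/*
	 * remove_serial() drops the [lo, hi) range that was recorded in
	 * the rdev's serialization tree when this write was issued (used
	 * to detect overlapping writes): here for write-behind devices,
	 * or below when serialize_policy is set.
	 */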
	if (behind) {
		if (test_bit(CollisionCheck, &rdev->flags))
			remove_serial(rdev, lo, hi);
		if (test_bit(WriteMostly, &rdev->flags))
			atomic_dec(&r1_bio->behind_remaining);

		/*
		 * In behind mode, we ACK the master bio once the I/O
		 * has safely reached all non-writemostly
		 * disks. Setting the Returned bit ensures that this
		 * gets done only once -- we don't ever want to return
		 * -EIO here, instead we'll wait
		 */
		if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) &&
		    test_bit(R1BIO_Uptodate, &r1_bio->state)) {
			/* Maybe we can return now */
			if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
				struct bio *mbio = r1_bio->master_bio;
				pr_debug("raid1: behind end write sectors"
					 " %llu-%llu\n",
					 (unsigned long long) mbio->bi_iter.bi_sector,
					 (unsigned long long) bio_end_sector(mbio) - 1);
				call_bio_endio(r1_bio);
			}
		}
	} else if (rdev->mddev->serialize_policy)
		remove_serial(rdev, lo, hi);
	if (r1_bio->bios[mirror] == NULL)
		rdev_dec_pending(rdev, conf->mddev);

	/*
	 * Let's see if all mirrored write operations have finished
	 * already.
	 */
	r1_bio_write_done(r1_bio);

	if (to_put)
		bio_put(to_put);
}

/*
 * I/O barriers in raid1 are bucketed rather than global: the array's LBA
 * space is divided into 64MB barrier units (BARRIER_UNIT_SECTOR_SIZE),
 * hashed into one of BARRIER_BUCKETS_NR buckets, so regular I/O only has
 * to wait for a resync I/O when both map to the same bucket. The bucket
 * for a bio is computed from its start sector (see raid1.h):
 *
 *	static inline int sector_to_idx(sector_t sector)
 *	{
 *		return hash_long(sector >> BARRIER_UNIT_SECTOR_BITS,
 *				 BARRIER_BUCKETS_NR_BITS);
 *	}
 *
 * A bio never crosses a barrier-unit boundary; it is split beforehand if
 * it would, using align_to_barrier_unit_end() below.
 */
static sector_t align_to_barrier_unit_end(sector_t start_sector,
					  sector_t sectors)
{
	sector_t len;

	WARN_ON(sectors == 0);
	/*
	 * len is the number of sectors from start_sector to end of the
	 * barrier unit which start_sector belongs to.
	 */
	len = round_up(start_sector + 1, BARRIER_UNIT_SECTOR_SIZE) -
	      start_sector;

	if (len > sectors)
		len = sectors;

	return len;
}
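
/*
 * Worked example: with BARRIER_UNIT_SECTOR_SIZE == 131072 (64MB in
 * 512-byte sectors), a request starting at sector 131000 for 2000
 * sectors is clipped to 131072 - 131000 = 72 sectors, ending exactly
 * at the barrier-unit boundary.
 */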

/*
 * This routine returns the disk from which the requested read should
 * be done. There is a per-array 'next expected sequential IO' sector
 * number - if this matches on the next IO then we use the last disk.
 * There is also a per-disk 'last known head position' sector that is
 * maintained from IRQ contexts; both the normal and the resync IO
 * completion handlers update this position correctly. If there is no
 * perfect sequential match then we pick the disk whose head is closest.
 *
 * If there are 2 mirrors in the same 2 devices, performance degrades
 * because position is mirror, not device based.
 *
 * The rdev for the device selected will have nr_pending incremented.
 */
static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sectors)
{
	const sector_t this_sector = r1_bio->sector;
	int sectors;
	int best_good_sectors;
	int best_disk, best_dist_disk, best_pending_disk;
	int has_nonrot_disk;
	int disk;
	sector_t best_dist;
	unsigned int min_pending;
	struct md_rdev *rdev;
	int choose_first;
	int choose_next_idle;

	rcu_read_lock();
	/*
	 * Check if we can balance. We can balance on the whole
	 * device if no resync is going on, or below the resync window.
	 * We take the first readable disk when above the resync window.
	 */
 retry:
	sectors = r1_bio->sectors;
	best_disk = -1;
	best_dist_disk = -1;
	best_dist = MaxSector;
	best_pending_disk = -1;
	min_pending = UINT_MAX;
	best_good_sectors = 0;
	has_nonrot_disk = 0;
	choose_next_idle = 0;
	clear_bit(R1BIO_FailFast, &r1_bio->state);

	if ((conf->mddev->recovery_cp < this_sector + sectors) ||
	    (mddev_is_clustered(conf->mddev) &&
	     md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector,
					    this_sector + sectors)))
		choose_first = 1;
	else
		choose_first = 0;

	for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
		sector_t dist;
		sector_t first_bad;
		int bad_sectors;
		unsigned int pending;
		bool nonrot;

		rdev = rcu_dereference(conf->mirrors[disk].rdev);
		if (r1_bio->bios[disk] == IO_BLOCKED
		    || rdev == NULL
		    || test_bit(Faulty, &rdev->flags))
			continue;
		if (!test_bit(In_sync, &rdev->flags) &&
		    rdev->recovery_offset < this_sector + sectors)
			continue;
		if (test_bit(WriteMostly, &rdev->flags)) {
			/* Don't balance among write-mostly, just
			 * use the first as a last resort */
			if (best_dist_disk < 0) {
				if (is_badblock(rdev, this_sector, sectors,
						&first_bad, &bad_sectors)) {
					if (first_bad <= this_sector)
						/* Cannot use this */
						continue;
					best_good_sectors = first_bad - this_sector;
				} else
					best_good_sectors = sectors;
				best_dist_disk = disk;
				best_pending_disk = disk;
			}
			continue;
		}
		/* This is a reasonable device to use. It might
		 * even be best.
		 */
		if (is_badblock(rdev, this_sector, sectors,
				&first_bad, &bad_sectors)) {
			if (best_dist < MaxSector)
				/* already have a better device */
				continue;
			if (first_bad <= this_sector) {
				/* Cannot read here. If this is the 'primary'
				 * device, then we must not read beyond
				 * bad_sectors from another device.
				 */
				bad_sectors -= (this_sector - first_bad);
				if (choose_first && sectors > bad_sectors)
					sectors = bad_sectors;
				if (best_good_sectors > sectors)
					best_good_sectors = sectors;

			} else {
				sector_t good_sectors = first_bad - this_sector;
				if (good_sectors > best_good_sectors) {
					best_good_sectors = good_sectors;
					best_disk = disk;
				}
				if (choose_first)
					break;
			}
			continue;
		} else {
			if ((sectors > best_good_sectors) && (best_disk >= 0))
				best_disk = -1;
			best_good_sectors = sectors;
		}

		if (best_disk >= 0)
			/* At least two disks to choose from so failfast is OK */
			set_bit(R1BIO_FailFast, &r1_bio->state);

		nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev));
		has_nonrot_disk |= nonrot;
		pending = atomic_read(&rdev->nr_pending);
		dist = abs(this_sector - conf->mirrors[disk].head_position);
		if (choose_first) {
			best_disk = disk;
			break;
		}
		/* Don't change to another disk for sequential reads */
		if (conf->mirrors[disk].next_seq_sect == this_sector
		    || dist == 0) {
			int opt_iosize = bdev_io_opt(rdev->bdev) >> 9;
			struct raid1_info *mirror = &conf->mirrors[disk];

			best_disk = disk;
			/*
			 * If buffered sequential IO size exceeds the
			 * optimal iosize, check whether there is an idle
			 * disk and, if so, choose it. read_balance could
			 * already have chosen an idle disk before noticing
			 * this is a sequential IO on the current disk.
			 * That doesn't matter: the current disk will go
			 * idle and be picked next time, once the first
			 * disk's IO size exceeds the optimal iosize. This
			 * way the first disk's IO size is at least the
			 * optimal iosize; the second disk's might be
			 * small, but that is not a big deal since when the
			 * second disk starts IO, the first one is likely
			 * still busy.
			 */
			if (nonrot && opt_iosize > 0 &&
			    mirror->seq_start != MaxSector &&
			    mirror->next_seq_sect > opt_iosize &&
			    mirror->next_seq_sect - opt_iosize >=
			    mirror->seq_start) {
				choose_next_idle = 1;
				continue;
			}
			break;
		}

		if (choose_next_idle)
			continue;

		if (min_pending > pending) {
			min_pending = pending;
			best_pending_disk = disk;
		}

		if (dist < best_dist) {
			best_dist = dist;
			best_dist_disk = disk;
		}
	}

	/*
	 * If all disks are rotational, choose the closest disk. If any
	 * disk is non-rotational, choose the disk with the fewest pending
	 * requests even if that disk is rotational; this may or may not
	 * be optimal for arrays with mixed rotational/non-rotational
	 * disks, depending on the workload.
	 */
	if (best_disk == -1) {
		if (has_nonrot_disk || min_pending == 0)
			best_disk = best_pending_disk;
		else
			best_disk = best_dist_disk;
	}

	if (best_disk >= 0) {
		rdev = rcu_dereference(conf->mirrors[best_disk].rdev);
		if (!rdev)
			goto retry;
		atomic_inc(&rdev->nr_pending);
		sectors = best_good_sectors;

		if (conf->mirrors[best_disk].next_seq_sect != this_sector)
			conf->mirrors[best_disk].seq_start = this_sector;

		conf->mirrors[best_disk].next_seq_sect = this_sector + sectors;
	}
	rcu_read_unlock();
	*max_sectors = sectors;

	return best_disk;
}
|
|
|
|
|
2017-04-05 04:05:51 +00:00
|
|
|
static void flush_bio_list(struct r1conf *conf, struct bio *bio)
|
|
|
|
{
|
|
|
|
/* flush any pending bitmap writes to disk before proceeding w/ I/O */
|
2018-08-01 22:20:50 +00:00
|
|
|
md_bitmap_unplug(conf->mddev->bitmap);
|
2017-04-05 04:05:51 +00:00
|
|
|
wake_up(&conf->wait_barrier);
|
|
|
|
|
|
|
|
while (bio) { /* submit pending writes */
|
|
|
|
struct bio *next = bio->bi_next;
|
2017-08-23 17:10:32 +00:00
|
|
|
struct md_rdev *rdev = (void *)bio->bi_disk;
|
2017-04-05 04:05:51 +00:00
|
|
|
bio->bi_next = NULL;
|
2017-08-23 17:10:32 +00:00
|
|
|
bio_set_dev(bio, rdev->bdev);
|
2017-04-05 04:05:51 +00:00
|
|
|
if (test_bit(Faulty, &rdev->flags)) {
|
2017-07-21 08:33:44 +00:00
|
|
|
bio_io_error(bio);
|
2017-04-05 04:05:51 +00:00
|
|
|
} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
|
2017-08-23 17:10:32 +00:00
|
|
|
!blk_queue_discard(bio->bi_disk->queue)))
|
2017-04-05 04:05:51 +00:00
|
|
|
/* Just ignore it */
|
|
|
|
bio_endio(bio);
|
|
|
|
else
|
2020-07-01 08:59:44 +00:00
|
|
|
submit_bio_noacct(bio);
|
2017-04-05 04:05:51 +00:00
|
|
|
bio = next;
|
2019-10-25 07:08:56 +00:00
|
|
|
cond_resched();
|
2017-04-05 04:05:51 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-10-11 05:49:05 +00:00
|
|
|
static void flush_pending_writes(struct r1conf *conf)
|
2008-03-04 22:29:29 +00:00
|
|
|
{
|
|
|
|
/* Any writes that have been queued but are awaiting
|
|
|
|
* bitmap updates get flushed here.
|
|
|
|
*/
|
|
|
|
spin_lock_irq(&conf->device_lock);
|
|
|
|
|
|
|
|
if (conf->pending_bio_list.head) {
|
2017-12-01 20:12:34 +00:00
|
|
|
struct blk_plug plug;
|
2008-03-04 22:29:29 +00:00
|
|
|
struct bio *bio;
|
2017-12-01 20:12:34 +00:00
|
|
|
|
2008-03-04 22:29:29 +00:00
|
|
|
bio = bio_list_get(&conf->pending_bio_list);
|
2011-10-11 05:50:01 +00:00
|
|
|
conf->pending_count = 0;
|
2008-03-04 22:29:29 +00:00
|
|
|
spin_unlock_irq(&conf->device_lock);
|
2017-12-03 21:21:04 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* As this is called in a wait_event() loop (see freeze_array),
|
|
|
|
* current->state might be TASK_UNINTERRUPTIBLE which will
|
|
|
|
* cause a warning when we prepare to wait again. As it is
|
|
|
|
* rare that this path is taken, it is perfectly safe to force
|
|
|
|
* us to go around the wait_event() loop again, so the warning
|
|
|
|
* is a false-positive. Silence the warning by resetting
|
|
|
|
* thread state
|
|
|
|
*/
|
|
|
|
__set_current_state(TASK_RUNNING);
|
2017-12-01 20:12:34 +00:00
|
|
|
blk_start_plug(&plug);
|
2017-04-05 04:05:51 +00:00
|
|
|
flush_bio_list(conf, bio);
|
2017-12-01 20:12:34 +00:00
|
|
|
blk_finish_plug(&plug);
|
2008-03-04 22:29:29 +00:00
|
|
|
} else
|
|
|
|
spin_unlock_irq(&conf->device_lock);
|
2011-03-10 07:52:07 +00:00
|
|
|
}
|
|
|
|
|
2006-01-06 08:20:12 +00:00
|
|
|
/* Barriers....
|
|
|
|
* Sometimes we need to suspend IO while we do something else,
|
|
|
|
* either some resync/recovery, or reconfigure the array.
|
|
|
|
* To do this we raise a 'barrier'.
|
|
|
|
* The 'barrier' is a counter that can be raised multiple times
|
|
|
|
* to count how many activities are happening which preclude
|
|
|
|
* normal IO.
|
|
|
|
* We can only raise the barrier if there is no pending IO.
|
|
|
|
* i.e. if nr_pending == 0.
|
|
|
|
* We choose only to raise the barrier if no-one is waiting for the
|
|
|
|
* barrier to go down. This means that as soon as an IO request
|
|
|
|
* is ready, no other operations which require a barrier will start
|
|
|
|
* until the IO request has had a chance.
|
|
|
|
*
|
|
|
|
* So: regular IO calls 'wait_barrier'. When that returns there
|
|
|
|
* is no background IO happening. It must arrange to call
|
|
|
|
* allow_barrier when it has finished its IO.
|
|
|
|
* background IO calls must call raise_barrier. Once that returns
|
|
|
|
* there is no normal IO happening. It must arrange to call
|
|
|
|
* lower_barrier when the particular background IO completes.
|
2019-07-02 14:35:48 +00:00
|
|
|
*
|
|
|
|
* If resync/recovery is interrupted, returns -EINTR;
|
|
|
|
* Otherwise, returns 0.
|
2005-04-16 22:20:36 +00:00
|
|
|
*/
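/*
 * Illustrative usage sketch (not from the original source), assuming the
 * interface described above: the resync side brackets its I/O with
 * raise_barrier()/lower_barrier(), and bails out on -EINTR.
 *
 *	if (raise_barrier(conf, sector_nr))
 *		return -EINTR;
 *	... submit resync I/O for this barrier bucket ...
 *	lower_barrier(conf, sector_nr);
 */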
|
2019-07-02 14:35:48 +00:00
|
|
|
static int raise_barrier(struct r1conf *conf, sector_t sector_nr)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
RAID1: a new I/O barrier implementation to remove resync window
'Commit 79ef3a8aa1cb ("raid1: Rewrite the implementation of iobarrier.")'
introduces a sliding resync window for the raid1 I/O barrier. This idea limits
I/O barriers to happen only inside the sliding resync window; regular I/Os
outside the resync window don't need to wait for the barrier any more. On a
large raid1 device, it helps a lot to improve parallel write I/O throughput
when there are background resync I/Os running at the same time.
The idea of the sliding resync window is awesome, but the code complexity is a
challenge. The sliding resync window requires several variables to work
together; this is complex and very hard to make work correctly. Just grep
"Fixes: 79ef3a8aa1" in the kernel git log: there are 8 more patches fixing the
original resync window patch. And that is not the end; any further related
modification may easily introduce more regressions.
Therefore I decided to implement a much simpler raid1 I/O barrier; by removing
the resync window code, I believe life will be much easier.
The brief idea of the simpler barrier is,
- Do not maintain a globally unique resync window
- Use multiple hash buckets to reduce I/O barrier conflicts: regular
  I/O only has to wait for a resync I/O when both have the same barrier
  bucket index, and vice versa.
- The number of I/O barrier conflicts can be reduced to an acceptable level
  if there are enough barrier buckets
Here I explain how the barrier buckets are designed,
- BARRIER_UNIT_SECTOR_SIZE
  The whole LBA address space of a raid1 device is divided into multiple
  barrier units, each of size BARRIER_UNIT_SECTOR_SIZE.
  Bio requests won't cross a barrier unit boundary, which means the
  maximum bio size is BARRIER_UNIT_SECTOR_SIZE<<9 (64MB) in bytes.
  For random I/O, 64MB is large enough for both read and write requests;
  for sequential I/O, considering that the underlying block layer may merge
  them into larger requests, 64MB is still good enough.
  Neil also points out that for the resync operation, "we want the resync to
  move from region to region fairly quickly so that the slowness caused
  by having to synchronize with the resync is averaged out over a fairly
  small time frame". For a full-speed resync, 64MB should take less than 1
  second. When resync is competing with other I/O, it could take up to a few
  minutes. Therefore 64MB is a fairly good range for resync.
- BARRIER_BUCKETS_NR
  There are BARRIER_BUCKETS_NR buckets in total, defined by,
        #define BARRIER_BUCKETS_NR_BITS (PAGE_SHIFT - 2)
        #define BARRIER_BUCKETS_NR (1<<BARRIER_BUCKETS_NR_BITS)
  This patch changes the following members of struct r1conf from integers
  to arrays of integers,
        - int nr_pending;
        - int nr_waiting;
        - int nr_queued;
        - int barrier;
        + int *nr_pending;
        + int *nr_waiting;
        + int *nr_queued;
        + int *barrier;
  The number of array elements is defined as BARRIER_BUCKETS_NR. For a 4KB
  kernel page size, (PAGE_SHIFT - 2) indicates there are 1024 I/O barrier
  buckets, and each array of integers occupies a single memory page. With
  1024 buckets, a request smaller than the I/O barrier unit size has a ~0.1%
  chance of waiting for resync to pause, which is a small enough fraction.
  Requesting a single memory page is also friendlier to the kernel page
  allocator than a larger allocation.
- An I/O barrier bucket is indexed by bio start sector
  If multiple I/O requests hit different I/O barrier units, they only need
  to compete for the I/O barrier with other I/Os that hit the same I/O
  barrier bucket index. The index of the barrier bucket which a bio should
  look at is calculated by sector_to_idx(), defined in raid1.h as an inline
  function,
        static inline int sector_to_idx(sector_t sector)
        {
                return hash_long(sector >> BARRIER_UNIT_SECTOR_BITS,
                                 BARRIER_BUCKETS_NR_BITS);
        }
  Here sector is the start sector number of a bio.
- A single bio won't cross the boundary of an I/O barrier unit
  If a request crosses a barrier unit boundary, it will be split. A bio may
  be split in raid1_make_request() or raid1_sync_request(), if the number of
  sectors returned by align_to_barrier_unit_end() is smaller than the
  original bio size.
Compared to the single sliding resync window,
- Currently resync I/O grows linearly, so regular and resync I/O
  will conflict within a single barrier unit. In this respect the I/O
  behavior is similar to the single sliding resync window.
- But a barrier bucket is shared by all barrier units with an identical
  barrier unit index, so the probability of conflict might be higher
  than with the single sliding resync window, in the case that writing I/Os
  always hit barrier units which have identical barrier bucket indexes to
  the resync I/Os. This is a very rare condition in real I/O workloads;
  I cannot imagine how it could happen in practice.
- Therefore we can achieve a low enough conflict rate with a much
  simpler barrier algorithm and implementation.
Two changes should be noted,
- In raid1d(), I changed the code that decreases conf->nr_pending[idx] into
  a single loop; it looks like this,
        spin_lock_irqsave(&conf->device_lock, flags);
        conf->nr_queued[idx]--;
        spin_unlock_irqrestore(&conf->device_lock, flags);
  This change generates more spin lock operations, but in the next patch of
  this patch set it will be replaced by a single line of code,
        atomic_dec(&conf->nr_queued[idx]);
  so we don't need to worry about the spin lock cost here.
- Mainline raid1 code split the original raid1_make_request() into
  raid1_read_request() and raid1_write_request(). If the original bio
  crosses an I/O barrier unit boundary, the bio will be split before
  calling raid1_read_request() or raid1_write_request(); this makes the
  code logic simpler and clearer.
- In this patch wait_barrier() is moved from raid1_make_request() to
  raid1_write_request(). In raid1_read_request(), the original
  wait_barrier() is replaced by wait_read_barrier().
  The difference is that wait_read_barrier() only waits if the array is
  frozen; using different barrier functions in different code paths makes
  the code cleaner and easier to read.
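As a concrete check of the arithmetic above (illustrative; 64MB per barrier
unit means BARRIER_UNIT_SECTOR_BITS is 17, since 64MB / 512 bytes per sector
= 2^17 sectors):

	/*
	 * Illustrative only: two sectors in the same 64MB unit map to the
	 * same bucket; sectors in different units usually do not.
	 */
	sector_t a = 1000;			/* unit 0 */
	sector_t b = 2000;			/* unit 0: same bucket as a */
	sector_t c = ((sector_t)1 << 17) + 8;	/* unit 1: usually differs */
	BUG_ON(sector_to_idx(a) != sector_to_idx(b));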
Changelog
V4:
- Add alloc_r1bio() to remove redundant r1bio memory allocation code.
- Fix many typos in patch comments.
- Use (PAGE_SHIFT - ilog2(sizeof(int))) to define BARRIER_BUCKETS_NR_BITS.
V3:
- Rebase the patch against the latest upstream kernel code.
- Many fixes from review comments by Neil,
  - Back to using pointers instead of arrays in struct r1conf
  - Remove total_barriers from struct r1conf
  - Add more patch comments to explain how/why the values of
    BARRIER_UNIT_SECTOR_SIZE and BARRIER_BUCKETS_NR were decided.
  - Use get_unqueued_pending() to replace get_all_pendings() and
    get_all_queued()
  - Increase the bucket number from 512 to 1024
- Change code comment format per review from Shaohua.
V2:
- Use bio_split() to split the original bio if it crosses a barrier unit
  boundary, to make the code simpler, by suggestion from Shaohua and Neil.
- Use hash_long() to replace the original linear hash, to avoid a possible
  conflict between resync I/O and sequential write I/O, by suggestion from
  Shaohua.
- Add conf->total_barriers to record barrier depth, which is used to
  control the number of parallel sync I/O barriers, by suggestion from
  Shaohua.
- In the V1 patch the barrier bucket related members of r1conf below were
  allocated in a memory page. To make the code simpler, the V2 patch moves
  the memory into struct r1conf, like this,
        - int nr_pending;
        - int nr_waiting;
        - int nr_queued;
        - int barrier;
        + int nr_pending[BARRIER_BUCKETS_NR];
        + int nr_waiting[BARRIER_BUCKETS_NR];
        + int nr_queued[BARRIER_BUCKETS_NR];
        + int barrier[BARRIER_BUCKETS_NR];
  This change is by suggestion from Shaohua.
- Remove some irrelevant code comments, by suggestion from Guoqing.
- Add a missing wait_barrier() before jumping to retry_write, in
  raid1_make_write_request().
V1:
- Original RFC patch for comments
Signed-off-by: Coly Li <colyli@suse.de>
Cc: Johannes Thumshirn <jthumshirn@suse.de>
Cc: Guoqing Jiang <gqjiang@suse.com>
Reviewed-by: Neil Brown <neilb@suse.de>
Signed-off-by: Shaohua Li <shli@fb.com>
2017-02-17 19:05:56 +00:00
|
|
|
int idx = sector_to_idx(sector_nr);
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
spin_lock_irq(&conf->resync_lock);
|
2006-01-06 08:20:12 +00:00
|
|
|
|
|
|
|
/* Wait until no block IO is waiting */
|
RAID1: avoid unnecessary spin locks in I/O barrier code
When I run a parallel read performance test on an md raid1 device with
two NVMe SSDs, I observe surprisingly bad throughput: with fio at 64KB
block size, 40 sequential read I/O jobs and 128 iodepth, overall throughput
is only 2.7GB/s, around 50% of the ideal performance number.
perf reports that locking contention happens in the allow_barrier() and
wait_barrier() code,
- 41.41%  fio [kernel.kallsyms]     [k] _raw_spin_lock_irqsave
   - _raw_spin_lock_irqsave
         + 89.92% allow_barrier
         + 9.34% __wake_up
- 37.30%  fio [kernel.kallsyms]     [k] _raw_spin_lock_irq
   - _raw_spin_lock_irq
      - 100.00% wait_barrier
The reason is that these I/O barrier related functions,
- raise_barrier()
- lower_barrier()
- wait_barrier()
- allow_barrier()
always take conf->resync_lock first, even when there is only regular read
I/O and no resync I/O at all. This is a huge performance penalty.
The solution is a lockless-like algorithm in the I/O barrier code, taking
conf->resync_lock only when it has to.
The original idea is from Hannes Reinecke, and Neil Brown provided
comments to improve it. I continued to work on it and brought the patch into
its current form.
In the new, simpler raid1 I/O barrier implementation, there are two
wait-barrier functions,
- wait_barrier()
  Which calls _wait_barrier(), is used for regular write I/O. If there is
  resync I/O happening on the same I/O barrier bucket, or the whole
  array is frozen, the task will wait until there is no barrier on the same
  barrier bucket, or the whole array is unfrozen.
- wait_read_barrier()
  Since regular read I/O won't interfere with resync I/O (read_balance()
  will make sure only up-to-date data is read out), it is unnecessary
  to wait for a barrier in regular read I/Os; waiting is only necessary
  when the whole array is frozen.
The operations on conf->nr_pending[idx], conf->nr_waiting[idx] and
conf->barrier[idx] are very carefully designed in raise_barrier(),
lower_barrier(), _wait_barrier() and wait_read_barrier(), in order to
avoid unnecessary spin locks in these functions. Once conf->nr_pending[idx]
is increased, a resync I/O with the same barrier bucket index has to wait in
raise_barrier(). Then in _wait_barrier(), if no barrier is raised on the
same barrier bucket index and the array is not frozen, the regular I/O
doesn't need to hold conf->resync_lock; it can just increase
conf->nr_pending[idx] and return to its caller. wait_read_barrier() is very
similar to _wait_barrier(); the only difference is that it only waits when
the array is frozen. For heavy parallel read I/O, the lockless I/O barrier
code gets rid of almost all of the spin lock cost.
This patch significantly improves raid1 read performance. In my testing, a
raid1 device built from two NVMe SSDs, running fio with 64KB blocksize,
40 sequential read I/O jobs and 128 iodepth, sees overall throughput
increase from 2.7GB/s to 4.6GB/s (+70%).
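Roughly, the fast path of _wait_barrier() described above reduces to the
following sketch (illustrative, not the exact patch):

	/*
	 * Illustrative only: regular I/O publishes itself first, then checks
	 * whether a resync barrier is raised on its bucket. The memory
	 * barrier pairs with the one in raise_barrier().
	 */
	atomic_inc(&conf->nr_pending[idx]);
	smp_mb__after_atomic();
	if (!READ_ONCE(conf->array_frozen) &&
	    !atomic_read(&conf->barrier[idx]))
		return;			/* fast path: no lock taken */
	/* slow path: undo, wake any waiters, fall back to resync_lock */
	atomic_dec(&conf->nr_pending[idx]);
	wake_up(&conf->wait_barrier);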
Changelog
V4:
- Change conf->nr_queued[] to atomic_t.
- Define BARRIER_BUCKETS_NR_BITS by (PAGE_SHIFT - ilog2(sizeof(atomic_t)))
V3:
- Add smp_mb__after_atomic() as Shaohua and Neil suggested.
- Change conf->nr_queued[] from atomic_t to int.
- Change conf->array_frozen from atomic_t back to int, and use
  READ_ONCE(conf->array_frozen) to check the value of conf->array_frozen
  in _wait_barrier() and wait_read_barrier().
- In _wait_barrier() and wait_read_barrier(), add a call to
  wake_up(&conf->wait_barrier) after atomic_dec(&conf->nr_pending[idx]),
  to fix a deadlock between _wait_barrier()/wait_read_barrier() and
  freeze_array().
V2:
- Remove a spin_lock/unlock pair in raid1d().
- Add more code comments to explain why there is no race when checking two
  atomic_t variables at the same time.
V1:
- Original RFC patch for comments.
Signed-off-by: Coly Li <colyli@suse.de>
Cc: Shaohua Li <shli@fb.com>
Cc: Hannes Reinecke <hare@suse.com>
Cc: Johannes Thumshirn <jthumshirn@suse.de>
Cc: Guoqing Jiang <gqjiang@suse.com>
Reviewed-by: Neil Brown <neilb@suse.de>
Signed-off-by: Shaohua Li <shli@fb.com>
2017-02-17 19:05:57 +00:00
|
|
|
wait_event_lock_irq(conf->wait_barrier,
|
|
|
|
!atomic_read(&conf->nr_waiting[idx]),
|
2012-11-30 10:42:40 +00:00
|
|
|
conf->resync_lock);
|
2006-01-06 08:20:12 +00:00
|
|
|
|
|
|
|
/* block any new IO from starting */
|
RAID1: avoid unnecessary spin locks in I/O barrier code
2017-02-17 19:05:57 +00:00
|
|
|
atomic_inc(&conf->barrier[idx]);
|
|
|
|
/*
|
|
|
|
* In raise_barrier() we firstly increase conf->barrier[idx] then
|
|
|
|
* check conf->nr_pending[idx]. In _wait_barrier() we firstly
|
|
|
|
* increase conf->nr_pending[idx] then check conf->barrier[idx].
|
|
|
|
* We need a memory barrier here to make sure conf->nr_pending[idx] won't
|
|
|
|
* be fetched before conf->barrier[idx] is increased. Otherwise
|
|
|
|
* there will be a race between raise_barrier() and _wait_barrier().
|
|
|
|
*/
|
|
|
|
smp_mb__after_atomic();
|
2006-01-06 08:20:12 +00:00
|
|
|
|
raid1: Rewrite the implementation of iobarrier.
There is an iobarrier in raid1 because of contention between normal IO and
resync IO. It suspends all normal IO when resync/recovery happens.
However, if normal IO is outside the resync window, there is no contention.
So this patch changes the barrier mechanism to only block IO that
could contend with the resync that is currently happening.
We partition the whole space into five parts.
|---------|-----------|------------|----------------|-------|
        start   next_resync   start_next_window   end_window
start + RESYNC_WINDOW = next_resync
next_resync + NEXT_NORMALIO_DISTANCE = start_next_window
start_next_window + NEXT_NORMALIO_DISTANCE = end_window
Firstly we introduce some concepts:
1 - RESYNC_WINDOW: For resync, there are at most 32 resync requests at the
    same time. A sync request is RESYNC_BLOCK_SIZE (64*1024).
    So the RESYNC_WINDOW is 32 * RESYNC_BLOCK_SIZE, that is 2MB.
2 - NEXT_NORMALIO_DISTANCE: the distance between next_resync
    and start_next_window. It also indicates the distance between
    start_next_window and end_window.
    It is currently 3 * RESYNC_WINDOW_SIZE but could be tuned if
    this turned out not to be optimal.
3 - next_resync: the next sector at which we will do sync IO.
4 - start: a position which is at most RESYNC_WINDOW before
    next_resync.
5 - start_next_window: a position which is NEXT_NORMALIO_DISTANCE
    beyond next_resync. Normal IO after this position doesn't need to
    wait for resync IO to complete.
6 - end_window: a position which is 2 * NEXT_NORMALIO_DISTANCE beyond
    next_resync. This also doesn't need to wait, but is counted
    differently.
7 - current_window_requests: the count of normal IO between
    start_next_window and end_window.
8 - next_window_requests: the count of normal IO after end_window.
Normal IO will be partitioned into four types:
NormIO1: the end sector of the bio is smaller than or equal to start
NormIO2: the start sector of the bio is greater than or equal to end_window
NormIO3: the start sector of the bio is greater than or equal to
         start_next_window.
NormIO4: the bio lies between start_next_window and end_window
|--------|-----------|--------------------|----------------|-------------|
   start      next_resync    start_next_window       end_window
 NormIO1    NormIO4            NormIO4        NormIO3        NormIO2
For NormIO1, we don't need any io barrier.
For NormIO4, we use an approach similar to the original iobarrier
mechanism: the normal IO and resync IO must be kept separate.
For NormIO2/3, we add two fields to struct r1conf: "current_window_requests"
and "next_window_requests". They indicate the count of active
requests in the two windows.
For these, we don't wait for resync IO to complete.
For the resync action, if there are NormIO4s, we must wait for them.
If not, we can proceed.
But if the resync action reaches start_next_window and
current_window_requests > 0 (that is, there are NormIO3s), we must
wait until current_window_requests becomes zero.
When current_window_requests becomes zero, start_next_window also
moves forward. Then current_window_requests will be replaced by
next_window_requests.
There is the problem of when and how to change from NormIO2 to
NormIO3. Only then can the sync action progress.
We add a field "start_next_window" to struct r1conf.
A: if start_next_window == MaxSector, it means there are no NormIO2/3.
   So start_next_window = next_resync + NEXT_NORMALIO_DISTANCE
B: if current_window_requests == 0 && next_window_requests != 0, it
   means start_next_window moves to end_window
There is another problem of how to differentiate between
old NormIO2 (now NormIO3) and NormIO2.
For example, there are many bios which are NormIO2 and one bio which is
NormIO3. The NormIO3 completes first, so the NormIO2 bios become NormIO3.
We add a field "start_next_window" to struct r1bio.
It is used to record the value of conf->start_next_window when the call
to wait_barrier() is made in make_request().
In allow_barrier(), we check conf->start_next_window.
If r1bio->start_next_window == conf->start_next_window, it means
there was no transition between NormIO2 and NormIO3.
If r1bio->start_next_window != conf->start_next_window, it means
there was a transition between NormIO2 and NormIO3. There can only
have been one transition, so it simply means the bio is an old NormIO2.
For one bio, there may be many r1bios. So we make sure
all the r1bio->start_next_window values are the same.
If we meet blocked_dev in make_request(), we must call allow_barrier
and wait_barrier, so the earlier and later values of
conf->start_next_window will differ.
If there are many r1bios with different start_next_window values,
the relevant bio would depend on the last r1bio's value,
which would cause errors. To avoid this, we must wait for previous r1bios
to complete.
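For reference, the window positions above reduce to this arithmetic
(illustrative; RESYNC_WINDOW_SECTORS is assumed to be the sector-unit form
of RESYNC_WINDOW, and start is the lower bound "at most RESYNC_WINDOW
before next_resync"):

	/* Illustrative only: derive the window boundaries from next_resync. */
	sector_t start             = next_resync - RESYNC_WINDOW_SECTORS;
	sector_t start_next_window = next_resync + NEXT_NORMALIO_DISTANCE;
	sector_t end_window        = start_next_window + NEXT_NORMALIO_DISTANCE;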
Signed-off-by: Jianpeng Ma <majianpeng@gmail.com>
Signed-off-by: NeilBrown <neilb@suse.de>
2013-11-15 06:55:02 +00:00
|
|
|
/* For these conditions we must wait:
|
|
|
|
* A: while the array is in frozen state
|
RAID1: a new I/O barrier implementation to remove resync window
2017-02-17 19:05:56 +00:00
|
|
|
* B: while conf->nr_pending[idx] is not 0, meaning regular I/O
|
|
|
|
*    exists in the corresponding I/O barrier bucket.
|
|
|
|
* C: while conf->barrier[idx] >= RESYNC_DEPTH, meaning we have reached
|
|
|
|
*    the max resync count allowed on the current I/O barrier bucket.
|
raid1: Rewrite the implementation of iobarrier.
2013-11-15 06:55:02 +00:00
|
|
|
*/
|
2006-01-06 08:20:12 +00:00
|
|
|
wait_event_lock_irq(conf->wait_barrier,
|
2018-04-09 01:50:44 +00:00
|
|
|
(!conf->array_frozen &&
|
RAID1: avoid unnecessary spin locks in I/O barrier code
2017-02-17 19:05:57 +00:00
|
|
|
!atomic_read(&conf->nr_pending[idx]) &&
|
2018-04-09 01:50:44 +00:00
|
|
|
atomic_read(&conf->barrier[idx]) < RESYNC_DEPTH) ||
|
|
|
|
test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery),
|
2012-11-30 10:42:40 +00:00
|
|
|
conf->resync_lock);
|
2006-01-06 08:20:12 +00:00
|
|
|
|
2018-04-09 01:50:44 +00:00
|
|
|
if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
|
|
|
|
atomic_dec(&conf->barrier[idx]);
|
|
|
|
spin_unlock_irq(&conf->resync_lock);
|
|
|
|
wake_up(&conf->wait_barrier);
|
|
|
|
return -EINTR;
|
|
|
|
}
|
|
|
|
|
2017-04-27 08:28:49 +00:00
|
|
|
atomic_inc(&conf->nr_sync_pending);
|
2006-01-06 08:20:12 +00:00
|
|
|
spin_unlock_irq(&conf->resync_lock);
|
2018-04-09 01:50:44 +00:00
|
|
|
|
|
|
|
return 0;
|
2006-01-06 08:20:12 +00:00
|
|
|
}
|
|
|
|
|
RAID1: a new I/O barrier implementation to remove resync window
2017-02-17 19:05:56 +00:00
|
|
|
static void lower_barrier(struct r1conf *conf, sector_t sector_nr)
|
2006-01-06 08:20:12 +00:00
|
|
|
{
|
RAID1: a new I/O barrier implementation to remove resync window
'Commit 79ef3a8aa1cb ("raid1: Rewrite the implementation of iobarrier.")'
introduces a sliding resync window for raid1 I/O barrier, this idea limits
I/O barriers to happen only inside a slidingresync window, for regular
I/Os out of this resync window they don't need to wait for barrier any
more. On large raid1 device, it helps a lot to improve parallel writing
I/O throughput when there are background resync I/Os performing at
same time.
The idea of sliding resync widow is awesome, but code complexity is a
challenge. Sliding resync window requires several variables to work
collectively, this is complexed and very hard to make it work correctly.
Just grep "Fixes: 79ef3a8aa1" in kernel git log, there are 8 more patches
to fix the original resync window patch. This is not the end, any further
related modification may easily introduce more regreassion.
Therefore I decide to implement a much simpler raid1 I/O barrier, by
removing resync window code, I believe life will be much easier.
The brief idea of the simpler barrier is,
- Do not maintain a global unique resync window
- Use multiple hash buckets to reduce I/O barrier conflicts, regular
I/O only has to wait for a resync I/O when both them have same barrier
bucket index, vice versa.
- I/O barrier can be reduced to an acceptable number if there are enough
barrier buckets
Here I explain how the barrier buckets are designed,
- BARRIER_UNIT_SECTOR_SIZE
The whole LBA address space of a raid1 device is divided into multiple
barrier units, by the size of BARRIER_UNIT_SECTOR_SIZE.
Bio requests won't go across border of barrier unit size, that means
maximum bio size is BARRIER_UNIT_SECTOR_SIZE<<9 (64MB) in bytes.
For random I/O 64MB is large enough for both read and write requests,
for sequential I/O considering underlying block layer may merge them
into larger requests, 64MB is still good enough.
Neil also points out that for resync operation, "we want the resync to
move from region to region fairly quickly so that the slowness caused
by having to synchronize with the resync is averaged out over a fairly
small time frame". For full speed resync, 64MB should take less then 1
second. When resync is competing with other I/O, it could take up a few
minutes. Therefore 64MB size is fairly good range for resync.
- BARRIER_BUCKETS_NR
There are BARRIER_BUCKETS_NR buckets in total, which is defined by,
#define BARRIER_BUCKETS_NR_BITS (PAGE_SHIFT - 2)
#define BARRIER_BUCKETS_NR (1<<BARRIER_BUCKETS_NR_BITS)
this patch makes the bellowed members of struct r1conf from integer
to array of integers,
- int nr_pending;
- int nr_waiting;
- int nr_queued;
- int barrier;
+ int *nr_pending;
+ int *nr_waiting;
+ int *nr_queued;
+ int *barrier;
number of the array elements is defined as BARRIER_BUCKETS_NR. For 4KB
kernel space page size, (PAGE_SHIFT - 2) indecates there are 1024 I/O
barrier buckets, and each array of integers occupies single memory page.
1024 means for a request which is smaller than the I/O barrier unit size
has ~0.1% chance to wait for resync to pause, which is quite a small
enough fraction. Also requesting single memory page is more friendly to
kernel page allocator than larger memory size.
- I/O barrier bucket is indexed by bio start sector
If multiple I/O requests hit different I/O barrier units, they only need
to compete I/O barrier with other I/Os which hit the same I/O barrier
bucket index with each other. The index of a barrier bucket which a
bio should look for is calculated by sector_to_idx() which is defined
in raid1.h as an inline function,
static inline int sector_to_idx(sector_t sector)
{
return hash_long(sector >> BARRIER_UNIT_SECTOR_BITS,
BARRIER_BUCKETS_NR_BITS);
}
Here sector_nr is the start sector number of a bio.
- Single bio won't go across boundary of a I/O barrier unit
If a request goes across boundary of barrier unit, it will be split. A
bio may be split in raid1_make_request() or raid1_sync_request(), if
sectors returned by align_to_barrier_unit_end() is smaller than
original bio size.
Compared to the single sliding resync window,
- Resync I/O currently advances linearly, so regular and resync I/O
will conflict within a single barrier unit. In that respect the I/O
behavior is similar to the single sliding resync window.
- But a barrier bucket is shared by all barrier units with the same
bucket index, so the probability of conflict could be higher than with
a single sliding resync window if writing I/Os always hit barrier units
whose bucket indexes match those of the resync I/Os. This is a very
rare condition in real I/O workloads; I cannot imagine how it could
happen in practice.
- Therefore we can achieve a low enough conflict rate with a much
simpler barrier algorithm and implementation.
There are a few changes that should be noticed,
- In raid1d(), the code that decreases conf->nr_queued[idx] is moved
into a single loop; it looks like this,
spin_lock_irqsave(&conf->device_lock, flags);
conf->nr_queued[idx]--;
spin_unlock_irqrestore(&conf->device_lock, flags);
This change generates more spin lock operations, but in the next patch
of this patch set it will be replaced by a single line of code,
atomic_dec(&conf->nr_queued[idx]);
so we don't need to worry about the spin lock cost here.
- Mainline raid1 code split the original raid1_make_request() into
raid1_read_request() and raid1_write_request(). If the original bio
crosses an I/O barrier unit boundary, the bio will be split before
calling raid1_read_request() or raid1_write_request(); this makes the
code logic simpler and clearer (see the sketch after this list).
- In this patch wait_barrier() is moved from raid1_make_request() to
raid1_write_request(). In raid1_read_request(), the original
wait_barrier() is replaced by wait_read_barrier().
The difference is that wait_read_barrier() only waits if the array is
frozen; using different barrier functions in different code paths makes
the code cleaner and easier to read.
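The split-then-dispatch pattern can be sketched as follows, assuming
the mainline bio_split()/bio_chain() helpers; the bio_set used and the
dispatch signatures are illustrative,
	sector_t sectors = align_to_barrier_unit_end(bio->bi_iter.bi_sector,
						     bio_sectors(bio));
	if (sectors < bio_sectors(bio)) {
		struct bio *split = bio_split(bio, sectors, GFP_NOIO,
					      fs_bio_set);
		bio_chain(split, bio);
		generic_make_request(bio);	/* remainder re-enters md */
		bio = split;			/* handle the aligned head */
	}
	if (bio_data_dir(bio) == READ)
		raid1_read_request(mddev, bio);
	else
		raid1_write_request(mddev, bio);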
Changelog
V4:
- Add alloc_r1bio() to remove redundant r1bio memory allocation code.
- Fix many typos in patch comments.
- Use (PAGE_SHIFT - ilog2(sizeof(int))) to define BARRIER_BUCKETS_NR_BITS.
V3:
- Rebase the patch against latest upstream kernel code.
- Many fixes based on review comments from Neil,
- Went back to using pointers instead of arrays in struct r1conf
- Remove total_barriers from struct r1conf
- Add more patch comments to explain how/why the values of
BARRIER_UNIT_SECTOR_SIZE and BARRIER_BUCKETS_NR are decided.
- Use get_unqueued_pending() to replace get_all_pendings() and
get_all_queued()
- Increase bucket number from 512 to 1024
- Change code comments format by review from Shaohua.
V2:
- Use bio_split() to split the original bio if it crosses a barrier
unit boundary, to make the code simpler, as suggested by Shaohua and
Neil.
- Use hash_long() to replace the original linear hash, to avoid a
possible conflict between resync I/O and sequential write I/O, as
suggested by Shaohua.
- Add conf->total_barriers to record barrier depth, used to control the
number of parallel sync I/O barriers, as suggested by Shaohua.
- In the V1 patch, the following barrier-bucket related members of
r1conf were allocated in a separate memory page. To make the code
simpler, the V2 patch moves them into struct r1conf, like this,
- int nr_pending;
- int nr_waiting;
- int nr_queued;
- int barrier;
+ int nr_pending[BARRIER_BUCKETS_NR];
+ int nr_waiting[BARRIER_BUCKETS_NR];
+ int nr_queued[BARRIER_BUCKETS_NR];
+ int barrier[BARRIER_BUCKETS_NR];
This change was suggested by Shaohua.
- Remove some irrelevant code comments, as suggested by Guoqing.
- Add a missing wait_barrier() before jumping to retry_write, in
raid1_make_write_request().
V1:
- Original RFC patch for comments
Signed-off-by: Coly Li <colyli@suse.de>
Cc: Johannes Thumshirn <jthumshirn@suse.de>
Cc: Guoqing Jiang <gqjiang@suse.com>
Reviewed-by: Neil Brown <neilb@suse.de>
Signed-off-by: Shaohua Li <shli@fb.com>
2017-02-17 19:05:56 +00:00
|
|
|
int idx = sector_to_idx(sector_nr);
|
|
|
|
|
RAID1: avoid unnecessary spin locks in I/O barrier code
When I ran a parallel-read performance test on an md raid1 device with
two NVMe SSDs, I was surprised to observe very bad throughput: with fio
at 64KB block size, 40 sequential-read I/O jobs and 128 iodepth,
overall throughput was only 2.7GB/s, around 50% of the ideal
performance number.
The perf reports locking contention happens at allow_barrier() and
wait_barrier() code,
- 41.41% fio [kernel.kallsyms] [k] _raw_spin_lock_irqsave
- _raw_spin_lock_irqsave
+ 89.92% allow_barrier
+ 9.34% __wake_up
- 37.30% fio [kernel.kallsyms] [k] _raw_spin_lock_irq
- _raw_spin_lock_irq
- 100.00% wait_barrier
The reason is, in these I/O barrier related functions,
- raise_barrier()
- lower_barrier()
- wait_barrier()
- allow_barrier()
They always take conf->resync_lock first, even when there is only
regular read I/O and no resync I/O at all. This is a huge performance
penalty.
The solution is a lockless-like algorithm in the I/O barrier code that
only takes conf->resync_lock when it has to.
The original idea is from Hannes Reinecke, and Neil Brown provided
comments to improve it. I continued to work on it and brought the patch
into its current form.
In the new, simpler raid1 I/O barrier implementation, there are two
wait-barrier functions,
- wait_barrier()
This calls _wait_barrier() and is used for regular write I/O. If there
is resync I/O happening on the same I/O barrier bucket, or the whole
array is frozen, the task will wait until no barrier is raised on the
same barrier bucket and the whole array is unfrozen.
- wait_read_barrier()
Since regular read I/O won't interfere with resync I/O (read_balance()
makes sure only up-to-date data is read), regular read I/O does not
need to wait for a barrier; waiting is only necessary when the whole
array is frozen. A sketch of these entry points follows this list.
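A minimal sketch of the write-side entry point, assuming the per-bucket
index comes from sector_to_idx() as in the previous patch (the
signature is illustrative),
static void wait_barrier(struct r1conf *conf, sector_t sector_nr)
{
	int idx = sector_to_idx(sector_nr);

	_wait_barrier(conf, idx);	/* writes: wait on bucket or freeze */
}
wait_read_barrier() takes the same (conf, sector_nr) pair, but only
blocks while conf->array_frozen is set.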
The operations on conf->nr_pending[idx], conf->nr_waiting[idx] and
conf->barrier[idx] are very carefully designed in raise_barrier(),
lower_barrier(), _wait_barrier() and wait_read_barrier(), in order to
avoid unnecessary spin locks in these functions. Once
conf->nr_pending[idx] is increased, a resync I/O with the same barrier
bucket index has to wait in raise_barrier(). Then, in _wait_barrier(),
if no barrier is raised on the same barrier bucket index and the array
is not frozen, the regular I/O doesn't need to hold conf->resync_lock;
it can just increase conf->nr_pending[idx] and return to its caller.
wait_read_barrier() is very similar to _wait_barrier(); the only
difference is that it only waits when the array is frozen. For heavy
parallel read I/O, the lockless I/O barrier code gets rid of almost all
spin lock cost.
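The ordering can be condensed like this (a sketch; the full code with
its comments appears in the hunks below),
	/* regular I/O side: _wait_barrier() fast path */
	atomic_inc(&conf->nr_pending[idx]);
	smp_mb__after_atomic();
	if (!READ_ONCE(conf->array_frozen) &&
	    !atomic_read(&conf->barrier[idx]))
		return;		/* fast path: conf->resync_lock not taken */

	/* resync side: raise_barrier() uses the mirror order, i.e.
	 * atomic_inc(&conf->barrier[idx]) then smp_mb__after_atomic(),
	 * before waiting for conf->nr_pending[idx] to drain under
	 * conf->resync_lock. */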
This patch significantly improves raid1 read performance. In my
testing, on a raid1 device built from two NVMe SSDs, running fio with
64KB blocksize, 40 sequential-read I/O jobs and 128 iodepth, overall
throughput increases from 2.7GB/s to 4.6GB/s (+70%).
Changelog
V4:
- Change conf->nr_queued[] to atomic_t.
- Define BARRIER_BUCKETS_NR_BITS by (PAGE_SHIFT - ilog2(sizeof(atomic_t)))
V3:
- Add smp_mb__after_atomic() as Shaohua and Neil suggested.
- Change conf->nr_queued[] from atomic_t to int.
- Change conf->array_frozen from atomic_t back to int, and use
READ_ONCE(conf->array_frozen) to check value of conf->array_frozen
in _wait_barrier() and wait_read_barrier().
- In _wait_barrier() and wait_read_barrier(), add a call to
wake_up(&conf->wait_barrier) after atomic_dec(&conf->nr_pending[idx]),
to fix a deadlock between _wait_barrier()/wait_read_barrier() and
freeze_array().
V2:
- Remove a spin_lock/unlock pair in raid1d().
- Add more code comments to explain why there is no race when checking
two atomic_t variables at the same time.
V1:
- Original RFC patch for comments.
Signed-off-by: Coly Li <colyli@suse.de>
Cc: Shaohua Li <shli@fb.com>
Cc: Hannes Reinecke <hare@suse.com>
Cc: Johannes Thumshirn <jthumshirn@suse.de>
Cc: Guoqing Jiang <gqjiang@suse.com>
Reviewed-by: Neil Brown <neilb@suse.de>
Signed-off-by: Shaohua Li <shli@fb.com>
2017-02-17 19:05:57 +00:00
|
|
|
BUG_ON(atomic_read(&conf->barrier[idx]) <= 0);
|
2017-02-17 19:05:56 +00:00
|
|
|
|
2017-02-17 19:05:57 +00:00
|
|
|
atomic_dec(&conf->barrier[idx]);
|
2017-04-27 08:28:49 +00:00
|
|
|
atomic_dec(&conf->nr_sync_pending);
|
2006-01-06 08:20:12 +00:00
|
|
|
wake_up(&conf->wait_barrier);
|
|
|
|
}
|
|
|
|
|
2017-02-17 19:05:56 +00:00
|
|
|
static void _wait_barrier(struct r1conf *conf, int idx)
|
2006-01-06 08:20:12 +00:00
|
|
|
{
|
2017-02-17 19:05:57 +00:00
|
|
|
/*
|
|
|
|
* We need to increase conf->nr_pending[idx] very early here,
|
|
|
|
* then raise_barrier() can be blocked when it waits for
|
|
|
|
* conf->nr_pending[idx] to be 0. Then we can avoid holding
|
|
|
|
* conf->resync_lock when there is no barrier raised in same
|
|
|
|
* barrier unit bucket. Also if the array is frozen, I/O
|
|
|
|
* should be blocked until array is unfrozen.
|
|
|
|
*/
|
|
|
|
atomic_inc(&conf->nr_pending[idx]);
|
|
|
|
/*
|
|
|
|
* In _wait_barrier() we first increase conf->nr_pending[idx], then
|
|
|
|
* check conf->barrier[idx]. In raise_barrier() we first increase
|
|
|
|
* conf->barrier[idx], then check conf->nr_pending[idx]. A memory
|
|
|
|
* barrier is necessary here to make sure conf->barrier[idx] won't be
|
|
|
|
* fetched before conf->nr_pending[idx] is increased. Otherwise there
|
|
|
|
* will be a race between _wait_barrier() and raise_barrier().
|
|
|
|
*/
|
|
|
|
smp_mb__after_atomic();
|
raid1: Rewrite the implementation of iobarrier.
There is an iobarrier in raid1 because of contention between normal IO and
resync IO. It suspends all normal IO when resync/recovery happens.
However, if normal IO is outside the resync window, there is no contention.
So this patch changes the barrier mechanism to only block IO that
could contend with the resync that is currently happening.
We partition the whole space into five parts.
|---------|-----------|------------|----------------|-------|
start next_resync start_next_window end_window
start + RESYNC_WINDOW = next_resync
next_resync + NEXT_NORMALIO_DISTANCE = start_next_window
start_next_window + NEXT_NORMALIO_DISTANCE = end_window
Firstly we introduce some concepts:
1 - RESYNC_WINDOW: For resync, there are at most 32 resync requests in
flight at the same time. Each sync request is RESYNC_BLOCK_SIZE
(64*1024 bytes), so the RESYNC_WINDOW is 32 * RESYNC_BLOCK_SIZE,
that is 2MB.
2 - NEXT_NORMALIO_DISTANCE: the distance between next_resync
and start_next_window. It also indicates the distance between
start_next_window and end_window.
It is currently 3 * RESYNC_WINDOW_SIZE but could be tuned if
this turns out not to be optimal. (These distances are sketched in
code after this list.)
3 - next_resync: the next sector at which we will do sync IO.
4 - start: a position which is at most RESYNC_WINDOW before
next_resync.
5 - start_next_window: a position which is NEXT_NORMALIO_DISTANCE
beyond next_resync. Normal-io after this position doesn't need to
wait for resync-io to complete.
6 - end_window: a position which is 2 * NEXT_NORMALIO_DISTANCE beyond
next_resync. This also doesn't need to wait, but is counted
differently.
7 - current_window_requests: the count of normalIO between
start_next_window and end_window.
8 - next_window_requests: the count of normalIO after end_window.
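The window geometry can be sketched in code as follows (a sketch only:
the macro names follow this description, and the >>9 byte-to-sector
conversion is an assumption),
#define RESYNC_BLOCK_SIZE	(64 * 1024)			/* one sync request, bytes */
#define RESYNC_WINDOW		(32 * RESYNC_BLOCK_SIZE)	/* 2MB */
#define RESYNC_WINDOW_SIZE	(RESYNC_WINDOW >> 9)		/* in sectors */
#define NEXT_NORMALIO_DISTANCE	(3 * RESYNC_WINDOW_SIZE)
/*
 * start + RESYNC_WINDOW                       == next_resync
 * next_resync + NEXT_NORMALIO_DISTANCE        == start_next_window
 * start_next_window + NEXT_NORMALIO_DISTANCE  == end_window
 */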
NormalIO will be partitioned into four types:
NormIO1: the end sector of the bio is smaller than or equal to start
NormIO2: the start sector of the bio is larger than or equal to
end_window
NormIO3: the start sector of the bio is larger than or equal to
start_next_window.
NormIO4: the location between start and start_next_window
|--------|-----------|--------------------|----------------|-------------|
| start | next_resync | start_next_window | end_window |
NormIO1 NormIO4 NormIO4 NormIO3 NormIO2
For NormIO1, we don't need any io barrier.
For NormIO4, we used a similar approach to the original iobarrier
mechanism. The normalIO and resyncIO must be kept separate.
For NormIO2/3, we add two fields to struct r1conf: "current_window_requests"
and "next_window_requests". They indicate the count of active
requests in the two windows.
For these, we don't wait for resync io to complete.
For the resync action, if there are NormIO4s, we must wait for them.
If not, we can proceed.
But if the resync action reaches start_next_window and
current_window_requests > 0 (that is, there are NormIO3s), we must
wait until current_window_requests becomes zero.
When current_window_requests becomes zero, start_next_window also
moves forward. Then current_window_requests will be replaced by
next_window_requests.
There is a problem of when and how to change from NormIO2 to
NormIO3; only then can the sync action progress.
We add a field "start_next_window" to struct r1conf.
A: if start_next_window == MaxSector, it means there are no NormIO2/3.
So start_next_window = next_resync + NEXT_NORMALIO_DISTANCE
B: if current_window_requests == 0 && next_window_requests != 0, it
means start_next_window moves to end_window
There is another problem: how to differentiate between an old NormIO2
(which has now become NormIO3) and a NormIO2.
For example, there are many bios which are NormIO2 and one bio which is
NormIO3. The NormIO3 completes first, so the NormIO2 bios become
NormIO3.
We add a field "start_next_window" to struct r1bio.
It is used to record the position of conf->start_next_window when the
call to wait_barrier() is made in make_request().
In allow_barrier(), we check conf->start_next_window.
If r1bio->start_next_window == conf->start_next_window, it means
there was no transition between NormIO2 and NormIO3.
If r1bio->start_next_window != conf->start_next_window, it means
there was a transition between NormIO2 and NormIO3. There can only
have been one transition, so it means the bio is an old NormIO2.
For one bio, there may be many r1bio's, so we make sure
all the r1bio->start_next_window fields have the same value.
If we meet a blocked_dev in make_request(), it must call allow_barrier()
and wait_barrier(), so the former and the later values of
conf->start_next_window may differ.
If there are many r1bio's with different start_next_window values, the
accounting for the relevant bio depends on the last r1bio's value,
which would cause errors. To avoid this, we must wait for previous
r1bios to complete.
Signed-off-by: Jianpeng Ma <majianpeng@gmail.com>
Signed-off-by: NeilBrown <neilb@suse.de>
2013-11-15 06:55:02 +00:00
|
|
|
|
2017-02-17 19:05:57 +00:00
|
|
|
/*
|
|
|
|
* Don't worry about checking two atomic_t variables at the same time
|
|
|
|
* here. If, while we check conf->barrier[idx], the array is
|
|
|
|
* frozen (conf->array_frozen is 1), and conf->barrier[idx] is
|
|
|
|
* 0, it is safe to return and make the I/O continue. Because the
|
|
|
|
* array is frozen, all I/O returned here will eventually complete
|
|
|
|
* or be queued, no race will happen. See code comment in
|
|
|
|
* freeze_array().
|
|
|
|
*/
|
|
|
|
if (!READ_ONCE(conf->array_frozen) &&
|
|
|
|
!atomic_read(&conf->barrier[idx]))
|
|
|
|
return;
|
2013-11-15 06:55:02 +00:00
|
|
|
|
2017-02-17 19:05:57 +00:00
|
|
|
/*
|
|
|
|
* After holding conf->resync_lock, conf->nr_pending[idx]
|
|
|
|
* should be decreased before waiting for barrier to drop.
|
|
|
|
* Otherwise, we may encounter a race condition because
|
|
|
|
* raise_barrier() might be waiting for conf->nr_pending[idx]
|
|
|
|
* to be 0 at same time.
|
|
|
|
*/
|
|
|
|
spin_lock_irq(&conf->resync_lock);
|
|
|
|
atomic_inc(&conf->nr_waiting[idx]);
|
|
|
|
atomic_dec(&conf->nr_pending[idx]);
|
|
|
|
/*
|
|
|
|
* In case freeze_array() is waiting for
|
|
|
|
* get_unqueued_pending() == extra
|
|
|
|
*/
|
|
|
|
wake_up(&conf->wait_barrier);
|
|
|
|
/* Wait for the barrier in same barrier unit bucket to drop. */
|
|
|
|
wait_event_lock_irq(conf->wait_barrier,
|
|
|
|
!conf->array_frozen &&
|
|
|
|
!atomic_read(&conf->barrier[idx]),
|
|
|
|
conf->resync_lock);
|
|
|
|
atomic_inc(&conf->nr_pending[idx]);
|
|
|
|
atomic_dec(&conf->nr_waiting[idx]);
|
RAID1: a new I/O barrier implementation to remove resync window
'Commit 79ef3a8aa1cb ("raid1: Rewrite the implementation of iobarrier.")'
introduces a sliding resync window for raid1 I/O barrier, this idea limits
I/O barriers to happen only inside a slidingresync window, for regular
I/Os out of this resync window they don't need to wait for barrier any
more. On large raid1 device, it helps a lot to improve parallel writing
I/O throughput when there are background resync I/Os performing at
same time.
The idea of sliding resync widow is awesome, but code complexity is a
challenge. Sliding resync window requires several variables to work
collectively, this is complexed and very hard to make it work correctly.
Just grep "Fixes: 79ef3a8aa1" in kernel git log, there are 8 more patches
to fix the original resync window patch. This is not the end, any further
related modification may easily introduce more regreassion.
Therefore I decide to implement a much simpler raid1 I/O barrier, by
removing resync window code, I believe life will be much easier.
The brief idea of the simpler barrier is,
- Do not maintain a global unique resync window
- Use multiple hash buckets to reduce I/O barrier conflicts, regular
I/O only has to wait for a resync I/O when both them have same barrier
bucket index, vice versa.
- I/O barrier can be reduced to an acceptable number if there are enough
barrier buckets
Here I explain how the barrier buckets are designed,
- BARRIER_UNIT_SECTOR_SIZE
The whole LBA address space of a raid1 device is divided into multiple
barrier units, by the size of BARRIER_UNIT_SECTOR_SIZE.
Bio requests won't go across border of barrier unit size, that means
maximum bio size is BARRIER_UNIT_SECTOR_SIZE<<9 (64MB) in bytes.
For random I/O 64MB is large enough for both read and write requests,
for sequential I/O considering underlying block layer may merge them
into larger requests, 64MB is still good enough.
Neil also points out that for resync operation, "we want the resync to
move from region to region fairly quickly so that the slowness caused
by having to synchronize with the resync is averaged out over a fairly
small time frame". For full speed resync, 64MB should take less then 1
second. When resync is competing with other I/O, it could take up a few
minutes. Therefore 64MB size is fairly good range for resync.
- BARRIER_BUCKETS_NR
There are BARRIER_BUCKETS_NR buckets in total, which is defined by,
#define BARRIER_BUCKETS_NR_BITS (PAGE_SHIFT - 2)
#define BARRIER_BUCKETS_NR (1<<BARRIER_BUCKETS_NR_BITS)
this patch makes the bellowed members of struct r1conf from integer
to array of integers,
- int nr_pending;
- int nr_waiting;
- int nr_queued;
- int barrier;
+ int *nr_pending;
+ int *nr_waiting;
+ int *nr_queued;
+ int *barrier;
number of the array elements is defined as BARRIER_BUCKETS_NR. For 4KB
kernel space page size, (PAGE_SHIFT - 2) indecates there are 1024 I/O
barrier buckets, and each array of integers occupies single memory page.
1024 means for a request which is smaller than the I/O barrier unit size
has ~0.1% chance to wait for resync to pause, which is quite a small
enough fraction. Also requesting single memory page is more friendly to
kernel page allocator than larger memory size.
- I/O barrier bucket is indexed by bio start sector
If multiple I/O requests hit different I/O barrier units, they only need
to compete I/O barrier with other I/Os which hit the same I/O barrier
bucket index with each other. The index of a barrier bucket which a
bio should look for is calculated by sector_to_idx() which is defined
in raid1.h as an inline function,
static inline int sector_to_idx(sector_t sector)
{
return hash_long(sector >> BARRIER_UNIT_SECTOR_BITS,
BARRIER_BUCKETS_NR_BITS);
}
Here sector_nr is the start sector number of a bio.
- Single bio won't go across boundary of a I/O barrier unit
If a request goes across boundary of barrier unit, it will be split. A
bio may be split in raid1_make_request() or raid1_sync_request(), if
sectors returned by align_to_barrier_unit_end() is smaller than
original bio size.
Comparing to single sliding resync window,
- Currently resync I/O grows linearly, therefore regular and resync I/O
will conflict within a single barrier units. So the I/O behavior is
similar to single sliding resync window.
- But a barrier unit bucket is shared by all barrier units with identical
barrier uinit index, the probability of conflict might be higher
than single sliding resync window, in condition that writing I/Os
always hit barrier units which have identical barrier bucket indexs with
the resync I/Os. This is a very rare condition in real I/O work loads,
I cannot imagine how it could happen in practice.
- Therefore we can achieve a good enough low conflict rate with much
simpler barrier algorithm and implementation.
There are two changes that should be noticed,
- In raid1d(), I change the code that decreases conf->nr_queued[idx]
into a single loop; it looks like this,
spin_lock_irqsave(&conf->device_lock, flags);
conf->nr_queued[idx]--;
spin_unlock_irqrestore(&conf->device_lock, flags);
This change generates more spin lock operations, but in the next patch
of this patch set it will be replaced by a single line of code,
atomic_dec(&conf->nr_queued[idx]);
So we don't need to worry about the spin lock cost here.
- Mainline raid1 code split the original raid1_make_request() into
raid1_read_request() and raid1_write_request(). If the original bio
goes across an I/O barrier unit boundary, the bio will be split before
calling raid1_read_request() or raid1_write_request(); this change
makes the code logic simpler and clearer.
- In this patch wait_barrier() is moved from raid1_make_request() to
raid1_write_request(). In raid1_read_request(), the original
wait_barrier() is replaced by wait_read_barrier().
The difference is that wait_read_barrier() only waits if the array is
frozen; using different barrier functions in different code paths makes
the code cleaner and easier to read.
Changelog
V4:
- Add alloc_r1bio() to remove redundant r1bio memory allocation code.
- Fix many typos in patch comments.
- Use (PAGE_SHIFT - ilog2(sizeof(int))) to define BARRIER_BUCKETS_NR_BITS.
V3:
- Rebase the patch against latest upstream kernel code.
- Many fixes from Neil's review comments,
- Go back to using pointers instead of arrays in struct r1conf
- Remove total_barriers from struct r1conf
- Add more patch comments to explain how/why the values of
BARRIER_UNIT_SECTOR_SIZE and BARRIER_BUCKETS_NR are decided.
- Use get_unqueued_pending() to replace get_all_pendings() and
get_all_queued()
- Increase bucket number from 512 to 1024
- Change the code comment format, by review from Shaohua.
V2:
- Use bio_split() to split the original bio if it goes across a barrier
unit boundary, to make the code simpler, by suggestion from Shaohua and
Neil.
- Use hash_long() to replace original linear hash, to avoid a possible
conflict between resync I/O and sequential write I/O, by suggestion from
Shaohua.
- Add conf->total_barriers to record barrier depth, which is used to
control the number of parallel sync I/O barriers, by suggestion from Shaohua.
- In the V1 patch the barrier-bucket related members of r1conf listed
below are allocated in a separate memory page. To make the code simpler,
the V2 patch moves the memory into struct r1conf, like this,
- int nr_pending;
- int nr_waiting;
- int nr_queued;
- int barrier;
+ int nr_pending[BARRIER_BUCKETS_NR];
+ int nr_waiting[BARRIER_BUCKETS_NR];
+ int nr_queued[BARRIER_BUCKETS_NR];
+ int barrier[BARRIER_BUCKETS_NR];
This change follows the suggestion from Shaohua.
- Remove some irrelevant code comments, by suggestion from Guoqing.
- Add a missing wait_barrier() before jumping to retry_write, in
raid1_make_write_request().
V1:
- Original RFC patch for comments
Signed-off-by: Coly Li <colyli@suse.de>
Cc: Johannes Thumshirn <jthumshirn@suse.de>
Cc: Guoqing Jiang <gqjiang@suse.com>
Reviewed-by: Neil Brown <neilb@suse.de>
Signed-off-by: Shaohua Li <shli@fb.com>
2017-02-17 19:05:56 +00:00
|
|
|
spin_unlock_irq(&conf->resync_lock);
|
raid1: Rewrite the implementation of iobarrier.
There is an iobarrier in raid1 because of contention between normal IO and
resync IO. It suspends all normal IO when resync/recovery happens.
However if normal IO is outside the resync window, there is no contention.
So this patch changes the barrier mechanism to only block IO that
could contend with the resync that is currently happening.
We partition the whole space into five parts.
|---------|-----------|------------|----------------|-------|
       start      next_resync  start_next_window  end_window
start + RESYNC_WINDOW = next_resync
next_resync + NEXT_NORMALIO_DISTANCE = start_next_window
start_next_window + NEXT_NORMALIO_DISTANCE = end_window
Firstly we introduce some concepts:
1 - RESYNC_WINDOW: For resync, there are at most 32 resync requests at the
same time. Each sync request is RESYNC_BLOCK_SIZE (64*1024 bytes).
So the RESYNC_WINDOW is 32 * RESYNC_BLOCK_SIZE, that is 2MB.
2 - NEXT_NORMALIO_DISTANCE: the distance between next_resync
and start_next_window. It also indicates the distance between
start_next_window and end_window.
It is currently 3 * RESYNC_WINDOW_SIZE but could be tuned if
this turned out not to be optimal.
3 - next_resync: the next sector at which we will do sync IO.
4 - start: a position which is at most RESYNC_WINDOW before
next_resync.
5 - start_next_window: a position which is NEXT_NORMALIO_DISTANCE
beyond next_resync. Normal-io after this position doesn't need to
wait for resync-io to complete.
6 - end_window: a position which is 2 * NEXT_NORMALIO_DISTANCE beyond
next_resync. This also doesn't need to wait, but is counted
differently.
7 - current_window_requests: the count of normalIO between
start_next_window and end_window.
8 - next_window_requests: the count of normalIO after end_window.
NormalIO will be partitioned into four types:
NormIO1: the end sector of the bio is smaller than or equal to start
NormIO2: the start sector of the bio is larger than or equal to end_window
NormIO3: the start sector of the bio is larger than or equal to
start_next_window (but smaller than end_window).
NormIO4: the bio lies between start and start_next_window (see the
sketch after the diagram below)
|--------|-----------|--------------------|----------------|-------------|
|  start |  next_resync  |  start_next_window  |  end_window  |
 NormIO1    NormIO4          NormIO4               NormIO3       NormIO2
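For illustration, a hedged user-space sketch of this classification; the
enum and helper are invented for this sketch (the kernel makes the
equivalent decisions inline rather than via such a helper), and NormIO2 is
tested before NormIO3 to resolve the overlap in the definitions above,

#include <stdint.h>

typedef uint64_t sector_t;

/* Illustrative only: neither this enum nor the helper exists in raid1.c. */
enum normio_type { NORMIO1, NORMIO2, NORMIO3, NORMIO4 };

static enum normio_type classify_bio(sector_t bio_start, sector_t bio_end,
                                     sector_t start,
                                     sector_t start_next_window,
                                     sector_t end_window)
{
        if (bio_end <= start)
                return NORMIO1;   /* needs no barrier at all */
        if (bio_start >= end_window)
                return NORMIO2;   /* counted in next_window_requests */
        if (bio_start >= start_next_window)
                return NORMIO3;   /* counted in current_window_requests */
        return NORMIO4;           /* must stay separate from resync I/O */
}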
For NormIO1, we don't need any io barrier.
For NormIO4, we used a similar approach to the original iobarrier
mechanism. The normalIO and resyncIO must be kept separate.
For NormIO2/3, we add two fields to struct r1conf: "current_window_requests"
and "next_window_requests". They indicate the count of active
requests in the two windows.
For these, we don't wait for resync io to complete.
For resync action, if there are NormIO4s, we must wait for them.
If not, we can proceed.
But if resync action reaches start_next_window and
current_window_requests > 0 (that is there are NormIO3s), we must
wait until current_window_requests becomes zero.
When current_window_requests becomes zero, start_next_window also
moves forward. Then current_window_requests will be replaced by
next_window_requests.
There is the problem of when and how to change from NormIO2 to
NormIO3. Only then can the sync action progress.
We add a field in struct r1conf "start_next_window".
A: if start_next_window == MaxSector, it means there are no NormIO2/3.
So start_next_window = next_resync + NEXT_NORMALIO_DISTANCE
B: if current_window_requests == 0 && next_window_requests != 0, it
means start_next_window moves to end_window (as sketched below).
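For illustration, a hedged user-space sketch of rules A and B; the struct,
helper and constant value are invented for this sketch and do not exist in
the kernel (NEXT_NORMALIO_DISTANCE is assumed to be 3 * RESYNC_WINDOW
expressed in 512-byte sectors),

#include <stdint.h>

typedef uint64_t sector_t;
#define MaxSector ((sector_t)~0ULL)
/* 3 * RESYNC_WINDOW; a 2MB window is 4096 sectors, so 12288 sectors. */
#define NEXT_NORMALIO_DISTANCE (3 * 32 * (64 * 1024 / 512))

struct window_state { /* invented stand-in for the r1conf fields */
        sector_t next_resync;
        sector_t start_next_window;
        int current_window_requests;
        int next_window_requests;
};

static void maybe_advance_window(struct window_state *w)
{
        if (w->start_next_window == MaxSector) {
                /* Rule A: no NormIO2/3 exist yet, open the first window. */
                w->start_next_window =
                        w->next_resync + NEXT_NORMALIO_DISTANCE;
        } else if (w->current_window_requests == 0 &&
                   w->next_window_requests != 0) {
                /*
                 * Rule B: the current window drained; start_next_window
                 * slides to the old end_window and the next-window count
                 * becomes the current-window count.
                 */
                w->start_next_window += NEXT_NORMALIO_DISTANCE;
                w->current_window_requests = w->next_window_requests;
                w->next_window_requests = 0;
        }
}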
There is another problem of how to differentiate between
old NormIO2 (which has now become NormIO3) and new NormIO2.
For example, there are many bios which are NormIO2 and one bio which is
NormIO3. The NormIO3 bio completes first, so the NormIO2 bios become NormIO3.
We add a field in struct r1bio "start_next_window".
This is used to record the position conf->start_next_window when the call
to wait_barrier() is made in make_request().
In allow_barrier(), we check the conf->start_next_window.
If r1bio->start_next_window == conf->start_next_window, it means
there is no transition between NormIO2 and NormIO3.
If r1bio->start_next_window != conf->start_next_window, it means
there was a transition between NormIO2 and NormIO3. There can only
have been one transition. So it only means the bio is old NormIO2.
For one bio, there may be many r1bio's. So we make sure
all the r1bio->start_next_window values are the same.
If we meet a blocked_dev in make_request(), it must call allow_barrier()
and wait_barrier(). So the values of conf->start_next_window before and
after may differ.
If there are many r1bio's with different start_next_window values,
then for the relevant bio everything depends on the last r1bio's value.
That will cause errors. To avoid this, we must wait for previous r1bios
to complete.
Signed-off-by: Jianpeng Ma <majianpeng@gmail.com>
Signed-off-by: NeilBrown <neilb@suse.de>
2013-11-15 06:55:02 +00:00
|
|
|
}
|
|
|
|
|
2017-02-17 19:05:56 +00:00
|
|
|
static void wait_read_barrier(struct r1conf *conf, sector_t sector_nr)
|
2013-11-15 06:55:02 +00:00
|
|
|
{
|
2017-02-17 19:05:56 +00:00
|
|
|
int idx = sector_to_idx(sector_nr);
|
2013-11-15 06:55:02 +00:00
|
|
|
|
RAID1: avoid unnecessary spin locks in I/O barrier code
When I run a parallel reading performance test on an md raid1 device with
two NVMe SSDs, I observe surprisingly bad throughput: with fio at 64KB
block size, 40 sequential read I/O jobs and 128 iodepth, overall
throughput is only 2.7GB/s, around 50% of the ideal performance number.
perf reports that the locking contention happens in the allow_barrier()
and wait_barrier() code,
- 41.41% fio [kernel.kallsyms] [k] _raw_spin_lock_irqsave
- _raw_spin_lock_irqsave
+ 89.92% allow_barrier
+ 9.34% __wake_up
- 37.30% fio [kernel.kallsyms] [k] _raw_spin_lock_irq
- _raw_spin_lock_irq
- 100.00% wait_barrier
The reason is, in these I/O barrier related functions,
- raise_barrier()
- lower_barrier()
- wait_barrier()
- allow_barrier()
They always take conf->resync_lock first, even when there are only regular
read I/Os and no resync I/O at all. This is a huge performance penalty.
The solution is a lockless-like algorithm in the I/O barrier code, which
only holds conf->resync_lock when it has to.
The original idea is from Hannes Reinecke, and Neil Brown provided
comments to improve it. I continued the work and brought the patch into
its current form.
In the new simpler raid1 I/O barrier implementation, there are two
wait barrier functions,
- wait_barrier()
This calls _wait_barrier() and is used for regular write I/O. If there is
resync I/O happening on the same I/O barrier bucket, or the whole
array is frozen, the task will wait until there is no barrier on the
same bucket, or the whole array is unfrozen.
- wait_read_barrier()
Since regular read I/O won't interfere with resync I/O (read_balance()
will make sure only uptodate data is read out), it is unnecessary
to wait for a barrier for regular read I/Os; waiting is only necessary
when the whole array is frozen.
The operations on conf->nr_pending[idx], conf->nr_waiting[idx] and
conf->barrier[idx] are very carefully designed in raise_barrier(),
lower_barrier(), _wait_barrier() and wait_read_barrier(), in order to
avoid unnecessary spin locks in these functions. Once
conf->nr_pending[idx] is increased, a resync I/O with the same barrier
bucket index has to wait in raise_barrier(). Then in _wait_barrier(), if
no barrier is raised on the same barrier bucket index and the array is
not frozen, the regular I/O doesn't need to hold conf->resync_lock; it
can just increase conf->nr_pending[idx] and return to its caller.
wait_read_barrier() is very similar to _wait_barrier(); the only
difference is it only waits when the array is frozen. For heavily
parallel read I/Os, the lockless I/O barrier code gets rid of almost all
spin lock cost; a condensed sketch follows below.
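For illustration, here is a condensed sketch of _wait_barrier() assembled
from the description above; it simplifies the upstream function and omits
details, so treat it as a reading aid rather than the exact code,

static void _wait_barrier(struct r1conf *conf, int idx)
{
        /*
         * Publish this regular I/O first, so a concurrent raise_barrier()
         * on the same bucket sees it and waits for it to drain.
         */
        atomic_inc(&conf->nr_pending[idx]);
        smp_mb__after_atomic();

        /*
         * Fast path: no barrier raised on this bucket and the array is
         * not frozen -- return without ever touching conf->resync_lock.
         */
        if (!READ_ONCE(conf->array_frozen) &&
            !atomic_read(&conf->barrier[idx]))
                return;

        /*
         * Slow path: retract the early increment, wake up a possibly
         * waiting freeze_array(), and sleep until the barrier is gone.
         */
        spin_lock_irq(&conf->resync_lock);
        atomic_inc(&conf->nr_waiting[idx]);
        atomic_dec(&conf->nr_pending[idx]);
        wake_up(&conf->wait_barrier);
        wait_event_lock_irq(conf->wait_barrier,
                            !conf->array_frozen &&
                            !atomic_read(&conf->barrier[idx]),
                            conf->resync_lock);
        atomic_inc(&conf->nr_pending[idx]);
        atomic_dec(&conf->nr_waiting[idx]);
        spin_unlock_irq(&conf->resync_lock);
}

wait_read_barrier() has the same shape, except both its fast-path check and
its sleep condition test only conf->array_frozen, not conf->barrier[idx].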
This patch significantly improves raid1 read performance. In my testing,
on a raid1 device built from two NVMe SSDs, running fio with 64KB
blocksize, 40 sequential read I/O jobs and 128 iodepth, overall
throughput increases from 2.7GB/s to 4.6GB/s (+70%).
Changelog
V4:
- Change conf->nr_queued[] to atomic_t.
- Define BARRIER_BUCKETS_NR_BITS by (PAGE_SHIFT - ilog2(sizeof(atomic_t)))
V3:
- Add smp_mb__after_atomic() as Shaohua and Neil suggested.
- Change conf->nr_queued[] from atomic_t to int.
- Change conf->array_frozen from atomic_t back to int, and use
READ_ONCE(conf->array_frozen) to check value of conf->array_frozen
in _wait_barrier() and wait_read_barrier().
- In _wait_barrier() and wait_read_barrier(), add a call to
wake_up(&conf->wait_barrier) after atomic_dec(&conf->nr_pending[idx]),
to fix a deadlock between _wait_barrier()/wait_read_barrier and
freeze_array().
V2:
- Remove a spin_lock/unlock pair in raid1d().
- Add more code comments to explain why there is no race when checking
two atomic_t variables at the same time.
V1:
- Original RFC patch for comments.
Signed-off-by: Coly Li <colyli@suse.de>
Cc: Shaohua Li <shli@fb.com>
Cc: Hannes Reinecke <hare@suse.com>
Cc: Johannes Thumshirn <jthumshirn@suse.de>
Cc: Guoqing Jiang <gqjiang@suse.com>
Reviewed-by: Neil Brown <neilb@suse.de>
Signed-off-by: Shaohua Li <shli@fb.com>
2017-02-17 19:05:57 +00:00
|
|
|
/*
|
|
|
|
* Very similar to _wait_barrier(). The difference is, for read
|
|
|
|
* I/O we don't need to wait for sync I/O, but if the whole array
|
|
|
|
* is frozen, the read I/O still has to wait until the array is
|
|
|
|
* unfrozen. Since there is no ordering requirement with
|
|
|
|
* conf->barrier[idx] here, memory barrier is unnecessary as well.
|
|
|
|
*/
|
|
|
|
atomic_inc(&conf->nr_pending[idx]);
|
2013-11-15 06:55:02 +00:00
|
|
|
|
2017-02-17 19:05:57 +00:00
|
|
|
if (!READ_ONCE(conf->array_frozen))
|
|
|
|
return;
|
|
|
|
|
|
|
|
spin_lock_irq(&conf->resync_lock);
|
|
|
|
atomic_inc(&conf->nr_waiting[idx]);
|
|
|
|
atomic_dec(&conf->nr_pending[idx]);
|
|
|
|
/*
|
|
|
|
* In case freeze_array() is waiting for
|
|
|
|
* get_unqueued_pending() == extra
|
|
|
|
*/
|
|
|
|
wake_up(&conf->wait_barrier);
|
|
|
|
/* Wait for array to be unfrozen */
|
|
|
|
wait_event_lock_irq(conf->wait_barrier,
|
|
|
|
!conf->array_frozen,
|
|
|
|
conf->resync_lock);
|
|
|
|
atomic_inc(&conf->nr_pending[idx]);
|
|
|
|
atomic_dec(&conf->nr_waiting[idx]);
|
2005-04-16 22:20:36 +00:00
|
|
|
spin_unlock_irq(&conf->resync_lock);
|
|
|
|
}
|
|
|
|
|
2017-02-17 19:05:56 +00:00
|
|
|
static void wait_barrier(struct r1conf *conf, sector_t sector_nr)
|
2006-01-06 08:20:12 +00:00
|
|
|
{
|
RAID1: a new I/O barrier implementation to remove resync window
'Commit 79ef3a8aa1cb ("raid1: Rewrite the implementation of iobarrier.")'
introduces a sliding resync window for the raid1 I/O barrier; this idea
limits I/O barriers to happen only inside the sliding resync window, so
regular I/Os outside this resync window don't need to wait for the
barrier any more. On a large raid1 device, it helps a lot to improve
parallel write I/O throughput when there are background resync I/Os
performing at the same time.
The idea of the sliding resync window is awesome, but code complexity is
a challenge. The sliding resync window requires several variables to work
collectively; this is complex and very hard to get right. Just grep
"Fixes: 79ef3a8aa1" in the kernel git log: there are 8 more patches
fixing the original resync window patch. This is not the end; any further
related modification may easily introduce more regressions.
Therefore I decided to implement a much simpler raid1 I/O barrier by
removing the resync window code; I believe life will be much easier.
The brief idea of the simpler barrier is,
- Do not maintain a globally unique resync window
- Use multiple hash buckets to reduce I/O barrier conflicts: regular
I/O only has to wait for a resync I/O when both of them have the same
barrier bucket index, and vice versa.
- I/O barrier waits can be reduced to an acceptable number if there are
enough barrier buckets
Here I explain how the barrier buckets are designed,
- BARRIER_UNIT_SECTOR_SIZE
The whole LBA address space of a raid1 device is divided into multiple
barrier units, by the size of BARRIER_UNIT_SECTOR_SIZE.
Bio requests won't go across border of barrier unit size, that means
maximum bio size is BARRIER_UNIT_SECTOR_SIZE<<9 (64MB) in bytes.
For random I/O 64MB is large enough for both read and write requests,
for sequential I/O considering underlying block layer may merge them
into larger requests, 64MB is still good enough.
Neil also points out that for resync operation, "we want the resync to
move from region to region fairly quickly so that the slowness caused
by having to synchronize with the resync is averaged out over a fairly
small time frame". For full speed resync, 64MB should take less then 1
second. When resync is competing with other I/O, it could take up a few
minutes. Therefore 64MB size is fairly good range for resync.
- BARRIER_BUCKETS_NR
There are BARRIER_BUCKETS_NR buckets in total, which is defined by,
#define BARRIER_BUCKETS_NR_BITS (PAGE_SHIFT - 2)
#define BARRIER_BUCKETS_NR (1<<BARRIER_BUCKETS_NR_BITS)
this patch makes the bellowed members of struct r1conf from integer
to array of integers,
- int nr_pending;
- int nr_waiting;
- int nr_queued;
- int barrier;
+ int *nr_pending;
+ int *nr_waiting;
+ int *nr_queued;
+ int *barrier;
number of the array elements is defined as BARRIER_BUCKETS_NR. For 4KB
kernel space page size, (PAGE_SHIFT - 2) indecates there are 1024 I/O
barrier buckets, and each array of integers occupies single memory page.
1024 means for a request which is smaller than the I/O barrier unit size
has ~0.1% chance to wait for resync to pause, which is quite a small
enough fraction. Also requesting single memory page is more friendly to
kernel page allocator than larger memory size.
- I/O barrier bucket is indexed by bio start sector
If multiple I/O requests hit different I/O barrier units, they only need
to compete I/O barrier with other I/Os which hit the same I/O barrier
bucket index with each other. The index of a barrier bucket which a
bio should look for is calculated by sector_to_idx() which is defined
in raid1.h as an inline function,
static inline int sector_to_idx(sector_t sector)
{
return hash_long(sector >> BARRIER_UNIT_SECTOR_BITS,
BARRIER_BUCKETS_NR_BITS);
}
Here sector_nr is the start sector number of a bio.
- Single bio won't go across boundary of a I/O barrier unit
If a request goes across boundary of barrier unit, it will be split. A
bio may be split in raid1_make_request() or raid1_sync_request(), if
sectors returned by align_to_barrier_unit_end() is smaller than
original bio size.
Comparing to single sliding resync window,
- Currently resync I/O grows linearly, therefore regular and resync I/O
will conflict within a single barrier units. So the I/O behavior is
similar to single sliding resync window.
- But a barrier unit bucket is shared by all barrier units with identical
barrier uinit index, the probability of conflict might be higher
than single sliding resync window, in condition that writing I/Os
always hit barrier units which have identical barrier bucket indexs with
the resync I/Os. This is a very rare condition in real I/O work loads,
I cannot imagine how it could happen in practice.
- Therefore we can achieve a good enough low conflict rate with much
simpler barrier algorithm and implementation.
There are two changes should be noticed,
- In raid1d(), I change the code to decrease conf->nr_pending[idx] into
single loop, it looks like this,
spin_lock_irqsave(&conf->device_lock, flags);
conf->nr_queued[idx]--;
spin_unlock_irqrestore(&conf->device_lock, flags);
This change generates more spin lock operations, but in next patch of
this patch set, it will be replaced by a single line code,
atomic_dec(&conf->nr_queueud[idx]);
So we don't need to worry about spin lock cost here.
- Mainline raid1 code split original raid1_make_request() into
raid1_read_request() and raid1_write_request(). If the original bio
goes across an I/O barrier unit size, this bio will be split before
calling raid1_read_request() or raid1_write_request(), this change
the code logic more simple and clear.
- In this patch wait_barrier() is moved from raid1_make_request() to
raid1_write_request(). In raid_read_request(), original wait_barrier()
is replaced by raid1_read_request().
The differnece is wait_read_barrier() only waits if array is frozen,
using different barrier function in different code path makes the code
more clean and easy to read.
Changelog
V4:
- Add alloc_r1bio() to remove redundant r1bio memory allocation code.
- Fix many typos in patch comments.
- Use (PAGE_SHIFT - ilog2(sizeof(int))) to define BARRIER_BUCKETS_NR_BITS.
V3:
- Rebase the patch against latest upstream kernel code.
- Many fixes by review comments from Neil,
- Back to use pointers to replace arraries in struct r1conf
- Remove total_barriers from struct r1conf
- Add more patch comments to explain how/why the values of
BARRIER_UNIT_SECTOR_SIZE and BARRIER_BUCKETS_NR are decided.
- Use get_unqueued_pending() to replace get_all_pendings() and
get_all_queued()
- Increase bucket number from 512 to 1024
- Change code comments format by review from Shaohua.
V2:
- Use bio_split() to split the orignal bio if it goes across barrier unit
bounday, to make the code more simple, by suggestion from Shaohua and
Neil.
- Use hash_long() to replace original linear hash, to avoid a possible
confilict between resync I/O and sequential write I/O, by suggestion from
Shaohua.
- Add conf->total_barriers to record barrier depth, which is used to
control number of parallel sync I/O barriers, by suggestion from Shaohua.
- In V1 patch the bellowed barrier buckets related members in r1conf are
allocated in memory page. To make the code more simple, V2 patch moves
the memory space into struct r1conf, like this,
- int nr_pending;
- int nr_waiting;
- int nr_queued;
- int barrier;
+ int nr_pending[BARRIER_BUCKETS_NR];
+ int nr_waiting[BARRIER_BUCKETS_NR];
+ int nr_queued[BARRIER_BUCKETS_NR];
+ int barrier[BARRIER_BUCKETS_NR];
This change is by the suggestion from Shaohua.
- Remove some inrelavent code comments, by suggestion from Guoqing.
- Add a missing wait_barrier() before jumping to retry_write, in
raid1_make_write_request().
V1:
- Original RFC patch for comments
Signed-off-by: Coly Li <colyli@suse.de>
Cc: Johannes Thumshirn <jthumshirn@suse.de>
Cc: Guoqing Jiang <gqjiang@suse.com>
Reviewed-by: Neil Brown <neilb@suse.de>
Signed-off-by: Shaohua Li <shli@fb.com>
2017-02-17 19:05:56 +00:00
|
|
|
int idx = sector_to_idx(sector_nr);
|
raid1: Rewrite the implementation of iobarrier.
There is an iobarrier in raid1 because of contention between normal IO and
resync IO. It suspends all normal IO when resync/recovery happens.
However if normal IO is outside the resync window, there is no contention.
So this patch changes the barrier mechanism to only block IO that
could contend with the resync that is currently happening.
We partition the whole space into five parts.
|---------|-----------|------------|----------------|-------|
start next_resync start_next_window end_window
start + RESYNC_WINDOW = next_resync
next_resync + NEXT_NORMALIO_DISTANCE = start_next_window
start_next_window + NEXT_NORMALIO_DISTANCE = end_window
Firstly we introduce some concepts:
1 - RESYNC_WINDOW: For resync, there are 32 resync requests at most at the
same time. A sync request is RESYNC_BLOCK_SIZE(64*1024).
So the RESYNC_WINDOW is 32 * RESYNC_BLOCK_SIZE, that is 2MB.
2 - NEXT_NORMALIO_DISTANCE: the distance between next_resync
and start_next_window. It also indicates the distance between
start_next_window and end_window.
It is currently 3 * RESYNC_WINDOW_SIZE but could be tuned if
this turned out not to be optimal.
3 - next_resync: the next sector at which we will do sync IO.
4 - start: a position which is at most RESYNC_WINDOW before
next_resync.
5 - start_next_window: a position which is NEXT_NORMALIO_DISTANCE
beyond next_resync. Normal-io after this position doesn't need to
wait for resync-io to complete.
6 - end_window: a position which is 2 * NEXT_NORMALIO_DISTANCE beyond
next_resync. This also doesn't need to wait, but is counted
differently.
7 - current_window_requests: the count of normalIO between
start_next_window and end_window.
8 - next_window_requests: the count of normalIO after end_window.
NormalIO will be partitioned into four types:
NormIO1: the end sector of the bio is smaller than or equal to start
NormIO2: the start sector of the bio is larger than or equal to end_window
NormIO3: the start sector of the bio is larger than or equal to
start_next_window.
NormIO4: the location is between start and start_next_window
|--------|-----------|--------------------|----------------|-------------|
| start | next_resync | start_next_window | end_window |
NormIO1 NormIO4 NormIO4 NormIO3 NormIO2
For NormIO1, we don't need any io barrier.
For NormIO4, we used a similar approach to the original iobarrier
mechanism. The normalIO and resyncIO must be kept separate.
For NormIO2/3, we add two fields to struct r1conf: "current_window_requests"
and "next_window_requests". They indicate the count of active
requests in the two windows.
For these, we don't wait for resync io to complete.
For the resync action, if there are NormIO4s, we must wait for them.
If not, we can proceed.
But if the resync action reaches start_next_window and
current_window_requests > 0 (that is, there are NormIO3s), we must
wait until current_window_requests becomes zero.
When current_window_requests becomes zero, start_next_window also
moves forward. Then current_window_requests will be replaced by
next_window_requests.
There is a problem of when and how to change from NormIO2 to
NormIO3. Only then can the sync action progress.
We add a field in struct r1conf "start_next_window".
A: if start_next_window == MaxSector, it means there are no NormIO2/3.
So start_next_window = next_resync + NEXT_NORMALIO_DISTANCE
B: if current_window_requests == 0 && next_window_requests != 0, it
means start_next_window moves to end_window
There is another problem of how to differentiate between
old NormIO2 (which is now NormIO3) and NormIO2.
For example, there are many bios which are NormIO2 and a bio which is
NormIO3. The NormIO3 completed first, so the bios of NormIO2 became NormIO3.
We add a field in struct r1bio "start_next_window".
This is used to record the position of conf->start_next_window when the
call to wait_barrier() is made in make_request().
In allow_barrier(), we check conf->start_next_window.
If r1bio->start_next_window == conf->start_next_window, it means
there was no transition between NormIO2 and NormIO3.
If r1bio->start_next_window != conf->start_next_window, it means
there was a transition between NormIO2 and NormIO3. There can only
have been one transition. So it only means the bio is old NormIO2.
For one bio, there may be many r1bio's. So we make sure
all the r1bio->start_next_window values are the same.
If we meet blocked_dev in make_request(), it must call allow_barrier
and wait_barrier. So the former and the latter values of
conf->start_next_window will have changed.
If there are many r1bio's with different start_next_window values,
then for the relevant bio, the result depends on the last r1bio's value,
which can cause errors. To avoid this, we must wait for previous r1bios
to complete.
Signed-off-by: Jianpeng Ma <majianpeng@gmail.com>
Signed-off-by: NeilBrown <neilb@suse.de>
2013-11-15 06:55:02 +00:00
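
The window bookkeeping in the message above is easiest to see as a
classifier. The following C sketch maps a bio onto the four NormIO types
from the diagram; the function name, signature, and boundary values are
illustrative only, and the checks run in order so the overlapping ">="
conditions resolve the way the diagram shows.

    #include <stdint.h>
    #include <stdio.h>

    enum normio { NORMIO1 = 1, NORMIO2, NORMIO3, NORMIO4 };

    /*
     * Illustrative classifier for the five-part space in the diagram.
     * bi_sector/sectors describe the bio; the three boundaries are the
     * positions named above.  A bio beyond end_window is NormIO2 even
     * though it also satisfies the NormIO3 condition.
     */
    static enum normio classify_normio(uint64_t bi_sector, uint64_t sectors,
                                       uint64_t start,
                                       uint64_t start_next_window,
                                       uint64_t end_window)
    {
            if (bi_sector + sectors <= start)
                    return NORMIO1; /* entirely before the resync window */
            if (bi_sector >= end_window)
                    return NORMIO2; /* counted in next_window_requests */
            if (bi_sector >= start_next_window)
                    return NORMIO3; /* counted in current_window_requests */
            return NORMIO4;         /* contends with resync: obeys barrier */
    }

    int main(void)
    {
            /* boundary values picked arbitrarily for the demonstration */
            printf("%d %d %d %d\n",
                   classify_normio(0,   8, 100, 400, 700),  /* NormIO1 */
                   classify_normio(800, 8, 100, 400, 700),  /* NormIO2 */
                   classify_normio(500, 8, 100, 400, 700),  /* NormIO3 */
                   classify_normio(200, 8, 100, 400, 700)); /* NormIO4 */
            return 0;
    }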
|
|
|
|
2017-02-17 19:05:56 +00:00
|
|
|
_wait_barrier(conf, idx);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void _allow_barrier(struct r1conf *conf, int idx)
|
2006-01-06 08:20:12 +00:00
|
|
|
{
|
RAID1: avoid unnecessary spin locks in I/O barrier code
When I run a parallel reading performance test on a md raid1 device with
two NVMe SSDs, I observe very bad throughput, to my surprise: with fio at
64KB block size, 40 seq read I/O jobs, 128 iodepth, overall throughput is
only 2.7GB/s, which is around 50% of the ideal performance number.
The perf report shows locking contention in the allow_barrier() and
wait_barrier() code,
- 41.41% fio [kernel.kallsyms] [k] _raw_spin_lock_irqsave
- _raw_spin_lock_irqsave
+ 89.92% allow_barrier
+ 9.34% __wake_up
- 37.30% fio [kernel.kallsyms] [k] _raw_spin_lock_irq
- _raw_spin_lock_irq
- 100.00% wait_barrier
The reason is that these I/O barrier related functions,
- raise_barrier()
- lower_barrier()
- wait_barrier()
- allow_barrier()
always take conf->resync_lock first, even when there is only regular
read I/O and no resync I/O at all. This is a huge performance penalty.
The solution is a lockless-like algorithm in the I/O barrier code, which
holds conf->resync_lock only when it has to.
The original idea is from Hannes Reinecke, and Neil Brown provided
comments to improve it. I continued to work on it and brought the patch
into its current form.
In the new simpler raid1 I/O barrier implementation, there are two
wait barrier functions,
- wait_barrier()
Which calls _wait_barrier(), is used for regular write I/O. If there is
resync I/O happening on the same I/O barrier bucket, or the whole
array is frozen, the task will wait until there is no barrier on the
same barrier bucket, or the whole array is unfrozen.
- wait_read_barrier()
Since regular read I/O won't interfere with resync I/O (read_balance()
will make sure only up-to-date data is read out), it is unnecessary
to wait for a barrier in regular read I/Os; waiting is only necessary
when the whole array is frozen.
The operations on conf->nr_pending[idx], conf->nr_waiting[idx], conf->
barrier[idx] are very carefully designed in raise_barrier(),
lower_barrier(), _wait_barrier() and wait_read_barrier(), in order to
avoid unnecessary spin locks in these functions. Once conf->
nr_pending[idx] is increased, a resync I/O with the same barrier bucket
index has to wait in raise_barrier(). Then in _wait_barrier(), if no
barrier is raised in the same barrier bucket index and the array is not
frozen, the regular I/O doesn't need to hold conf->resync_lock; it can
just increase conf->nr_pending[idx] and return to its caller.
wait_read_barrier() is very similar to _wait_barrier(); the only
difference is that it only waits when the array is frozen. For heavy
parallel reading I/Os, the lockless I/O barrier code almost entirely
gets rid of the spin lock cost.
This patch significantly improves raid1 reading performance. In my
testing, a raid1 device built from two NVMe SSDs, running fio with 64KB
blocksize, 40 seq read I/O jobs, 128 iodepth, sees overall throughput
increase from 2.7GB/s to 4.6GB/s (+70%).
Changelog
V4:
- Change conf->nr_queued[] to atomic_t.
- Define BARRIER_BUCKETS_NR_BITS by (PAGE_SHIFT - ilog2(sizeof(atomic_t)))
V3:
- Add smp_mb__after_atomic() as Shaohua and Neil suggested.
- Change conf->nr_queued[] from atomic_t to int.
- Change conf->array_frozen from atomic_t back to int, and use
READ_ONCE(conf->array_frozen) to check the value of conf->array_frozen
in _wait_barrier() and wait_read_barrier().
- In _wait_barrier() and wait_read_barrier(), add a call to
wake_up(&conf->wait_barrier) after atomic_dec(&conf->nr_pending[idx]),
to fix a deadlock between _wait_barrier()/wait_read_barrier and
freeze_array().
V2:
- Remove a spin_lock/unlock pair in raid1d().
- Add more code comments to explain why there is no race when checking two
atomic_t variables at the same time.
V1:
- Original RFC patch for comments.
Signed-off-by: Coly Li <colyli@suse.de>
Cc: Shaohua Li <shli@fb.com>
Cc: Hannes Reinecke <hare@suse.com>
Cc: Johannes Thumshirn <jthumshirn@suse.de>
Cc: Guoqing Jiang <gqjiang@suse.com>
Reviewed-by: Neil Brown <neilb@suse.de>
Signed-off-by: Shaohua Li <shli@fb.com>
2017-02-17 19:05:57 +00:00
|
|
|
atomic_dec(&conf->nr_pending[idx]);
|
2006-01-06 08:20:12 +00:00
|
|
|
wake_up(&conf->wait_barrier);
|
|
|
|
}
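
The lockless protocol described in the message above can be modelled in
userspace, with C11 atomics standing in for the kernel's atomic_t and the
seq_cst ordering of atomic_fetch_add() playing the role of
smp_mb__after_atomic(). The sketch below shows only the fast path and the
hand-off to the (unmodelled) locked slow path; names and structure are
illustrative, not the in-tree implementation.

    #include <stdatomic.h>
    #include <stdbool.h>

    #define BARRIER_BUCKETS_NR 1024

    struct r1conf_model {
            atomic_int  nr_pending[BARRIER_BUCKETS_NR];
            atomic_int  barrier[BARRIER_BUCKETS_NR];
            atomic_bool array_frozen;
    };

    /*
     * Returns true when the fast path succeeds.  A false return means a
     * barrier is raised (or the array is frozen) and the caller must
     * fall back to the locked slow path, which is not modelled here.
     */
    static bool wait_barrier_fast_path(struct r1conf_model *conf, int idx)
    {
            /* Publish our pending I/O first... */
            atomic_fetch_add(&conf->nr_pending[idx], 1);

            /* ...then check for a raised barrier on the same bucket. */
            if (!atomic_load(&conf->barrier[idx]) &&
                !atomic_load(&conf->array_frozen))
                    return true;    /* no conflict: proceed without a lock */

            /*
             * Conflict: withdraw the pending count before waiting.  The
             * kernel code also wakes wait_barrier waiters here (see the
             * V3 changelog above) to avoid deadlocking freeze_array().
             */
            atomic_fetch_sub(&conf->nr_pending[idx], 1);
            return false;
    }

    int main(void)
    {
            static struct r1conf_model conf;        /* zero-initialised */
            return wait_barrier_fast_path(&conf, 0) ? 0 : 1;
    }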
|
|
|
|
|
2017-02-17 19:05:56 +00:00
|
|
|
static void allow_barrier(struct r1conf *conf, sector_t sector_nr)
|
|
|
|
{
|
|
|
|
int idx = sector_to_idx(sector_nr);
|
|
|
|
|
|
|
|
_allow_barrier(conf, idx);
|
|
|
|
}
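
Stripped of the blame annotations, the two sector-addressed wrappers above
assemble to:

    static void wait_barrier(struct r1conf *conf, sector_t sector_nr)
    {
            int idx = sector_to_idx(sector_nr);

            _wait_barrier(conf, idx);
    }

    static void allow_barrier(struct r1conf *conf, sector_t sector_nr)
    {
            int idx = sector_to_idx(sector_nr);

            _allow_barrier(conf, idx);
    }

Each simply hashes the bio's start sector to its barrier bucket and defers
to the per-bucket helper.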
|
|
|
|
|
|
|
|
/* conf->resync_lock should be held */
|
|
|
|
static int get_unqueued_pending(struct r1conf *conf)
|
|
|
|
{
|
|
|
|
int idx, ret;
|
|
|
|
|
2017-04-27 08:28:49 +00:00
|
|
|
ret = atomic_read(&conf->nr_sync_pending);
|
|
|
|
for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++)
|
2017-02-17 19:05:57 +00:00
|
|
|
ret += atomic_read(&conf->nr_pending[idx]) -
|
|
|
|
atomic_read(&conf->nr_queued[idx]);
|
2017-02-17 19:05:56 +00:00
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
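
Read straight through, get_unqueued_pending() assembles to:

    /* conf->resync_lock should be held */
    static int get_unqueued_pending(struct r1conf *conf)
    {
            int idx, ret;

            ret = atomic_read(&conf->nr_sync_pending);
            for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++)
                    ret += atomic_read(&conf->nr_pending[idx]) -
                            atomic_read(&conf->nr_queued[idx]);

            return ret;
    }

It sums, across all barrier buckets, the pending I/O that has not yet been
queued for retry, plus any pending sync I/O; freeze_array() below waits for
this count to drop to its "extra" allowance.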
|
|
|
|
|
2013-06-12 01:01:22 +00:00
|
|
|
static void freeze_array(struct r1conf *conf, int extra)
|
2006-01-06 08:20:19 +00:00
|
|
|
{
|
2017-02-17 19:05:56 +00:00
|
|
|
/* Stop sync I/O and normal I/O and wait for everything to
|
2017-03-14 07:52:26 +00:00
|
|
|
* go quiet.
|
RAID1: a new I/O barrier implementation to remove resync window
'Commit 79ef3a8aa1cb ("raid1: Rewrite the implementation of iobarrier.")'
introduces a sliding resync window for raid1 I/O barrier, this idea limits
I/O barriers to happen only inside a slidingresync window, for regular
I/Os out of this resync window they don't need to wait for barrier any
more. On large raid1 device, it helps a lot to improve parallel writing
I/O throughput when there are background resync I/Os performing at
same time.
The idea of sliding resync widow is awesome, but code complexity is a
challenge. Sliding resync window requires several variables to work
collectively, this is complexed and very hard to make it work correctly.
Just grep "Fixes: 79ef3a8aa1" in kernel git log, there are 8 more patches
to fix the original resync window patch. This is not the end, any further
related modification may easily introduce more regreassion.
Therefore I decide to implement a much simpler raid1 I/O barrier, by
removing resync window code, I believe life will be much easier.
The brief idea of the simpler barrier is,
- Do not maintain a global unique resync window
- Use multiple hash buckets to reduce I/O barrier conflicts, regular
I/O only has to wait for a resync I/O when both them have same barrier
bucket index, vice versa.
- I/O barrier can be reduced to an acceptable number if there are enough
barrier buckets
Here I explain how the barrier buckets are designed,
- BARRIER_UNIT_SECTOR_SIZE
The whole LBA address space of a raid1 device is divided into multiple
barrier units, by the size of BARRIER_UNIT_SECTOR_SIZE.
Bio requests won't go across border of barrier unit size, that means
maximum bio size is BARRIER_UNIT_SECTOR_SIZE<<9 (64MB) in bytes.
For random I/O 64MB is large enough for both read and write requests,
for sequential I/O considering underlying block layer may merge them
into larger requests, 64MB is still good enough.
Neil also points out that for resync operation, "we want the resync to
move from region to region fairly quickly so that the slowness caused
by having to synchronize with the resync is averaged out over a fairly
small time frame". For full speed resync, 64MB should take less then 1
second. When resync is competing with other I/O, it could take up a few
minutes. Therefore 64MB size is fairly good range for resync.
- BARRIER_BUCKETS_NR
There are BARRIER_BUCKETS_NR buckets in total, which is defined by,
#define BARRIER_BUCKETS_NR_BITS (PAGE_SHIFT - 2)
#define BARRIER_BUCKETS_NR (1<<BARRIER_BUCKETS_NR_BITS)
this patch makes the bellowed members of struct r1conf from integer
to array of integers,
- int nr_pending;
- int nr_waiting;
- int nr_queued;
- int barrier;
+ int *nr_pending;
+ int *nr_waiting;
+ int *nr_queued;
+ int *barrier;
number of the array elements is defined as BARRIER_BUCKETS_NR. For 4KB
kernel space page size, (PAGE_SHIFT - 2) indecates there are 1024 I/O
barrier buckets, and each array of integers occupies single memory page.
1024 means for a request which is smaller than the I/O barrier unit size
has ~0.1% chance to wait for resync to pause, which is quite a small
enough fraction. Also requesting single memory page is more friendly to
kernel page allocator than larger memory size.
- I/O barrier bucket is indexed by bio start sector
If multiple I/O requests hit different I/O barrier units, they only need
to compete I/O barrier with other I/Os which hit the same I/O barrier
bucket index with each other. The index of a barrier bucket which a
bio should look for is calculated by sector_to_idx() which is defined
in raid1.h as an inline function,
static inline int sector_to_idx(sector_t sector)
{
return hash_long(sector >> BARRIER_UNIT_SECTOR_BITS,
BARRIER_BUCKETS_NR_BITS);
}
Here sector_nr is the start sector number of a bio.
- Single bio won't go across boundary of a I/O barrier unit
If a request goes across boundary of barrier unit, it will be split. A
bio may be split in raid1_make_request() or raid1_sync_request(), if
sectors returned by align_to_barrier_unit_end() is smaller than
original bio size.
Compared with the single sliding resync window,
- Currently resync I/O grows linearly, therefore regular and resync I/O
will conflict within a single barrier unit. So the I/O behavior is
similar to the single sliding resync window.
- But a barrier bucket is shared by all barrier units with an identical
barrier bucket index, so the probability of conflict might be higher
than with the single sliding resync window, in the condition that writing
I/Os always hit barrier units which have identical barrier bucket indexes
to the resync I/Os. This is a very rare condition in real I/O workloads;
I cannot imagine how it could happen in practice.
- Therefore we can achieve a good enough low conflict rate with a much
simpler barrier algorithm and implementation.
There are a few changes that should be noticed,
- In raid1d(), I change the code to decrease conf->nr_queued[idx] in a
single loop; it looks like this,
	spin_lock_irqsave(&conf->device_lock, flags);
	conf->nr_queued[idx]--;
	spin_unlock_irqrestore(&conf->device_lock, flags);
This change generates more spin lock operations, but in the next patch of
this patch set it will be replaced by a single line of code,
	atomic_dec(&conf->nr_queued[idx]);
so we don't need to worry about the spin lock cost here.
- Mainline raid1 code splits the original raid1_make_request() into
raid1_read_request() and raid1_write_request(). If the original bio
crosses an I/O barrier unit boundary, the bio will be split before
calling raid1_read_request() or raid1_write_request(); this change
makes the code logic simpler and clearer.
- In this patch wait_barrier() is moved from raid1_make_request() to
raid1_write_request(). In raid1_read_request(), the original
wait_barrier() is replaced by wait_read_barrier().
The difference is that wait_read_barrier() only waits if the array is
frozen; using different barrier functions in different code paths makes
the code cleaner and easier to read.
Changelog
V4:
- Add alloc_r1bio() to remove redundant r1bio memory allocation code.
- Fix many typos in patch comments.
- Use (PAGE_SHIFT - ilog2(sizeof(int))) to define BARRIER_BUCKETS_NR_BITS.
V3:
- Rebase the patch against latest upstream kernel code.
- Many fixes per review comments from Neil,
- Go back to using pointers instead of arrays in struct r1conf
- Remove total_barriers from struct r1conf
- Add more patch comments to explain how/why the values of
BARRIER_UNIT_SECTOR_SIZE and BARRIER_BUCKETS_NR are decided.
- Use get_unqueued_pending() to replace get_all_pendings() and
get_all_queued()
- Increase bucket number from 512 to 1024
- Change code comment format per review from Shaohua.
V2:
- Use bio_split() to split the original bio if it crosses a barrier unit
boundary, to make the code simpler, by suggestion from Shaohua and
Neil.
- Use hash_long() to replace the original linear hash, to avoid a possible
conflict between resync I/O and sequential write I/O, by suggestion from
Shaohua.
- Add conf->total_barriers to record barrier depth, which is used to
control number of parallel sync I/O barriers, by suggestion from Shaohua.
- In the V1 patch the barrier-bucket related members of r1conf listed
below are allocated in a memory page. To make the code simpler, the V2
patch moves
the memory space into struct r1conf, like this,
- int nr_pending;
- int nr_waiting;
- int nr_queued;
- int barrier;
+ int nr_pending[BARRIER_BUCKETS_NR];
+ int nr_waiting[BARRIER_BUCKETS_NR];
+ int nr_queued[BARRIER_BUCKETS_NR];
+ int barrier[BARRIER_BUCKETS_NR];
This change is by the suggestion from Shaohua.
- Remove some irrelevant code comments, by suggestion from Guoqing.
- Add a missing wait_barrier() before jumping to retry_write, in
raid1_make_write_request().
V1:
- Original RFC patch for comments
Signed-off-by: Coly Li <colyli@suse.de>
Cc: Johannes Thumshirn <jthumshirn@suse.de>
Cc: Guoqing Jiang <gqjiang@suse.com>
Reviewed-by: Neil Brown <neilb@suse.de>
Signed-off-by: Shaohua Li <shli@fb.com>
2017-02-17 19:05:56 +00:00
|
|
|
* This is called in two situations:
|
|
|
|
* 1) management command handlers (reshape, remove disk, quiesce).
|
|
|
|
* 2) one normal I/O request failed.
|
|
|
|
|
|
|
|
* After array_frozen is set to 1, new sync IO will be blocked at
|
|
|
|
* raise_barrier(), and new normal I/O will be blocked at _wait_barrier()
|
|
|
|
* or wait_read_barrier(). The in-flight I/Os will either complete or be
|
|
|
|
* queued. When everything goes quiet, there are only queued I/Os left.
|
|
|
|
|
|
|
|
* Every in-flight I/O contributes to a conf->nr_pending[idx], where idx is the
|
|
|
|
* barrier bucket index which this I/O request hits. When all sync and
|
|
|
|
* normal I/O are queued, sum of all conf->nr_pending[] will match sum
|
|
|
|
* of all conf->nr_queued[]. But normal I/O failure is an exception,
|
|
|
|
* in handle_read_error(), we may call freeze_array() before trying to
|
|
|
|
* fix the read error. In this case, the error read I/O is not queued,
|
|
|
|
* so get_unqueued_pending() == 1.
|
|
|
|
*
|
|
|
|
* Therefore before this function returns, we need to wait until
|
|
|
|
* get_unqueued_pending(conf) becomes equal to extra. For
|
|
|
|
* normal I/O context, extra is 1; in all other situations extra is 0.
|
2006-01-06 08:20:19 +00:00
|
|
|
*/
|
|
|
|
spin_lock_irq(&conf->resync_lock);
|
2013-11-14 04:16:18 +00:00
|
|
|
conf->array_frozen = 1;
|
2016-11-14 05:30:21 +00:00
|
|
|
raid1_log(conf->mddev, "wait freeze");
|
RAID1: a new I/O barrier implementation to remove resync window
2017-02-17 19:05:56 +00:00
|
|
|
wait_event_lock_irq_cmd(
|
|
|
|
conf->wait_barrier,
|
|
|
|
get_unqueued_pending(conf) == extra,
|
|
|
|
conf->resync_lock,
|
|
|
|
flush_pending_writes(conf));
|
2006-01-06 08:20:19 +00:00
|
|
|
spin_unlock_irq(&conf->resync_lock);
|
|
|
|
}
|
2011-10-11 05:49:05 +00:00
|
|
|
static void unfreeze_array(struct r1conf *conf)
|
2006-01-06 08:20:19 +00:00
|
|
|
{
|
|
|
|
/* reverse the effect of the freeze */
|
|
|
|
spin_lock_irq(&conf->resync_lock);
|
2013-11-14 04:16:18 +00:00
|
|
|
conf->array_frozen = 0;
|
2006-01-06 08:20:19 +00:00
|
|
|
spin_unlock_irq(&conf->resync_lock);
|
RAID1: avoid unnecessary spin locks in I/O barrier code
When I ran a parallel reading performance test on an md raid1 device with
two NVMe SSDs, I observed surprisingly bad throughput: with fio at 64KB
block size, 40 sequential read I/O jobs and 128 iodepth, overall
throughput was only 2.7GB/s, around 50% of the ideal performance number.
perf reports that locking contention happens in the allow_barrier() and
wait_barrier() code,
- 41.41% fio [kernel.kallsyms] [k] _raw_spin_lock_irqsave
- _raw_spin_lock_irqsave
+ 89.92% allow_barrier
+ 9.34% __wake_up
- 37.30% fio [kernel.kallsyms] [k] _raw_spin_lock_irq
- _raw_spin_lock_irq
- 100.00% wait_barrier
The reason is that these I/O barrier related functions,
- raise_barrier()
- lower_barrier()
- wait_barrier()
- allow_barrier()
always take conf->resync_lock first, even when there are only regular
read I/Os and no resync I/O at all. This is a huge performance penalty.
The solution is a lockless-like algorithm in the I/O barrier code, which
only holds conf->resync_lock when it has to.
The original idea is from Hannes Reinecke, and Neil Brown provided
comments to improve it. I continued to work on it and brought the patch
into its current form.
In the new simpler raid1 I/O barrier implementation, there are two
wait-barrier functions,
- wait_barrier()
This calls _wait_barrier() and is used for regular write I/O. If there is
resync I/O happening on the same I/O barrier bucket, or the whole array
is frozen, the task will wait until there is no barrier on the same
barrier bucket, or the whole array is unfrozen.
- wait_read_barrier()
Since regular read I/O won't interfere with resync I/O (read_balance()
will make sure only up-to-date data is read out), it is unnecessary to
wait for a barrier in regular read I/Os; waiting is only necessary
when the whole array is frozen.
The operations on conf->nr_pending[idx], conf->nr_waiting[idx] and
conf->barrier[idx] are very carefully designed in raise_barrier(),
lower_barrier(), _wait_barrier() and wait_read_barrier(), in order to
avoid unnecessary spin locks in these functions. Once
conf->nr_pending[idx] is increased, a resync I/O with the same barrier
bucket index has to wait in raise_barrier(). Then in _wait_barrier(), if
no barrier is raised on the same barrier bucket index and the array is
not frozen, the regular I/O doesn't need to hold conf->resync_lock; it
can just increase conf->nr_pending[idx] and return to its caller.
wait_read_barrier() is very similar to _wait_barrier(); the only
difference is that it only waits when the array is frozen. For heavy
parallel read I/Os, the lockless I/O barrier code gets rid of almost all
spin lock cost; a sketch of the fast path follows.
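A sketch of that fast path (simplified and hedged: the slow path and the
exact wait conditions are elided, and this is my reading of the design
rather than the literal patch),
static void _wait_barrier(struct r1conf *conf, int idx)
{
	/* declare this regular I/O as pending in its bucket first */
	atomic_inc(&conf->nr_pending[idx]);
	smp_mb__after_atomic();

	/* fast path: no resync barrier in this bucket, array not frozen */
	if (!READ_ONCE(conf->array_frozen) &&
	    !atomic_read(&conf->barrier[idx]))
		return;

	/* slow path: back out, then wait under conf->resync_lock */
	atomic_dec(&conf->nr_pending[idx]);
	wake_up(&conf->wait_barrier);
	/* ... take resync_lock, bump nr_waiting[idx], sleep ... */
}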
This patch significantly improves raid1 read performance. In my testing,
on a raid1 device built from two NVMe SSDs, running fio with 64KB
block size, 40 sequential read I/O jobs and 128 iodepth, overall
throughput increases from 2.7GB/s to 4.6GB/s (+70%).
Changelog
V4:
- Change conf->nr_queued[] to atomic_t.
- Define BARRIER_BUCKETS_NR_BITS by (PAGE_SHIFT - ilog2(sizeof(atomic_t)))
V3:
- Add smp_mb__after_atomic() as Shaohua and Neil suggested.
- Change conf->nr_queued[] from atomic_t to int.
- Change conf->array_frozen from atomic_t back to int, and use
READ_ONCE(conf->array_frozen) to check value of conf->array_frozen
in _wait_barrier() and wait_read_barrier().
- In _wait_barrier() and wait_read_barrier(), add a call to
wake_up(&conf->wait_barrier) after atomic_dec(&conf->nr_pending[idx]),
to fix a deadlock between _wait_barrier()/wait_read_barrier() and
freeze_array().
V2:
- Remove a spin_lock/unlock pair in raid1d().
- Add more code comments to explain why there is no race when checking
two atomic_t variables at the same time.
V1:
- Original RFC patch for comments.
Signed-off-by: Coly Li <colyli@suse.de>
Cc: Shaohua Li <shli@fb.com>
Cc: Hannes Reinecke <hare@suse.com>
Cc: Johannes Thumshirn <jthumshirn@suse.de>
Cc: Guoqing Jiang <gqjiang@suse.com>
Reviewed-by: Neil Brown <neilb@suse.de>
Signed-off-by: Shaohua Li <shli@fb.com>
2017-02-17 19:05:57 +00:00
|
|
|
wake_up(&conf->wait_barrier);
|
2006-01-06 08:20:19 +00:00
|
|
|
}
|
|
|
|
|
2017-07-17 21:33:48 +00:00
|
|
|
static void alloc_behind_master_bio(struct r1bio *r1_bio,
|
2017-04-05 04:05:50 +00:00
|
|
|
struct bio *bio)
|
2005-09-09 23:23:47 +00:00
|
|
|
{
|
2017-04-05 04:05:50 +00:00
|
|
|
int size = bio->bi_iter.bi_size;
|
2017-03-16 16:12:31 +00:00
|
|
|
unsigned vcnt = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
|
|
|
|
int i = 0;
|
|
|
|
struct bio *behind_bio = NULL;
|
|
|
|
|
|
|
|
behind_bio = bio_alloc_mddev(GFP_NOIO, vcnt, r1_bio->mddev);
|
|
|
|
if (!behind_bio)
|
2017-07-17 21:33:48 +00:00
|
|
|
return;
|
2005-09-09 23:23:47 +00:00
|
|
|
|
2017-03-24 22:20:47 +00:00
|
|
|
/* discard op, we don't support writezero/writesame yet */
|
2017-07-17 21:33:48 +00:00
|
|
|
if (!bio_has_data(bio)) {
|
|
|
|
behind_bio->bi_iter.bi_size = size;
|
2017-03-24 22:20:47 +00:00
|
|
|
goto skip_copy;
|
2017-07-17 21:33:48 +00:00
|
|
|
}
|
2017-03-24 22:20:47 +00:00
|
|
|
|
2018-02-14 13:23:30 +00:00
|
|
|
behind_bio->bi_write_hint = bio->bi_write_hint;
|
|
|
|
|
2017-03-16 16:12:31 +00:00
|
|
|
while (i < vcnt && size) {
|
|
|
|
struct page *page;
|
|
|
|
int len = min_t(int, PAGE_SIZE, size);
|
|
|
|
|
|
|
|
page = alloc_page(GFP_NOIO);
|
|
|
|
if (unlikely(!page))
|
|
|
|
goto free_pages;
|
|
|
|
|
|
|
|
bio_add_page(behind_bio, page, len, 0);
|
|
|
|
|
|
|
|
size -= len;
|
|
|
|
i++;
|
2005-09-09 23:23:47 +00:00
|
|
|
}
|
2017-03-16 16:12:31 +00:00
|
|
|
|
2017-04-05 04:05:50 +00:00
|
|
|
bio_copy_data(behind_bio, bio);
|
2017-03-24 22:20:47 +00:00
|
|
|
skip_copy:
|
2018-01-17 13:38:02 +00:00
|
|
|
r1_bio->behind_master_bio = behind_bio;
|
2011-05-11 04:51:19 +00:00
|
|
|
set_bit(R1BIO_BehindIO, &r1_bio->state);
|
2005-09-09 23:23:47 +00:00
|
|
|
|
2017-07-17 21:33:48 +00:00
|
|
|
return;
|
2017-03-16 16:12:31 +00:00
|
|
|
|
|
|
|
free_pages:
|
2013-10-11 22:44:27 +00:00
|
|
|
pr_debug("%dB behind alloc failed, doing sync I/O\n",
|
|
|
|
bio->bi_iter.bi_size);
|
2017-03-16 16:12:31 +00:00
|
|
|
bio_free_pages(behind_bio);
|
2017-07-17 21:33:48 +00:00
|
|
|
bio_put(behind_bio);
|
2005-09-09 23:23:47 +00:00
|
|
|
}
|
|
|
|
|
2012-08-01 22:33:20 +00:00
|
|
|
struct raid1_plug_cb {
|
|
|
|
struct blk_plug_cb cb;
|
|
|
|
struct bio_list pending;
|
|
|
|
int pending_cnt;
|
|
|
|
};
|
|
|
|
|
|
|
|
static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
|
|
|
|
{
|
|
|
|
struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb,
|
|
|
|
cb);
|
|
|
|
struct mddev *mddev = plug->cb.data;
|
|
|
|
struct r1conf *conf = mddev->private;
|
|
|
|
struct bio *bio;
|
|
|
|
|
2012-11-27 01:14:40 +00:00
|
|
|
if (from_schedule || current->bio_list) {
|
2012-08-01 22:33:20 +00:00
|
|
|
spin_lock_irq(&conf->device_lock);
|
|
|
|
bio_list_merge(&conf->pending_bio_list, &plug->pending);
|
|
|
|
conf->pending_count += plug->pending_cnt;
|
|
|
|
spin_unlock_irq(&conf->device_lock);
|
2013-02-25 01:38:29 +00:00
|
|
|
wake_up(&conf->wait_barrier);
|
2012-08-01 22:33:20 +00:00
|
|
|
md_wakeup_thread(mddev->thread);
|
|
|
|
kfree(plug);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* we aren't scheduling, so we can do the write-out directly. */
|
|
|
|
bio = bio_list_get(&plug->pending);
|
2017-04-05 04:05:51 +00:00
|
|
|
flush_bio_list(conf, bio);
|
2012-08-01 22:33:20 +00:00
|
|
|
kfree(plug);
|
|
|
|
}
|
|
|
|
|
md/raid1: simplify handle_read_error().
handle_read_error() duplicates a lot of the work that raid1_read_request()
does, so it makes sense to just use that function.
This doesn't quite work as handle_read_error() relies on the same r1bio
being re-used so that, in the case of a read-only array, setting
IO_BLOCKED in r1bio->bios[] ensures read_balance() won't re-use
that device.
So we need to allow an r1bio to be passed to raid1_read_request(), and to
have that function mostly initialise the r1bio, but leave the bios[]
array untouched.
A part of handle_read_error() that needs to be preserved is the warning
message it prints, so it is conditionally added to raid1_read_request().
Note that this highlights a minor bug in alloc_r1bio(): it doesn't
initialise the bios[] array, so it is possible that old content is there,
which might cause read_balance() to ignore some devices for no good reason.
With this change, we no longer need inc_pending(), or the sectors_handled
arg to alloc_r1bio().
As handle_read_error() is called from raid1d() and allocates memory,
there is a tiny chance of a deadlock. All elements of various pools
could be queued waiting for raid1 to handle them, and there may be no
extra memory free.
Achieving guaranteed forward progress would probably require a second
thread and another mempool. Instead of that complexity, add
__GFP_HIGH to any allocations when raid1_read_request() is called
from raid1d.
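As a sketch of that choice (assuming the retry call is distinguished by
the r1_bio argument being passed in, per the description above),
	gfp_t gfp = r1_bio ? (GFP_NOIO | __GFP_HIGH) : GFP_NOIO;
so the raid1d retry path may dip into emergency memory reserves, while
normal submission allocates as usual.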
Signed-off-by: NeilBrown <neilb@suse.com>
Signed-off-by: Shaohua Li <shli@fb.com>
2017-04-05 04:05:50 +00:00
|
|
|
static void init_r1bio(struct r1bio *r1_bio, struct mddev *mddev, struct bio *bio)
|
|
|
|
{
|
|
|
|
r1_bio->master_bio = bio;
|
|
|
|
r1_bio->sectors = bio_sectors(bio);
|
|
|
|
r1_bio->state = 0;
|
|
|
|
r1_bio->mddev = mddev;
|
|
|
|
r1_bio->sector = bio->bi_iter.bi_sector;
|
|
|
|
}
|
|
|
|
|
RAID1: a new I/O barrier implementation to remove resync window
2017-02-17 19:05:56 +00:00
|
|
|
static inline struct r1bio *
|
md/raid1: simplify handle_read_error().
2017-04-05 04:05:50 +00:00
|
|
|
alloc_r1bio(struct mddev *mddev, struct bio *bio)
|
RAID1: a new I/O barrier implementation to remove resync window
2017-02-17 19:05:56 +00:00
|
|
|
{
|
|
|
|
struct r1conf *conf = mddev->private;
|
|
|
|
struct r1bio *r1_bio;
|
|
|
|
|
2018-05-20 22:25:52 +00:00
|
|
|
r1_bio = mempool_alloc(&conf->r1bio_pool, GFP_NOIO);
|
md/raid1: simplify handle_read_error().
2017-04-05 04:05:50 +00:00
|
|
|
/* Ensure no bio records IO_BLOCKED */
|
|
|
|
memset(r1_bio->bios, 0, conf->raid_disks * sizeof(r1_bio->bios[0]));
|
|
|
|
init_r1bio(r1_bio, mddev, bio);
|
RAID1: a new I/O barrier implementation to remove resync window
2017-02-17 19:05:56 +00:00
|
|
|
return r1_bio;
|
|
|
|
}
|
|
|
|
|
md/raid1: simplify the splitting of requests.
raid1 currently splits requests in two different ways for
two different reasons.
First, bio_split() is used to ensure the bio fits within a
resync accounting region.
Second, multiple r1bios are allocated for each bio to handle
the possibility of known bad blocks on some devices.
This can be simplified to just use bio_split() once, and not
use multiple r1bios.
We delay the split until we know a maximum bio size that can
be handled with a single r1bio, and then split the bio and
queue the remainder for later handling.
This avoids all loops inside raid1.c request handling. Just
a single read, or a single set of writes, is submitted to
lower-level devices for each bio that comes from
generic_make_request().
When the bio needs to be split, generic_make_request() will
do the necessary looping and call md_make_request() multiple
times.
raid1_make_request() no longer queues requests for raid1 to handle,
so we can remove that branch from the 'if'.
This patch also creates a new private bio_set
(conf->bio_split) for splitting bios. Using fs_bio_set
is wrong, as it is meant to be used by filesystems, not
block devices. Using it inside md can lead to deadlocks
under high memory pressure.
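A sketch of the resulting split-and-chain pattern (the surrounding
context and the exact bio_set reference are assumptions based on the
description above),
	if (max_sectors < bio_sectors(bio)) {
		struct bio *split = bio_split(bio, max_sectors,
					      gfp, &conf->bio_split);
		bio_chain(split, bio);
		/* hand the remainder back for later handling */
		generic_make_request(bio);
		bio = split;
	}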
Delete unused variable in raid1_write_request() (Shaohua)
Signed-off-by: NeilBrown <neilb@suse.com>
Signed-off-by: Shaohua Li <shli@fb.com>
2017-04-05 04:05:50 +00:00
|
|
|
static void raid1_read_request(struct mddev *mddev, struct bio *bio,
|
md/raid1: simplify handle_read_error().
2017-04-05 04:05:50 +00:00
|
|
|
int max_read_sectors, struct r1bio *r1_bio)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2011-10-11 05:49:05 +00:00
|
|
|
struct r1conf *conf = mddev->private;
|
2012-07-31 00:03:52 +00:00
|
|
|
struct raid1_info *mirror;
|
2005-04-16 22:20:36 +00:00
|
|
|
struct bio *read_bio;
|
2016-12-05 20:02:57 +00:00
|
|
|
struct bitmap *bitmap = mddev->bitmap;
|
|
|
|
const int op = bio_op(bio);
|
|
|
|
const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
|
|
|
|
int max_sectors;
|
|
|
|
int rdisk;
|
md/raid1: simplify handle_read_error().
2017-04-05 04:05:50 +00:00
|
|
|
bool print_msg = !!r1_bio;
|
|
|
|
char b[BDEVNAME_SIZE];
|
2016-12-05 20:02:57 +00:00
|
|
|
|
RAID1: a new I/O barrier implementation to remove resync window
Commit 79ef3a8aa1cb ("raid1: Rewrite the implementation of iobarrier.")
introduced a sliding resync window for the raid1 I/O barrier. The idea
limits I/O barriers to happen only inside the sliding resync window;
regular I/Os outside the resync window no longer need to wait for a
barrier. On a large raid1 device, this helps a lot to improve parallel
write I/O throughput when background resync I/Os are running at the
same time.
The idea of the sliding resync window is awesome, but code complexity
is a challenge. The sliding resync window requires several variables to
work collectively, which is complex and very hard to get right. Just
grep "Fixes: 79ef3a8aa1" in the kernel git log: there are 8 more patches
fixing the original resync window patch. This is not the end; any
further related modification may easily introduce more regressions.
Therefore I decided to implement a much simpler raid1 I/O barrier; by
removing the resync window code, I believe life will be much easier.
The brief idea of the simpler barrier is,
- Do not maintain a globally unique resync window
- Use multiple hash buckets to reduce I/O barrier conflicts; regular
I/O only has to wait for a resync I/O when both have the same barrier
bucket index, and vice versa.
- I/O barrier conflicts can be reduced to an acceptable number if there
are enough barrier buckets
Here is how the barrier buckets are designed,
- BARRIER_UNIT_SECTOR_SIZE
The whole LBA address space of a raid1 device is divided into multiple
barrier units, each of size BARRIER_UNIT_SECTOR_SIZE.
Bio requests won't cross a barrier unit border, which means the maximum
bio size is BARRIER_UNIT_SECTOR_SIZE<<9 (64MB) bytes.
For random I/O, 64MB is large enough for both read and write requests;
for sequential I/O, considering that the underlying block layer may
merge them into larger requests, 64MB is still good enough.
Neil also points out that for resync operation, "we want the resync to
move from region to region fairly quickly so that the slowness caused
by having to synchronize with the resync is averaged out over a fairly
small time frame". For a full-speed resync, 64MB should take less than
1 second. When resync is competing with other I/O, it could take up to
a few minutes. Therefore 64MB is a fairly good size for resync.
- BARRIER_BUCKETS_NR
There are BARRIER_BUCKETS_NR buckets in total, defined by,
	#define BARRIER_BUCKETS_NR_BITS (PAGE_SHIFT - 2)
	#define BARRIER_BUCKETS_NR (1<<BARRIER_BUCKETS_NR_BITS)
This patch changes the following members of struct r1conf from integers
to arrays of integers,
	- int nr_pending;
	- int nr_waiting;
	- int nr_queued;
	- int barrier;
	+ int *nr_pending;
	+ int *nr_waiting;
	+ int *nr_queued;
	+ int *barrier;
The number of array elements is defined as BARRIER_BUCKETS_NR. For a
4KB kernel page size, (PAGE_SHIFT - 2) indicates there are 1024 I/O
barrier buckets, and each array of integers occupies a single memory
page. With 1024 buckets, a request smaller than the I/O barrier unit
size has a ~0.1% chance of having to wait for resync to pause, which is
a small enough fraction. Requesting a single memory page is also
friendlier to the kernel page allocator than a larger allocation.
- I/O barrier buckets are indexed by bio start sector
I/O requests that hit different I/O barrier units only need to compete
for the I/O barrier with other I/Os that hit the same barrier bucket
index. The index of the barrier bucket that a bio should look at is
calculated by sector_to_idx(), defined in raid1.h as an inline
function,
	static inline int sector_to_idx(sector_t sector)
	{
		return hash_long(sector >> BARRIER_UNIT_SECTOR_BITS,
				BARRIER_BUCKETS_NR_BITS);
	}
Here sector is the start sector number of a bio.
- A single bio won't cross the boundary of an I/O barrier unit
If a request crosses a barrier unit boundary, it will be split. A bio
may be split in raid1_make_request() or raid1_sync_request(), if the
sector count returned by align_to_barrier_unit_end() is smaller than
the original bio size.
Compared to the single sliding resync window,
- Currently resync I/O grows linearly, so regular and resync I/O
conflict within a single barrier unit. So the I/O behaviour is similar
to the single sliding resync window.
- But a barrier bucket is shared by all barrier units with an identical
bucket index, so the probability of conflict might be higher than with
the single sliding resync window, in the case that write I/Os always
hit barrier units whose bucket indexes are identical to those of the
resync I/Os. This is a very rare condition in real I/O workloads; I
cannot imagine how it could happen in practice.
- Therefore we can achieve a low enough conflict rate with a much
simpler barrier algorithm and implementation.
There are two changes that should be noted,
- In raid1d(), the code that decreases conf->nr_pending[idx] is changed
into a single loop; it looks like this,
	spin_lock_irqsave(&conf->device_lock, flags);
	conf->nr_queued[idx]--;
	spin_unlock_irqrestore(&conf->device_lock, flags);
This change generates more spin lock operations, but in the next patch
of this set it will be replaced by a single line of code,
	atomic_dec(&conf->nr_queued[idx]);
so we don't need to worry about the spin lock cost here.
- Mainline raid1 code split the original raid1_make_request() into
raid1_read_request() and raid1_write_request(). If the original bio
crosses an I/O barrier unit boundary, it will be split before calling
raid1_read_request() or raid1_write_request(); this makes the code
logic simpler and clearer.
- In this patch wait_barrier() is moved from raid1_make_request() to
raid1_write_request(). In raid1_read_request(), the original
wait_barrier() is replaced by wait_read_barrier(). The difference is
that wait_read_barrier() only waits if the array is frozen; using
different barrier functions in different code paths makes the code
cleaner and easier to read.
Changelog
V4:
- Add alloc_r1bio() to remove redundant r1bio memory allocation code.
- Fix many typos in patch comments.
- Use (PAGE_SHIFT - ilog2(sizeof(int))) to define BARRIER_BUCKETS_NR_BITS.
V3:
- Rebase the patch against the latest upstream kernel code.
- Many fixes based on review comments from Neil,
- Go back to using pointers instead of arrays in struct r1conf
- Remove total_barriers from struct r1conf
- Add more patch comments to explain how/why the values of
BARRIER_UNIT_SECTOR_SIZE and BARRIER_BUCKETS_NR were decided.
- Use get_unqueued_pending() to replace get_all_pendings() and
get_all_queued()
- Increase the bucket number from 512 to 1024
- Change the code comment format following review from Shaohua.
V2:
- Use bio_split() to split the original bio if it crosses a barrier
unit boundary, to make the code simpler, as suggested by Shaohua and
Neil.
- Use hash_long() to replace the original linear hash, to avoid a
possible conflict between resync I/O and sequential write I/O, as
suggested by Shaohua.
- Add conf->total_barriers to record barrier depth, used to control the
number of parallel sync I/O barriers, as suggested by Shaohua.
- In the V1 patch, the barrier-bucket members of r1conf shown below
were allocated in a memory page. To make the code simpler, the V2 patch
moves them into struct r1conf, like this,
	- int nr_pending;
	- int nr_waiting;
	- int nr_queued;
	- int barrier;
	+ int nr_pending[BARRIER_BUCKETS_NR];
	+ int nr_waiting[BARRIER_BUCKETS_NR];
	+ int nr_queued[BARRIER_BUCKETS_NR];
	+ int barrier[BARRIER_BUCKETS_NR];
This change is by suggestion from Shaohua.
- Remove some irrelevant code comments, as suggested by Guoqing.
- Add a missing wait_barrier() before jumping to retry_write, in
raid1_make_write_request().
V1:
- Original RFC patch for comments
Signed-off-by: Coly Li <colyli@suse.de>
Cc: Johannes Thumshirn <jthumshirn@suse.de>
Cc: Guoqing Jiang <gqjiang@suse.com>
Reviewed-by: Neil Brown <neilb@suse.de>
Signed-off-by: Shaohua Li <shli@fb.com>
2017-02-17 19:05:56 +00:00
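To make the bucket math concrete, here is a small self-contained
userspace sketch. The hash_long() stand-in mirrors the kernel's 64-bit
multiplicative hash, and PAGE_SHIFT is assumed to be 12 (4KB pages),
giving BARRIER_BUCKETS_NR_BITS = 10. None of this is the kernel code
itself.

/* cc -o bucket_demo bucket_demo.c && ./bucket_demo */
#include <stdio.h>
#include <stdint.h>

#define BARRIER_UNIT_SECTOR_BITS 17	/* 1<<17 sectors == 64MB */
#define BARRIER_BUCKETS_NR_BITS  10	/* PAGE_SHIFT(12) - ilog2(sizeof(int)) */

/* userspace stand-in for the kernel's hash_long() (hash_64) */
static unsigned int hash_long_stub(uint64_t val, unsigned int bits)
{
	return (unsigned int)((val * 0x61C8864680B583EBull) >> (64 - bits));
}

static int sector_to_idx(uint64_t sector)
{
	return hash_long_stub(sector >> BARRIER_UNIT_SECTOR_BITS,
			      BARRIER_BUCKETS_NR_BITS);
}

int main(void)
{
	uint64_t unit = 1ULL << BARRIER_UNIT_SECTOR_BITS;

	/* first and last sector of one barrier unit: same bucket */
	printf("unit 0: %d %d\n", sector_to_idx(0), sector_to_idx(unit - 1));
	/* the next unit usually lands in a different bucket */
	printf("unit 1: %d\n", sector_to_idx(unit));
	return 0;
}

Because a bio never crosses a barrier unit, a single bucket index covers
the whole request, so a regular write only serialises against a resync
I/O whose region hashes to the same bucket.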
|
|
|
/*
|
2017-04-05 04:05:50 +00:00
|
|
|
* If r1_bio is set, we are blocking the raid1d thread
|
|
|
|
* so there is a tiny risk of deadlock. So ask for
|
|
|
|
* emergency memory if needed.
|
2017-02-17 19:05:56 +00:00
|
|
|
*/
|
2017-04-05 04:05:50 +00:00
|
|
|
gfp_t gfp = r1_bio ? (GFP_NOIO | __GFP_HIGH) : GFP_NOIO;
|
2017-02-17 19:05:56 +00:00
|
|
|
|
2017-04-05 04:05:50 +00:00
|
|
|
if (print_msg) {
|
|
|
|
/* Need to get the block device name carefully */
|
|
|
|
struct md_rdev *rdev;
|
|
|
|
rcu_read_lock();
|
|
|
|
rdev = rcu_dereference(conf->mirrors[r1_bio->read_disk].rdev);
|
|
|
|
if (rdev)
|
|
|
|
bdevname(rdev->bdev, b);
|
|
|
|
else
|
|
|
|
strcpy(b, "???");
|
|
|
|
rcu_read_unlock();
|
|
|
|
}
|
2016-12-05 20:02:57 +00:00
|
|
|
|
2017-02-17 19:05:56 +00:00
|
|
|
/*
|
|
|
|
* Still need barrier for READ in case the whole
|
|
|
|
* array is frozen.
|
|
|
|
*/
|
|
|
|
wait_read_barrier(conf, bio->bi_iter.bi_sector);
|
|
|
|
|
2017-04-05 04:05:50 +00:00
|
|
|
if (!r1_bio)
|
|
|
|
r1_bio = alloc_r1bio(mddev, bio);
|
|
|
|
else
|
|
|
|
init_r1bio(r1_bio, mddev, bio);
|
md/raid1: simplify the splitting of requests.
raid1 currently splits requests in two different ways for
two different reasons.
First, bio_split() is used to ensure the bio fits within a
resync accounting region.
Second, multiple r1bios are allocated for each bio to handle
the possibility of known bad blocks on some devices.
This can be simplified to just use bio_split() once, and not
use multiple r1bios.
We delay the split until we know a maximum bio size that can
be handled with a single r1bio, and then split the bio and
queue the remainder for later handling.
This avoids all loops inside raid1.c request handling. Just
a single read, or a single set of writes, is submitted to
lower-level devices for each bio that comes from
generic_make_request().
When the bio needs to be split, generic_make_request() will
do the necessary looping and call md_make_request() multiple
times.
raid1_make_request() no longer queues requests for raid1 to handle,
so we can remove that branch from the 'if'.
This patch also creates a new private bio_set
(conf->bio_split) for splitting bios. Using fs_bio_set
is wrong, as it is meant to be used by filesystems, not
block devices. Using it inside md can lead to deadlocks
under high memory pressure.
Delete unused variable in raid1_write_request() (Shaohua)
Signed-off-by: NeilBrown <neilb@suse.com>
Signed-off-by: Shaohua Li <shli@fb.com>
2017-04-05 04:05:50 +00:00
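Condensed, the split-once pattern reads as below. This is a sketch
using the names from the code later in this function, not the exact
patch hunk; submit_bio_noacct() is the modern name for
generic_make_request(), and the read path actually passes its gfp
variable (GFP_NOIO, plus __GFP_HIGH when called from raid1d).

if (max_sectors < bio_sectors(bio)) {
	/* Split off the first max_sectors into a new bio... */
	struct bio *split = bio_split(bio, max_sectors,
				      GFP_NOIO, &conf->bio_split);
	/* ...and chain it so its completion propagates to the remainder. */
	bio_chain(split, bio);
	/* Requeue the remainder; the block layer will re-enter md with it. */
	submit_bio_noacct(bio);
	/* This r1bio now covers only the front part. */
	bio = split;
	r1_bio->master_bio = bio;
	r1_bio->sectors = max_sectors;
}

Splitting from the private conf->bio_split pool, rather than
fs_bio_set, is what avoids the deadlock mentioned above.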
|
|
|
r1_bio->sectors = max_read_sectors;
|
2017-02-17 19:05:56 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* make_request() can abort the operation when read-ahead is being
|
|
|
|
* used and no empty request is available.
|
|
|
|
*/
|
2016-12-05 20:02:57 +00:00
|
|
|
rdisk = read_balance(conf, r1_bio, &max_sectors);
|
|
|
|
|
|
|
|
if (rdisk < 0) {
|
|
|
|
/* couldn't find anywhere to read from */
|
2017-04-05 04:05:50 +00:00
|
|
|
if (print_msg) {
|
|
|
|
pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n",
|
|
|
|
mdname(mddev),
|
|
|
|
b,
|
|
|
|
(unsigned long long)r1_bio->sector);
|
|
|
|
}
|
2016-12-05 20:02:57 +00:00
|
|
|
raid_end_bio_io(r1_bio);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
mirror = conf->mirrors + rdisk;
|
|
|
|
|
2017-04-05 04:05:50 +00:00
|
|
|
if (print_msg)
|
|
|
|
pr_info_ratelimited("md/raid1:%s: redirecting sector %llu to other mirror: %s\n",
|
|
|
|
mdname(mddev),
|
|
|
|
(unsigned long long)r1_bio->sector,
|
|
|
|
bdevname(mirror->rdev->bdev, b));
|
|
|
|
|
2016-12-05 20:02:57 +00:00
|
|
|
if (test_bit(WriteMostly, &mirror->rdev->flags) &&
|
|
|
|
bitmap) {
|
|
|
|
/*
|
|
|
|
* Reading from a write-mostly device must take care not to
|
|
|
|
* over-take any writes that are 'behind'
|
|
|
|
*/
|
|
|
|
raid1_log(mddev, "wait behind writes");
|
|
|
|
wait_event(bitmap->behind_wait,
|
|
|
|
atomic_read(&bitmap->behind_writes) == 0);
|
|
|
|
}
|
2017-04-05 04:05:50 +00:00
|
|
|
|
|
|
|
if (max_sectors < bio_sectors(bio)) {
|
|
|
|
struct bio *split = bio_split(bio, max_sectors,
|
2018-05-20 22:25:52 +00:00
|
|
|
gfp, &conf->bio_split);
|
2017-04-05 04:05:50 +00:00
|
|
|
bio_chain(split, bio);
|
2020-07-01 08:59:44 +00:00
|
|
|
submit_bio_noacct(bio);
|
2017-04-05 04:05:50 +00:00
|
|
|
bio = split;
|
|
|
|
r1_bio->master_bio = bio;
|
|
|
|
r1_bio->sectors = max_sectors;
|
|
|
|
}
|
|
|
|
|
2016-12-05 20:02:57 +00:00
|
|
|
r1_bio->read_disk = rdisk;
|
|
|
|
|
2018-05-20 22:25:52 +00:00
|
|
|
read_bio = bio_clone_fast(bio, gfp, &mddev->bio_set);
|
2016-12-05 20:02:57 +00:00
|
|
|
|
|
|
|
r1_bio->bios[rdisk] = read_bio;
|
|
|
|
|
|
|
|
read_bio->bi_iter.bi_sector = r1_bio->sector +
|
|
|
|
mirror->rdev->data_offset;
|
2017-08-23 17:10:32 +00:00
|
|
|
bio_set_dev(read_bio, mirror->rdev->bdev);
|
2016-12-05 20:02:57 +00:00
|
|
|
read_bio->bi_end_io = raid1_end_read_request;
|
|
|
|
bio_set_op_attrs(read_bio, op, do_sync);
|
|
|
|
if (test_bit(FailFast, &mirror->rdev->flags) &&
|
|
|
|
test_bit(R1BIO_FailFast, &r1_bio->state))
|
|
|
|
read_bio->bi_opf |= MD_FAILFAST;
|
|
|
|
read_bio->bi_private = r1_bio;
|
|
|
|
|
|
|
|
if (mddev->gendisk)
|
2017-08-23 17:10:32 +00:00
|
|
|
trace_block_bio_remap(read_bio->bi_disk->queue, read_bio,
|
|
|
|
disk_devt(mddev->gendisk), r1_bio->sector);
|
2016-12-05 20:02:57 +00:00
|
|
|
|
2020-07-01 08:59:44 +00:00
|
|
|
submit_bio_noacct(read_bio);
|
2016-12-05 20:02:57 +00:00
|
|
|
}
|
|
|
|
|
md/raid1: simplify the splitting of requests.
2017-04-05 04:05:50 +00:00
|
|
|
static void raid1_write_request(struct mddev *mddev, struct bio *bio,
|
|
|
|
int max_write_sectors)
|
2016-12-05 20:02:57 +00:00
|
|
|
{
|
|
|
|
struct r1conf *conf = mddev->private;
|
RAID1: a new I/O barrier implementation to remove resync window
Commit 79ef3a8aa1cb ("raid1: Rewrite the implementation of iobarrier.")
introduced a sliding resync window for the raid1 I/O barrier. This idea
limits I/O barriers to happen only inside the sliding resync window;
regular I/Os outside the resync window no longer need to wait for the
barrier. On a large raid1 device, it helps a lot to improve parallel
write I/O throughput when background resync I/Os are running at the
same time.
The idea of the sliding resync window is awesome, but the code
complexity is a challenge. The sliding resync window requires several
variables to work collectively; this is complex and very hard to get
right. Just grep "Fixes: 79ef3a8aa1" in the kernel git log: there are 8
more patches fixing the original resync window patch. And this is not
the end; any further related modification may easily introduce more
regressions.
Therefore I decided to implement a much simpler raid1 I/O barrier; by
removing the resync window code, I believe life will be much easier.
The brief idea of the simpler barrier is:
- Do not maintain a globally unique resync window.
- Use multiple hash buckets to reduce I/O barrier conflicts; regular
I/O only has to wait for a resync I/O when both of them have the same
barrier bucket index, and vice versa.
- The number of I/O barrier conflicts can be reduced to an acceptable
level if there are enough barrier buckets.
Here is how the barrier buckets are designed:
- BARRIER_UNIT_SECTOR_SIZE
The whole LBA address space of a raid1 device is divided into multiple
barrier units, each of size BARRIER_UNIT_SECTOR_SIZE.
Bio requests won't cross the border of a barrier unit, which means the
maximum bio size is BARRIER_UNIT_SECTOR_SIZE<<9 (64MB) in bytes.
For random I/O 64MB is large enough for both read and write requests;
for sequential I/O, considering that the underlying block layer may
merge them into larger requests, 64MB is still good enough.
Neil also points out that for resync operation, "we want the resync to
move from region to region fairly quickly so that the slowness caused
by having to synchronize with the resync is averaged out over a fairly
small time frame". For a full-speed resync, 64MB should take less than
1 second. When resync is competing with other I/O, it could take up to
a few minutes. Therefore 64MB is a fairly good range for resync.
- BARRIER_BUCKETS_NR
There are BARRIER_BUCKETS_NR buckets in total, defined by
#define BARRIER_BUCKETS_NR_BITS (PAGE_SHIFT - 2)
#define BARRIER_BUCKETS_NR (1<<BARRIER_BUCKETS_NR_BITS)
This patch turns the following members of struct r1conf from integers
into arrays of integers:
- int nr_pending;
- int nr_waiting;
- int nr_queued;
- int barrier;
+ int *nr_pending;
+ int *nr_waiting;
+ int *nr_queued;
+ int *barrier;
The number of array elements is defined as BARRIER_BUCKETS_NR. For a
4KB kernel page size, (PAGE_SHIFT - 2) indicates there are 1024 I/O
barrier buckets, and each array of integers occupies a single memory
page. With 1024 buckets, a request smaller than the I/O barrier unit
size has only a ~0.1% chance of having to wait for a resync to pause,
which is a small enough fraction. Also, requesting a single memory
page is friendlier to the kernel page allocator than a larger
allocation.
- The I/O barrier bucket is indexed by the bio start sector
If multiple I/O requests hit different I/O barrier units, they only
need to compete for the I/O barrier with other I/Os that hit the same
I/O barrier bucket index. The index of the barrier bucket that a bio
should look for is calculated by sector_to_idx(), defined in raid1.h
as an inline function:
static inline int sector_to_idx(sector_t sector)
{
return hash_long(sector >> BARRIER_UNIT_SECTOR_BITS,
BARRIER_BUCKETS_NR_BITS);
}
Here sector is the start sector number of a bio.
- A single bio won't cross the boundary of an I/O barrier unit
If a request crosses the boundary of a barrier unit, it will be split.
A bio may be split in raid1_make_request() or raid1_sync_request() if
the sector count returned by align_to_barrier_unit_end() is smaller
than the original bio size.
Compared to the single sliding resync window:
- Currently resync I/O grows linearly, therefore regular and resync
I/O will conflict within a single barrier unit. So the I/O behavior
is similar to the single sliding resync window.
- But a barrier bucket is shared by all barrier units with an
identical barrier bucket index, so the probability of conflict might
be higher than with the single sliding resync window, in the
condition that write I/Os always hit barrier units whose barrier
bucket indexes are identical to those of the resync I/Os. This is a
very rare condition in real I/O workloads; I cannot imagine how it
could happen in practice.
- Therefore we can achieve a low enough conflict rate with a much
simpler barrier algorithm and implementation.
There are two changes that should be noted:
- In raid1d(), I changed the code that decreases conf->nr_queued[idx]
into a single loop; it looks like this:
spin_lock_irqsave(&conf->device_lock, flags);
conf->nr_queued[idx]--;
spin_unlock_irqrestore(&conf->device_lock, flags);
This change generates more spin lock operations, but in the next patch
of this patch set it will be replaced by a single line of code,
atomic_dec(&conf->nr_queued[idx]);
so we don't need to worry about the spin lock cost here.
- Mainline raid1 code splits the original raid1_make_request() into
raid1_read_request() and raid1_write_request(). If the original bio
crosses an I/O barrier unit boundary, the bio will be split before
calling raid1_read_request() or raid1_write_request(); this change
makes the code logic simpler and clearer.
- In this patch wait_barrier() is moved from raid1_make_request() to
raid1_write_request(). In raid1_read_request(), the original
wait_barrier() is replaced by wait_read_barrier().
The difference is that wait_read_barrier() only waits if the array is
frozen; using different barrier functions in different code paths
makes the code cleaner and easier to read.
Changelog
V4:
- Add alloc_r1bio() to remove redundant r1bio memory allocation code.
- Fix many typos in patch comments.
- Use (PAGE_SHIFT - ilog2(sizeof(int))) to define BARRIER_BUCKETS_NR_BITS.
V3:
- Rebase the patch against the latest upstream kernel code.
- Many fixes from review comments by Neil:
- Back to using pointers to replace arrays in struct r1conf
- Remove total_barriers from struct r1conf
- Add more patch comments to explain how/why the values of
BARRIER_UNIT_SECTOR_SIZE and BARRIER_BUCKETS_NR are decided.
- Use get_unqueued_pending() to replace get_all_pendings() and
get_all_queued()
- Increase bucket number from 512 to 1024
- Change code comment format per review from Shaohua.
V2:
- Use bio_split() to split the original bio if it crosses a barrier
unit boundary, to make the code simpler, by suggestion from Shaohua
and Neil.
- Use hash_long() to replace the original linear hash, to avoid a
possible conflict between resync I/O and sequential write I/O, by
suggestion from Shaohua.
- Add conf->total_barriers to record barrier depth, which is used to
control the number of parallel sync I/O barriers, by suggestion from
Shaohua.
- In the V1 patch, the barrier-bucket related members below were
allocated in a memory page. To make the code simpler, the V2 patch
moves the memory space into struct r1conf, like this:
- int nr_pending;
- int nr_waiting;
- int nr_queued;
- int barrier;
+ int nr_pending[BARRIER_BUCKETS_NR];
+ int nr_waiting[BARRIER_BUCKETS_NR];
+ int nr_queued[BARRIER_BUCKETS_NR];
+ int barrier[BARRIER_BUCKETS_NR];
This change is by suggestion from Shaohua.
- Remove some irrelevant code comments, by suggestion from Guoqing.
- Add a missing wait_barrier() before jumping to retry_write, in
raid1_make_write_request().
V1:
- Original RFC patch for comments
Signed-off-by: Coly Li <colyli@suse.de>
Cc: Johannes Thumshirn <jthumshirn@suse.de>
Cc: Guoqing Jiang <gqjiang@suse.com>
Reviewed-by: Neil Brown <neilb@suse.de>
Signed-off-by: Shaohua Li <shli@fb.com>
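As a rough sketch of how a request consumes a bucket (assuming the array
members above; the real wait_barrier()/allow_barrier() additionally take
conf->resync_lock and handle array freezing):
	/* both helpers hash the start sector to a bucket internally: */
	int idx = sector_to_idx(bio->bi_iter.bi_sector);

	/* block only while a resync holds this same bucket */
	wait_barrier(conf, bio->bi_iter.bi_sector);	/* bumps nr_pending[idx] */
	/* ... issue the read or write ... */
	allow_barrier(conf, bio->bi_iter.bi_sector);	/* drops nr_pending[idx] */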
2017-02-17 19:05:56 +00:00
|
|
|
struct r1bio *r1_bio;
|
2011-07-28 01:31:48 +00:00
|
|
|
int i, disks;
|
2016-12-05 20:02:57 +00:00
|
|
|
struct bitmap *bitmap = mddev->bitmap;
|
2005-06-22 00:17:23 +00:00
|
|
|
unsigned long flags;
|
2011-10-11 05:45:26 +00:00
|
|
|
struct md_rdev *blocked_rdev;
|
2012-08-01 22:33:20 +00:00
|
|
|
struct blk_plug_cb *cb;
|
|
|
|
struct raid1_plug_cb *plug = NULL;
|
2011-07-28 01:31:48 +00:00
|
|
|
int first_clone;
|
|
|
|
int max_sectors;
|
2005-06-22 00:17:23 +00:00
|
|
|
|
2017-10-17 02:46:43 +00:00
|
|
|
if (mddev_is_clustered(mddev) &&
|
2015-06-24 14:30:32 +00:00
|
|
|
md_cluster_ops->area_resyncing(mddev, WRITE,
|
2017-10-17 02:46:43 +00:00
|
|
|
bio->bi_iter.bi_sector, bio_end_sector(bio))) {
|
2016-12-05 20:02:57 +00:00
|
|
|
|
2009-12-14 01:49:51 +00:00
|
|
|
DEFINE_WAIT(w);
|
|
|
|
for (;;) {
|
|
|
|
prepare_to_wait(&conf->wait_barrier,
|
2017-10-18 23:01:11 +00:00
|
|
|
&w, TASK_IDLE);
|
2017-10-24 07:33:33 +00:00
|
|
|
if (!md_cluster_ops->area_resyncing(mddev, WRITE,
|
2017-09-29 01:16:43 +00:00
|
|
|
bio->bi_iter.bi_sector,
|
2017-10-17 02:46:43 +00:00
|
|
|
bio_end_sector(bio)))
|
2009-12-14 01:49:51 +00:00
|
|
|
break;
|
|
|
|
schedule();
|
|
|
|
}
|
|
|
|
finish_wait(&conf->wait_barrier, &w);
|
|
|
|
}
|
2017-10-24 07:33:33 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Register the new request and wait if the reconstruction
|
|
|
|
* thread has put up a bar for new requests.
|
|
|
|
* Continue immediately if no resync is active currently.
|
|
|
|
*/
|
RAID1: a new I/O barrier implementation to remove resync window
2017-02-17 19:05:56 +00:00
|
|
|
wait_barrier(conf, bio->bi_iter.bi_sector);
|
|
|
|
|
md/raid1: simplify handle_read_error().
handle_read_error() duplicates a lot of the work that raid1_read_request()
does, so it makes sense to just use that function.
This doesn't quite work as-is, because handle_read_error() relies on the
same r1bio being re-used so that, in the case of a read-only array,
setting IO_BLOCKED in r1bio->bios[] ensures read_balance() won't re-use
that device.
So we need to allow an r1bio to be passed to raid1_read_request(), and to
have that function mostly initialise the r1bio, but leave the bios[]
array untouched.
The parts of handle_read_error() that need to be preserved, notably the
warning message it prints, are conditionally added to
raid1_read_request().
Note that this highlights a minor bug in alloc_r1bio(): it doesn't
initialise the bios[] array, so old content may still be there, which
might cause read_balance() to ignore some devices for no good reason.
With this change, we no longer need inc_pending(), or the sectors_handled
arg to alloc_r1bio().
As handle_read_error() is called from raid1d() and allocates memory,
there is a tiny chance of a deadlock: all elements of the various pools
could be queued waiting for raid1 to handle them, and there may be no
extra memory free.
Achieving guaranteed forward progress would probably require a second
thread and another mempool. Instead of that complexity, add
__GFP_HIGH to any allocations when raid1_read_request() is called
from raid1d.
Signed-off-by: NeilBrown <neilb@suse.com>
Signed-off-by: Shaohua Li <shli@fb.com>
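A plausible condensation of that flag choice (a hypothetical
illustration; it assumes the r1_bio argument is non-NULL exactly when
raid1_read_request() is re-entered from raid1d via handle_read_error()):
	/* dip into emergency reserves only on the raid1d retry path */
	gfp_t gfp = r1_bio ? (GFP_NOIO | __GFP_HIGH) : GFP_NOIO;

	read_bio = bio_clone_fast(bio, gfp, &mddev->bio_set);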
2017-04-05 04:05:50 +00:00
|
|
|
r1_bio = alloc_r1bio(mddev, bio);
|
md/raid1: simplify the splitting of requests.
2017-04-05 04:05:50 +00:00
|
|
|
r1_bio->sectors = max_write_sectors;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2011-10-11 05:50:01 +00:00
|
|
|
if (conf->pending_count >= max_queued_requests) {
|
|
|
|
md_wakeup_thread(mddev->thread);
|
2016-11-14 05:30:21 +00:00
|
|
|
raid1_log(mddev, "wait queued");
|
2011-10-11 05:50:01 +00:00
|
|
|
wait_event(conf->wait_barrier,
|
|
|
|
conf->pending_count < max_queued_requests);
|
|
|
|
}
|
2011-07-28 01:31:48 +00:00
|
|
|
/* first select target devices under rcu_lock and
|
2005-04-16 22:20:36 +00:00
|
|
|
* inc refcount on their rdev. Record them by setting
|
|
|
|
* bios[x] to bio
|
2011-07-28 01:31:48 +00:00
|
|
|
* If there are known/acknowledged bad blocks on any device on
|
|
|
|
* which we have seen a write error, we want to avoid writing those
|
|
|
|
* blocks.
|
|
|
|
* This potentially requires several writes to write around
|
|
|
|
* the bad blocks. Each set of writes gets its own r1bio
|
|
|
|
* with a set of bios attached.
|
2005-04-16 22:20:36 +00:00
|
|
|
*/
|
2011-04-18 08:25:43 +00:00
|
|
|
|
2011-12-22 23:17:56 +00:00
|
|
|
disks = conf->raid_disks * 2;
|
2008-04-30 07:52:32 +00:00
|
|
|
retry_write:
|
|
|
|
blocked_rdev = NULL;
|
2005-04-16 22:20:36 +00:00
|
|
|
rcu_read_lock();
|
2011-07-28 01:31:48 +00:00
|
|
|
max_sectors = r1_bio->sectors;
|
2005-04-16 22:20:36 +00:00
|
|
|
for (i = 0; i < disks; i++) {
|
2011-10-11 05:45:26 +00:00
|
|
|
struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
|
2008-04-30 07:52:32 +00:00
|
|
|
if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
|
|
|
|
atomic_inc(&rdev->nr_pending);
|
|
|
|
blocked_rdev = rdev;
|
|
|
|
break;
|
|
|
|
}
|
2011-07-28 01:31:48 +00:00
|
|
|
r1_bio->bios[i] = NULL;
|
2015-04-28 06:48:34 +00:00
|
|
|
if (!rdev || test_bit(Faulty, &rdev->flags)) {
|
2011-12-22 23:17:56 +00:00
|
|
|
if (i < conf->raid_disks)
|
|
|
|
set_bit(R1BIO_Degraded, &r1_bio->state);
|
2011-07-28 01:31:48 +00:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
atomic_inc(&rdev->nr_pending);
|
|
|
|
if (test_bit(WriteErrorSeen, &rdev->flags)) {
|
|
|
|
sector_t first_bad;
|
|
|
|
int bad_sectors;
|
|
|
|
int is_bad;
|
|
|
|
|
2016-12-05 20:02:57 +00:00
|
|
|
is_bad = is_badblock(rdev, r1_bio->sector, max_sectors,
|
2011-07-28 01:31:48 +00:00
|
|
|
&first_bad, &bad_sectors);
|
|
|
|
if (is_bad < 0) {
|
|
|
|
/* mustn't write here until the bad block is
|
|
|
|
* acknowledged */
|
|
|
|
set_bit(BlockedBadBlocks, &rdev->flags);
|
|
|
|
blocked_rdev = rdev;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (is_bad && first_bad <= r1_bio->sector) {
|
|
|
|
/* Cannot write here at all */
|
|
|
|
bad_sectors -= (r1_bio->sector - first_bad);
|
|
|
|
if (bad_sectors < max_sectors)
|
|
|
|
/* mustn't write more than bad_sectors
|
|
|
|
* to other devices yet
|
|
|
|
*/
|
|
|
|
max_sectors = bad_sectors;
|
2006-01-06 08:20:46 +00:00
|
|
|
rdev_dec_pending(rdev, mddev);
|
2011-07-28 01:31:48 +00:00
|
|
|
/* We don't set R1BIO_Degraded as that
|
|
|
|
* only applies if the disk is
|
|
|
|
* missing, so it might be re-added,
|
|
|
|
* and we want to know to recover this
|
|
|
|
* chunk.
|
|
|
|
* In this case the device is here,
|
|
|
|
* and the fact that this chunk is not
|
|
|
|
* in-sync is recorded in the bad
|
|
|
|
* block log
|
|
|
|
*/
|
|
|
|
continue;
|
2010-05-18 05:27:13 +00:00
|
|
|
}
|
2011-07-28 01:31:48 +00:00
|
|
|
if (is_bad) {
|
|
|
|
int good_sectors = first_bad - r1_bio->sector;
|
|
|
|
if (good_sectors < max_sectors)
|
|
|
|
max_sectors = good_sectors;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
r1_bio->bios[i] = bio;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
rcu_read_unlock();
|
|
|
|
|
2008-04-30 07:52:32 +00:00
|
|
|
if (unlikely(blocked_rdev)) {
|
|
|
|
/* Wait for this device to become unblocked */
|
|
|
|
int j;
|
|
|
|
|
|
|
|
for (j = 0; j < i; j++)
|
|
|
|
if (r1_bio->bios[j])
|
|
|
|
rdev_dec_pending(conf->mirrors[j].rdev, mddev);
|
2011-07-28 01:31:48 +00:00
|
|
|
r1_bio->state = 0;
|
RAID1: a new I/O barrier implementation to remove resync window
2017-02-17 19:05:56 +00:00
|
|
|
allow_barrier(conf, bio->bi_iter.bi_sector);
|
2016-11-14 05:30:21 +00:00
|
|
|
raid1_log(mddev, "wait rdev %d blocked", blocked_rdev->raid_disk);
|
2008-04-30 07:52:32 +00:00
|
|
|
md_wait_for_blocked_rdev(blocked_rdev, mddev);
|
RAID1: a new I/O barrier implementation to remove resync window
2017-02-17 19:05:56 +00:00
|
|
|
wait_barrier(conf, bio->bi_iter.bi_sector);
|
2008-04-30 07:52:32 +00:00
|
|
|
goto retry_write;
|
|
|
|
}
|
|
|
|
|
md/raid1: simplify the splitting of requests.
2017-04-05 04:05:50 +00:00
|
|
|
if (max_sectors < bio_sectors(bio)) {
|
|
|
|
struct bio *split = bio_split(bio, max_sectors,
|
2018-05-20 22:25:52 +00:00
|
|
|
GFP_NOIO, &conf->bio_split);
|
md/raid1: simplify the splitting of requests.
2017-04-05 04:05:50 +00:00
|
|
|
bio_chain(split, bio);
|
2020-07-01 08:59:44 +00:00
|
|
|
submit_bio_noacct(bio);
|
md/raid1: simplify the splitting of requests.
2017-04-05 04:05:50 +00:00
|
|
|
bio = split;
|
|
|
|
r1_bio->master_bio = bio;
|
2011-07-28 01:31:48 +00:00
|
|
|
r1_bio->sectors = max_sectors;
|
2005-06-22 00:17:23 +00:00
|
|
|
}
|
2005-09-09 23:23:47 +00:00
|
|
|
|
2010-10-19 01:54:01 +00:00
|
|
|
atomic_set(&r1_bio->remaining, 1);
|
2005-09-09 23:23:47 +00:00
|
|
|
atomic_set(&r1_bio->behind_remaining, 0);
|
2005-06-22 00:17:12 +00:00
|
|
|
|
2011-07-28 01:31:48 +00:00
|
|
|
first_clone = 1;
|
2017-03-16 16:12:30 +00:00
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
for (i = 0; i < disks; i++) {
|
2017-02-14 15:29:01 +00:00
|
|
|
struct bio *mbio = NULL;
|
2019-12-23 09:48:58 +00:00
|
|
|
struct md_rdev *rdev = conf->mirrors[i].rdev;
|
2005-04-16 22:20:36 +00:00
|
|
|
if (!r1_bio->bios[i])
|
|
|
|
continue;
|
|
|
|
|
2011-07-28 01:31:48 +00:00
|
|
|
if (first_clone) {
|
|
|
|
/* do behind I/O ?
|
|
|
|
* Not if there are too many, or cannot
|
|
|
|
* allocate memory, or a reader on WriteMostly
|
|
|
|
* is waiting for behind writes to flush */
|
|
|
|
if (bitmap &&
|
|
|
|
(atomic_read(&bitmap->behind_writes)
|
|
|
|
< mddev->bitmap_info.max_write_behind) &&
|
2017-02-14 15:29:01 +00:00
|
|
|
!waitqueue_active(&bitmap->behind_wait)) {
|
2017-07-17 21:33:48 +00:00
|
|
|
alloc_behind_master_bio(r1_bio, bio);
|
2017-02-14 15:29:01 +00:00
|
|
|
}
|
2011-07-28 01:31:48 +00:00
|
|
|
|
2018-08-01 22:20:50 +00:00
|
|
|
md_bitmap_startwrite(bitmap, r1_bio->sector, r1_bio->sectors,
|
|
|
|
test_bit(R1BIO_BehindIO, &r1_bio->state));
|
2011-07-28 01:31:48 +00:00
|
|
|
first_clone = 0;
|
|
|
|
}
|
2017-02-14 15:29:01 +00:00
|
|
|
|
2017-07-17 21:33:48 +00:00
|
|
|
if (r1_bio->behind_master_bio)
|
|
|
|
mbio = bio_clone_fast(r1_bio->behind_master_bio,
|
2018-05-20 22:25:52 +00:00
|
|
|
GFP_NOIO, &mddev->bio_set);
|
2017-07-17 21:33:48 +00:00
|
|
|
else
|
2018-05-20 22:25:52 +00:00
|
|
|
mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
|
2017-02-14 15:29:01 +00:00
|
|
|
|
2017-03-16 16:12:31 +00:00
|
|
|
if (r1_bio->behind_master_bio) {
|
2019-12-23 09:48:58 +00:00
|
|
|
if (test_bit(CollisionCheck, &rdev->flags))
|
2019-12-23 09:49:02 +00:00
|
|
|
wait_for_serialization(rdev, r1_bio);
|
2019-06-19 09:30:46 +00:00
|
|
|
if (test_bit(WriteMostly, &rdev->flags))
|
2005-09-09 23:23:47 +00:00
|
|
|
atomic_inc(&r1_bio->behind_remaining);
|
2019-12-23 09:48:58 +00:00
|
|
|
} else if (mddev->serialize_policy)
|
2019-12-23 09:49:02 +00:00
|
|
|
wait_for_serialization(rdev, r1_bio);
|
2005-09-09 23:23:47 +00:00
|
|
|
|
2011-07-28 01:31:48 +00:00
|
|
|
r1_bio->bios[i] = mbio;
|
|
|
|
|
2013-10-11 22:44:27 +00:00
|
|
|
mbio->bi_iter.bi_sector = (r1_bio->sector +
|
2011-07-28 01:31:48 +00:00
|
|
|
conf->mirrors[i].rdev->data_offset);
|
2017-08-23 17:10:32 +00:00
|
|
|
bio_set_dev(mbio, conf->mirrors[i].rdev->bdev);
|
2011-07-28 01:31:48 +00:00
|
|
|
mbio->bi_end_io = raid1_end_write_request;
|
2017-02-24 22:42:19 +00:00
|
|
|
mbio->bi_opf = bio_op(bio) | (bio->bi_opf & (REQ_SYNC | REQ_FUA));
|
2016-11-18 05:16:12 +00:00
|
|
|
if (test_bit(FailFast, &conf->mirrors[i].rdev->flags) &&
|
|
|
|
!test_bit(WriteMostly, &conf->mirrors[i].rdev->flags) &&
|
|
|
|
conf->raid_disks - mddev->degraded > 1)
|
|
|
|
mbio->bi_opf |= MD_FAILFAST;
|
2011-07-28 01:31:48 +00:00
|
|
|
mbio->bi_private = r1_bio;
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
atomic_inc(&r1_bio->remaining);
|
2012-08-01 22:33:20 +00:00
|
|
|
|
2016-11-18 02:22:04 +00:00
|
|
|
if (mddev->gendisk)
|
2017-08-23 17:10:32 +00:00
|
|
|
trace_block_bio_remap(mbio->bi_disk->queue,
|
2016-11-18 02:22:04 +00:00
|
|
|
mbio, disk_devt(mddev->gendisk),
|
|
|
|
r1_bio->sector);
|
|
|
|
/* flush_pending_writes() needs access to the rdev so... */
|
2017-08-23 17:10:32 +00:00
|
|
|
mbio->bi_disk = (void *)conf->mirrors[i].rdev;
|
2016-11-18 02:22:04 +00:00
|
|
|
|
2012-08-01 22:33:20 +00:00
|
|
|
cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug));
|
|
|
|
if (cb)
|
|
|
|
plug = container_of(cb, struct raid1_plug_cb, cb);
|
|
|
|
else
|
|
|
|
plug = NULL;
|
|
|
|
if (plug) {
|
|
|
|
bio_list_add(&plug->pending, mbio);
|
|
|
|
plug->pending_cnt++;
|
|
|
|
} else {
|
2017-05-10 15:47:11 +00:00
|
|
|
spin_lock_irqsave(&conf->device_lock, flags);
|
2012-08-01 22:33:20 +00:00
|
|
|
bio_list_add(&conf->pending_bio_list, mbio);
|
|
|
|
conf->pending_count++;
|
2017-05-10 15:47:11 +00:00
|
|
|
spin_unlock_irqrestore(&conf->device_lock, flags);
|
2012-07-03 07:45:31 +00:00
|
|
|
md_wakeup_thread(mddev->thread);
|
2017-05-10 15:47:11 +00:00
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
2011-07-28 01:31:48 +00:00
|
|
|
|
2011-09-10 07:21:23 +00:00
|
|
|
r1_bio_write_done(r1_bio);
|
|
|
|
|
|
|
|
/* In case raid1d snuck in to freeze_array */
|
|
|
|
wake_up(&conf->wait_barrier);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2017-06-05 06:49:39 +00:00
|
|
|
static bool raid1_make_request(struct mddev *mddev, struct bio *bio)
|
2016-12-05 20:02:57 +00:00
|
|
|
{
|
RAID1: a new I/O barrier implementation to remove resync window
'Commit 79ef3a8aa1cb ("raid1: Rewrite the implementation of iobarrier.")'
introduces a sliding resync window for raid1 I/O barrier, this idea limits
I/O barriers to happen only inside a slidingresync window, for regular
I/Os out of this resync window they don't need to wait for barrier any
more. On large raid1 device, it helps a lot to improve parallel writing
I/O throughput when there are background resync I/Os performing at
same time.
The idea of sliding resync widow is awesome, but code complexity is a
challenge. Sliding resync window requires several variables to work
collectively, this is complexed and very hard to make it work correctly.
Just grep "Fixes: 79ef3a8aa1" in kernel git log, there are 8 more patches
to fix the original resync window patch. This is not the end, any further
related modification may easily introduce more regreassion.
Therefore I decide to implement a much simpler raid1 I/O barrier, by
removing resync window code, I believe life will be much easier.
The brief idea of the simpler barrier is,
- Do not maintain a global unique resync window
- Use multiple hash buckets to reduce I/O barrier conflicts, regular
I/O only has to wait for a resync I/O when both them have same barrier
bucket index, vice versa.
- I/O barrier can be reduced to an acceptable number if there are enough
barrier buckets
Here I explain how the barrier buckets are designed,
- BARRIER_UNIT_SECTOR_SIZE
The whole LBA address space of a raid1 device is divided into multiple
barrier units, by the size of BARRIER_UNIT_SECTOR_SIZE.
Bio requests won't go across border of barrier unit size, that means
maximum bio size is BARRIER_UNIT_SECTOR_SIZE<<9 (64MB) in bytes.
For random I/O 64MB is large enough for both read and write requests,
for sequential I/O considering underlying block layer may merge them
into larger requests, 64MB is still good enough.
Neil also points out that for resync operation, "we want the resync to
move from region to region fairly quickly so that the slowness caused
by having to synchronize with the resync is averaged out over a fairly
small time frame". For full speed resync, 64MB should take less then 1
second. When resync is competing with other I/O, it could take up a few
minutes. Therefore 64MB size is fairly good range for resync.
- BARRIER_BUCKETS_NR
There are BARRIER_BUCKETS_NR buckets in total, which is defined by,
#define BARRIER_BUCKETS_NR_BITS (PAGE_SHIFT - 2)
#define BARRIER_BUCKETS_NR (1<<BARRIER_BUCKETS_NR_BITS)
this patch makes the bellowed members of struct r1conf from integer
to array of integers,
- int nr_pending;
- int nr_waiting;
- int nr_queued;
- int barrier;
+ int *nr_pending;
+ int *nr_waiting;
+ int *nr_queued;
+ int *barrier;
number of the array elements is defined as BARRIER_BUCKETS_NR. For 4KB
kernel space page size, (PAGE_SHIFT - 2) indecates there are 1024 I/O
barrier buckets, and each array of integers occupies single memory page.
1024 means for a request which is smaller than the I/O barrier unit size
has ~0.1% chance to wait for resync to pause, which is quite a small
enough fraction. Also requesting single memory page is more friendly to
kernel page allocator than larger memory size.
- I/O barrier bucket is indexed by bio start sector
If multiple I/O requests hit different I/O barrier units, they only need
to compete I/O barrier with other I/Os which hit the same I/O barrier
bucket index with each other. The index of a barrier bucket which a
bio should look for is calculated by sector_to_idx() which is defined
in raid1.h as an inline function,
static inline int sector_to_idx(sector_t sector)
{
return hash_long(sector >> BARRIER_UNIT_SECTOR_BITS,
BARRIER_BUCKETS_NR_BITS);
}
Here sector_nr is the start sector number of a bio.
- Single bio won't go across boundary of a I/O barrier unit
If a request goes across boundary of barrier unit, it will be split. A
bio may be split in raid1_make_request() or raid1_sync_request(), if
sectors returned by align_to_barrier_unit_end() is smaller than
original bio size.
Comparing to single sliding resync window,
- Currently resync I/O grows linearly, therefore regular and resync I/O
will conflict within a single barrier units. So the I/O behavior is
similar to single sliding resync window.
- But a barrier unit bucket is shared by all barrier units with identical
barrier uinit index, the probability of conflict might be higher
than single sliding resync window, in condition that writing I/Os
always hit barrier units which have identical barrier bucket indexs with
the resync I/Os. This is a very rare condition in real I/O work loads,
I cannot imagine how it could happen in practice.
- Therefore we can achieve a good enough low conflict rate with much
simpler barrier algorithm and implementation.
There are three changes that should be noticed,
- In raid1d(), I change the code to decrease conf->nr_queued[idx]
inside a single loop; it looks like this,
spin_lock_irqsave(&conf->device_lock, flags);
conf->nr_queued[idx]--;
spin_unlock_irqrestore(&conf->device_lock, flags);
This change generates more spin lock operations, but in the next patch
of this patch set it will be replaced by a single line of code,
atomic_dec(&conf->nr_queued[idx]);
So we don't need to worry about spin lock cost here.
- Mainline raid1 code split the original raid1_make_request() into
raid1_read_request() and raid1_write_request(). If the original bio
goes across an I/O barrier unit boundary, it will be split before
calling raid1_read_request() or raid1_write_request(); this makes the
code logic simpler and clearer.
- In this patch wait_barrier() is moved from raid1_make_request() to
raid1_write_request(). In raid1_read_request(), the original
wait_barrier() is replaced by wait_read_barrier().
The difference is that wait_read_barrier() only waits if the array is
frozen; using different barrier functions in different code paths makes
the code cleaner and easier to read.
Changelog
V4:
- Add alloc_r1bio() to remove redundant r1bio memory allocation code.
- Fix many typos in patch comments.
- Use (PAGE_SHIFT - ilog2(sizeof(int))) to define BARRIER_BUCKETS_NR_BITS.
V3:
- Rebase the patch against latest upstream kernel code.
- Many fixes by review comments from Neil,
- Back to using pointers to replace the arrays in struct r1conf
- Remove total_barriers from struct r1conf
- Add more patch comments to explain how/why the values of
BARRIER_UNIT_SECTOR_SIZE and BARRIER_BUCKETS_NR are decided.
- Use get_unqueued_pending() to replace get_all_pendings() and
get_all_queued()
- Increase bucket number from 512 to 1024
- Change code comments format by review from Shaohua.
V2:
- Use bio_split() to split the original bio if it goes across a barrier
unit boundary, to make the code simpler, by suggestion from Shaohua and
Neil.
- Use hash_long() to replace the original linear hash, to avoid a
possible conflict between resync I/O and sequential write I/O, by
suggestion from Shaohua.
- Add conf->total_barriers to record barrier depth, which is used to
control the number of parallel sync I/O barriers, by suggestion from
Shaohua.
- In the V1 patch the barrier bucket related members of r1conf shown
below are allocated in a memory page. To make the code simpler, the V2
patch moves the memory space into struct r1conf, like this,
- int nr_pending;
- int nr_waiting;
- int nr_queued;
- int barrier;
+ int nr_pending[BARRIER_BUCKETS_NR];
+ int nr_waiting[BARRIER_BUCKETS_NR];
+ int nr_queued[BARRIER_BUCKETS_NR];
+ int barrier[BARRIER_BUCKETS_NR];
This change is by the suggestion from Shaohua.
- Remove some irrelevant code comments, by suggestion from Guoqing.
- Add a missing wait_barrier() before jumping to retry_write, in
raid1_make_write_request().
V1:
- Original RFC patch for comments
Signed-off-by: Coly Li <colyli@suse.de>
Cc: Johannes Thumshirn <jthumshirn@suse.de>
Cc: Guoqing Jiang <gqjiang@suse.com>
Reviewed-by: Neil Brown <neilb@suse.de>
Signed-off-by: Shaohua Li <shli@fb.com>
2017-02-17 19:05:56 +00:00
|
|
|
sector_t sectors;
|
2016-12-05 20:02:57 +00:00
|
|
|
|
2019-09-16 17:15:14 +00:00
|
|
|
if (unlikely(bio->bi_opf & REQ_PREFLUSH)
|
|
|
|
&& md_flush_request(mddev, bio))
|
2017-06-05 06:49:39 +00:00
|
|
|
return true;
|
2016-12-05 20:02:57 +00:00
|
|
|
|
md/raid1: simplify the splitting of requests.
raid1 currently splits requests in two different ways for
two different reasons.
First, bio_split() is used to ensure the bio fits within a
resync accounting region.
Second, multiple r1bios are allocated for each bio to handle
the possibility of known bad blocks on some devices.
This can be simplified to just use bio_split() once, and not
use multiple r1bios.
We delay the split until we know a maximum bio size that can
be handled with a single r1bio, and then split the bio and
queue the remainder for later handling.
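The resulting pattern looks roughly like the sketch below (variable
names follow the surrounding raid1 code; treat it as an illustration of
the idea, not the literal patch):
if (sectors < bio_sectors(bio)) {
struct bio *split = bio_split(bio, sectors, GFP_NOIO, conf->bio_split);
bio_chain(split, bio);
/* queue the remainder; it is handled after the first section */
generic_make_request(bio);
bio = split;
}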
This avoids all loops inside raid1.c request handling. Just
a single read, or a single set of writes, is submitted to
lower-level devices for each bio that comes from
generic_make_request().
When the bio needs to be split, generic_make_request() will
do the necessary looping and call md_make_request() multiple
times.
raid1_make_request() no longer queues requests for raid1 to handle,
so we can remove that branch from the 'if'.
This patch also creates a new private bio_set
(conf->bio_split) for splitting bios. Using fs_bio_set
is wrong, as it is meant to be used by filesystems, not
block devices. Using it inside md can lead to deadlocks
under high memory pressure.
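For illustration, creating that private bio_set at setup time would be
a single call, something like the sketch below (assuming the
two-argument bioset_create() of this era; BIO_POOL_SIZE is the stock
pool size from the block layer):
conf->bio_split = bioset_create(BIO_POOL_SIZE, 0);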
Delete unused variable in raid1_write_request() (Shaohua)
Signed-off-by: NeilBrown <neilb@suse.com>
Signed-off-by: Shaohua Li <shli@fb.com>
2017-04-05 04:05:50 +00:00
|
|
|
/*
|
|
|
|
* There is a limit to the maximum size, but
|
|
|
|
* the read/write handler might find a lower limit
|
|
|
|
* due to bad blocks. To avoid multiple splits,
|
|
|
|
* we pass the maximum number of sectors down
|
|
|
|
* and let the lower level perform the split.
|
|
|
|
*/
|
|
|
|
sectors = align_to_barrier_unit_end(
|
|
|
|
bio->bi_iter.bi_sector, bio_sectors(bio));
|
md/raid1/10: fix potential deadlock
Neil Brown pointed out a potential deadlock in raid 10 code with
bio_split/chain. The raid1 code could have the same issue, but recent
barrier rework makes it less likely to happen. The deadlock happens in
the sequence below:
1. generic_make_request(bio), this will set current->bio_list
2. raid10_make_request will split bio into bio1 and bio2
3. __make_request(bio1), wait_barrier, add underlying disk bio to
current->bio_list
4. __make_request(bio2), wait_barrier
If raise_barrier happens between 3 & 4, since wait_barrier runs at 3,
raise_barrier waits for IO completion from 3. And since raise_barrier
sets barrier, 4 waits for raise_barrier. But IO from 3 can't be
dispatched because raid10_make_request() hasn't finished yet.
The solution is to adjust the IO ordering. Quotes from Neil:
"
It is much safer to:
if (need to split) {
split = bio_split(bio, ...)
bio_chain(...)
make_request_fn(split);
generic_make_request(bio);
} else
make_request_fn(mddev, bio);
This way we first process the initial section of the bio (in 'split')
which will queue some requests to the underlying devices. These
requests will be queued in generic_make_request.
Then we queue the remainder of the bio, which will be added to the end
of the generic_make_request queue.
Then we return.
generic_make_request() will pop the lower-level device requests off the
queue and handle them first. Then it will process the remainder
of the original bio once the first section has been fully processed.
"
Note, this only happens in the read path. In the write path, the bio
is flushed to the underlying disks either by blk flush (from schedule)
or offloaded to raid1/10d. It's queued in current->bio_list.
Cc: Coly Li <colyli@suse.de>
Cc: stable@vger.kernel.org (v3.14+, only the raid10 part)
Suggested-by: NeilBrown <neilb@suse.com>
Reviewed-by: Jack Wang <jinpu.wang@profitbricks.com>
Signed-off-by: Shaohua Li <shli@fb.com>
2017-02-28 21:00:20 +00:00
|
|
|
|
md/raid1: simplify the splitting of requests.
raid1 currently splits requests in two different ways for
two different reasons.
First, bio_split() is used to ensure the bio fits within a
resync accounting region.
Second, multiple r1bios are allocated for each bio to handle
the possibility of known bad blocks on some devices.
This can be simplified to just use bio_split() once, and not
use multiple r1bios.
We delay the split until we know a maximum bio size that can
be handled with a single r1bio, and then split the bio and
queue the remainder for later handling.
This avoids all loops inside raid1.c request handling. Just
a single read, or a single set of writes, is submitted to
lower-level devices for each bio that comes from
generic_make_request().
When the bio needs to be split, generic_make_request() will
do the necessary looping and call md_make_request() multiple
times.
raid1_make_request() no longer queues requests for raid1 to handle,
so we can remove that branch from the 'if'.
This patch also creates a new private bio_set
(conf->bio_split) for splitting bios. Using fs_bio_set
is wrong, as it is meant to be used by filesystems, not
block devices. Using it inside md can lead to deadlocks
under high memory pressure.
Delete unused variable in raid1_write_request() (Shaohua)
Signed-off-by: NeilBrown <neilb@suse.com>
Signed-off-by: Shaohua Li <shli@fb.com>
2017-04-05 04:05:50 +00:00
|
|
|
if (bio_data_dir(bio) == READ)
|
md/raid1: simplify handle_read_error().
handle_read_error() duplicates a lot of the work that raid1_read_request()
does, so it makes sense to just use that function.
This doesn't quite work as handle_read_error() relies on the same r1bio
being re-used so that, in the case of a read-only array, setting
IO_BLOCKED in r1bio->bios[] ensures read_balance() won't re-use
that device.
So we need to allow a r1bio to be passed to raid1_read_request(), and to
have that function mostly initialise the r1bio, but leave the bios[]
array untouched.
The parts of handle_read_error() that need to be preserved are the
warning messages it prints, so they are conditionally added to
raid1_read_request().
Note that this highlights a minor bug in alloc_r1bio(). It doesn't
initialise the bios[] array, so it is possible that old content is
there, which might cause read_balance() to ignore some devices for no
good reason.
With this change, we no longer need inc_pending(), or the sectors_handled
arg to alloc_r1bio().
As handle_read_error() is called from raid1d() and allocates memory,
there is a tiny chance of a deadlock. All elements of the various pools
could be queued waiting for raid1 to handle them, and there may be no
extra memory free.
Achieving guaranteed forward progress would probably require a second
thread and another mempool. Instead of that complexity, add
__GFP_HIGH to any allocations when raid1_read_request() is called
from raid1d.
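A minimal sketch of that allocation choice (assuming, per the
description above, that a non-NULL r1_bio argument means the call came
from raid1d via handle_read_error()):
/* ask harder for memory when called from raid1d, where blocking
* on a mempool refill could deadlock */
gfp_t gfp = r1_bio ? (GFP_NOIO | __GFP_HIGH) : GFP_NOIO;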
Signed-off-by: NeilBrown <neilb@suse.com>
Signed-off-by: Shaohua Li <shli@fb.com>
2017-04-05 04:05:50 +00:00
|
|
|
raid1_read_request(mddev, bio, sectors, NULL);
|
2017-06-05 06:49:39 +00:00
|
|
|
else {
|
|
|
|
if (!md_write_start(mddev, bio))
|
|
|
|
return false;
|
md/raid1: simplify the splitting of requests.
raid1 currently splits requests in two different ways for
two different reasons.
First, bio_split() is used to ensure the bio fits within a
resync accounting region.
Second, multiple r1bios are allocated for each bio to handle
the possibility of known bad blocks on some devices.
This can be simplified to just use bio_split() once, and not
use multiple r1bios.
We delay the split until we know a maximum bio size that can
be handled with a single r1bio, and then split the bio and
queue the remainder for later handling.
This avoids all loops inside raid1.c request handling. Just
a single read, or a single set of writes, is submitted to
lower-level devices for each bio that comes from
generic_make_request().
When the bio needs to be split, generic_make_request() will
do the necessary looping and call md_make_request() multiple
times.
raid1_make_request() no longer queues requests for raid1 to handle,
so we can remove that branch from the 'if'.
This patch also creates a new private bio_set
(conf->bio_split) for splitting bios. Using fs_bio_set
is wrong, as it is meant to be used by filesystems, not
block devices. Using it inside md can lead to deadlocks
under high memory pressure.
Delete unused variable in raid1_write_request() (Shaohua)
Signed-off-by: NeilBrown <neilb@suse.com>
Signed-off-by: Shaohua Li <shli@fb.com>
2017-04-05 04:05:50 +00:00
|
|
|
raid1_write_request(mddev, bio, sectors);
|
2017-06-05 06:49:39 +00:00
|
|
|
}
|
|
|
|
return true;
|
2016-12-05 20:02:57 +00:00
|
|
|
}
|
|
|
|
|
2016-01-20 21:52:20 +00:00
|
|
|
static void raid1_status(struct seq_file *seq, struct mddev *mddev)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2011-10-11 05:49:05 +00:00
|
|
|
struct r1conf *conf = mddev->private;
|
2005-04-16 22:20:36 +00:00
|
|
|
int i;
|
|
|
|
|
|
|
|
seq_printf(seq, " [%d/%d] [", conf->raid_disks,
|
2006-10-03 08:15:52 +00:00
|
|
|
conf->raid_disks - mddev->degraded);
|
2006-09-01 04:27:36 +00:00
|
|
|
rcu_read_lock();
|
|
|
|
for (i = 0; i < conf->raid_disks; i++) {
|
2011-10-11 05:45:26 +00:00
|
|
|
struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
|
2005-04-16 22:20:36 +00:00
|
|
|
seq_printf(seq, "%s",
|
2006-09-01 04:27:36 +00:00
|
|
|
rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
|
|
|
|
}
|
|
|
|
rcu_read_unlock();
|
2005-04-16 22:20:36 +00:00
|
|
|
seq_printf(seq, "]");
|
|
|
|
}
|
|
|
|
|
2016-01-20 21:52:20 +00:00
|
|
|
static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
|
|
|
char b[BDEVNAME_SIZE];
|
2011-10-11 05:49:05 +00:00
|
|
|
struct r1conf *conf = mddev->private;
|
2015-07-27 01:48:52 +00:00
|
|
|
unsigned long flags;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* If it is not operational, then we have already marked it as dead
|
2019-07-24 09:09:19 +00:00
|
|
|
* else if it is the last working disk with "fail_last_dev == false",
|
|
|
|
* ignore the error, let the next level up know.
|
2005-04-16 22:20:36 +00:00
|
|
|
* else mark the drive as failed
|
|
|
|
*/
|
2016-11-18 05:16:12 +00:00
|
|
|
spin_lock_irqsave(&conf->device_lock, flags);
|
2019-07-24 09:09:19 +00:00
|
|
|
if (test_bit(In_sync, &rdev->flags) && !mddev->fail_last_dev
|
2009-01-08 21:31:11 +00:00
|
|
|
&& (conf->raid_disks - mddev->degraded) == 1) {
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
|
|
|
|
* Don't fail the drive, act as though we were just a
|
2009-01-08 21:31:11 +00:00
|
|
|
* normal single drive.
|
|
|
|
* However don't try a recovery from this drive as
|
|
|
|
* it is very likely to fail.
|
2005-04-16 22:20:36 +00:00
|
|
|
*/
|
2011-07-27 01:00:36 +00:00
|
|
|
conf->recovery_disabled = mddev->recovery_disabled;
|
2016-11-18 05:16:12 +00:00
|
|
|
spin_unlock_irqrestore(&conf->device_lock, flags);
|
2005-04-16 22:20:36 +00:00
|
|
|
return;
|
2009-01-08 21:31:11 +00:00
|
|
|
}
|
2011-07-28 01:31:48 +00:00
|
|
|
set_bit(Blocked, &rdev->flags);
|
2019-02-01 02:45:01 +00:00
|
|
|
if (test_and_clear_bit(In_sync, &rdev->flags))
|
2005-04-16 22:20:36 +00:00
|
|
|
mddev->degraded++;
|
2019-02-01 02:45:01 +00:00
|
|
|
set_bit(Faulty, &rdev->flags);
|
2015-07-27 01:48:52 +00:00
|
|
|
spin_unlock_irqrestore(&conf->device_lock, flags);
|
2014-07-31 00:16:29 +00:00
|
|
|
/*
|
|
|
|
* if recovery is running, make sure it aborts.
|
|
|
|
*/
|
|
|
|
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
|
2016-12-08 23:48:19 +00:00
|
|
|
set_mask_bits(&mddev->sb_flags, 0,
|
|
|
|
BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
|
2016-11-02 03:16:50 +00:00
|
|
|
pr_crit("md/raid1:%s: Disk failure on %s, disabling device.\n"
|
|
|
|
"md/raid1:%s: Operation continuing on %d devices.\n",
|
|
|
|
mdname(mddev), bdevname(rdev->bdev, b),
|
|
|
|
mdname(mddev), conf->raid_disks - mddev->degraded);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2011-10-11 05:49:05 +00:00
|
|
|
static void print_conf(struct r1conf *conf)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
2016-11-02 03:16:50 +00:00
|
|
|
pr_debug("RAID1 conf printout:\n");
|
2005-04-16 22:20:36 +00:00
|
|
|
if (!conf) {
|
2016-11-02 03:16:50 +00:00
|
|
|
pr_debug("(!conf)\n");
|
2005-04-16 22:20:36 +00:00
|
|
|
return;
|
|
|
|
}
|
2016-11-02 03:16:50 +00:00
|
|
|
pr_debug(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
|
|
|
|
conf->raid_disks);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2006-09-01 04:27:36 +00:00
|
|
|
rcu_read_lock();
|
2005-04-16 22:20:36 +00:00
|
|
|
for (i = 0; i < conf->raid_disks; i++) {
|
|
|
|
char b[BDEVNAME_SIZE];
|
2011-10-11 05:45:26 +00:00
|
|
|
struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
|
2006-09-01 04:27:36 +00:00
|
|
|
if (rdev)
|
2016-11-02 03:16:50 +00:00
|
|
|
pr_debug(" disk %d, wo:%d, o:%d, dev:%s\n",
|
|
|
|
i, !test_bit(In_sync, &rdev->flags),
|
|
|
|
!test_bit(Faulty, &rdev->flags),
|
|
|
|
bdevname(rdev->bdev,b));
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
2006-09-01 04:27:36 +00:00
|
|
|
rcu_read_unlock();
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2011-10-11 05:49:05 +00:00
|
|
|
static void close_sync(struct r1conf *conf)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2017-10-17 12:17:03 +00:00
|
|
|
int idx;
|
|
|
|
|
|
|
|
for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++) {
|
|
|
|
_wait_barrier(conf, idx);
|
|
|
|
_allow_barrier(conf, idx);
|
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2018-05-20 22:25:52 +00:00
|
|
|
mempool_exit(&conf->r1buf_pool);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2011-10-11 05:47:53 +00:00
|
|
|
static int raid1_spare_active(struct mddev *mddev)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
|
|
|
int i;
|
2011-10-11 05:49:05 +00:00
|
|
|
struct r1conf *conf = mddev->private;
|
2010-08-18 01:56:59 +00:00
|
|
|
int count = 0;
|
|
|
|
unsigned long flags;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
/*
|
2014-09-30 04:23:59 +00:00
|
|
|
* Find all failed disks within the RAID1 configuration
|
2006-09-01 04:27:36 +00:00
|
|
|
* and mark them readable.
|
|
|
|
* Called under mddev lock, so rcu protection not needed.
|
2015-07-27 01:48:52 +00:00
|
|
|
* device_lock used to avoid races with raid1_end_read_request
|
|
|
|
* which expects 'In_sync' flags and ->degraded to be consistent.
|
2005-04-16 22:20:36 +00:00
|
|
|
*/
|
2015-07-27 01:48:52 +00:00
|
|
|
spin_lock_irqsave(&conf->device_lock, flags);
|
2005-04-16 22:20:36 +00:00
|
|
|
for (i = 0; i < conf->raid_disks; i++) {
|
2011-10-11 05:45:26 +00:00
|
|
|
struct md_rdev *rdev = conf->mirrors[i].rdev;
|
2011-12-22 23:17:57 +00:00
|
|
|
struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
|
|
|
|
if (repl
|
2014-10-29 23:51:31 +00:00
|
|
|
&& !test_bit(Candidate, &repl->flags)
|
2011-12-22 23:17:57 +00:00
|
|
|
&& repl->recovery_offset == MaxSector
|
|
|
|
&& !test_bit(Faulty, &repl->flags)
|
|
|
|
&& !test_and_set_bit(In_sync, &repl->flags)) {
|
|
|
|
/* replacement has just become active */
|
|
|
|
if (!rdev ||
|
|
|
|
!test_and_clear_bit(In_sync, &rdev->flags))
|
|
|
|
count++;
|
|
|
|
if (rdev) {
|
|
|
|
/* Replaced device not technically
|
|
|
|
* faulty, but we need to be sure
|
|
|
|
* it gets removed and never re-added
|
|
|
|
*/
|
|
|
|
set_bit(Faulty, &rdev->flags);
|
|
|
|
sysfs_notify_dirent_safe(
|
|
|
|
rdev->sysfs_state);
|
|
|
|
}
|
|
|
|
}
|
2006-09-01 04:27:36 +00:00
|
|
|
if (rdev
|
2013-10-24 01:55:17 +00:00
|
|
|
&& rdev->recovery_offset == MaxSector
|
2006-09-01 04:27:36 +00:00
|
|
|
&& !test_bit(Faulty, &rdev->flags)
|
2006-10-03 08:15:53 +00:00
|
|
|
&& !test_and_set_bit(In_sync, &rdev->flags)) {
|
2010-08-18 01:56:59 +00:00
|
|
|
count++;
|
2011-07-27 01:00:36 +00:00
|
|
|
sysfs_notify_dirent_safe(rdev->sysfs_state);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
}
|
2010-08-18 01:56:59 +00:00
|
|
|
mddev->degraded -= count;
|
|
|
|
spin_unlock_irqrestore(&conf->device_lock, flags);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
print_conf(conf);
|
2010-08-18 01:56:59 +00:00
|
|
|
return count;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2011-10-11 05:47:53 +00:00
|
|
|
static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2011-10-11 05:49:05 +00:00
|
|
|
struct r1conf *conf = mddev->private;
|
2008-06-27 22:31:33 +00:00
|
|
|
int err = -EEXIST;
|
2005-06-22 00:17:25 +00:00
|
|
|
int mirror = 0;
|
2012-07-31 00:03:52 +00:00
|
|
|
struct raid1_info *p;
|
2008-06-27 22:31:31 +00:00
|
|
|
int first = 0;
|
2011-12-22 23:17:56 +00:00
|
|
|
int last = conf->raid_disks - 1;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2011-07-27 01:00:36 +00:00
|
|
|
if (mddev->recovery_disabled == conf->recovery_disabled)
|
|
|
|
return -EBUSY;
|
|
|
|
|
2016-01-14 00:00:07 +00:00
|
|
|
if (md_integrity_add_rdev(rdev, mddev))
|
|
|
|
return -ENXIO;
|
|
|
|
|
2008-06-27 22:31:31 +00:00
|
|
|
if (rdev->raid_disk >= 0)
|
|
|
|
first = last = rdev->raid_disk;
|
|
|
|
|
2015-08-21 15:33:39 +00:00
|
|
|
/*
|
|
|
|
* find the disk ... but prefer rdev->saved_raid_disk
|
|
|
|
* if possible.
|
|
|
|
*/
|
|
|
|
if (rdev->saved_raid_disk >= 0 &&
|
|
|
|
rdev->saved_raid_disk >= first &&
|
2018-10-15 00:05:07 +00:00
|
|
|
rdev->saved_raid_disk < conf->raid_disks &&
|
2015-08-21 15:33:39 +00:00
|
|
|
conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
|
|
|
|
first = last = rdev->saved_raid_disk;
|
|
|
|
|
2011-12-22 23:17:57 +00:00
|
|
|
for (mirror = first; mirror <= last; mirror++) {
|
2019-06-14 22:41:08 +00:00
|
|
|
p = conf->mirrors + mirror;
|
2011-12-22 23:17:57 +00:00
|
|
|
if (!p->rdev) {
|
2013-05-02 19:19:24 +00:00
|
|
|
if (mddev->gendisk)
|
|
|
|
disk_stack_limits(mddev->gendisk, rdev->bdev,
|
|
|
|
rdev->data_offset << 9);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
p->head_position = 0;
|
|
|
|
rdev->raid_disk = mirror;
|
2008-06-27 22:31:33 +00:00
|
|
|
err = 0;
|
2005-11-28 21:44:13 +00:00
|
|
|
/* As all devices are equivalent, we don't need a full recovery
|
|
|
|
* if this was recently any drive of the array
|
|
|
|
*/
|
|
|
|
if (rdev->saved_raid_disk < 0)
|
2005-06-22 00:17:25 +00:00
|
|
|
conf->fullsync = 1;
|
2005-11-09 05:39:27 +00:00
|
|
|
rcu_assign_pointer(p->rdev, rdev);
|
2005-04-16 22:20:36 +00:00
|
|
|
break;
|
|
|
|
}
|
2011-12-22 23:17:57 +00:00
|
|
|
if (test_bit(WantReplacement, &p->rdev->flags) &&
|
|
|
|
p[conf->raid_disks].rdev == NULL) {
|
|
|
|
/* Add this device as a replacement */
|
|
|
|
clear_bit(In_sync, &rdev->flags);
|
|
|
|
set_bit(Replacement, &rdev->flags);
|
|
|
|
rdev->raid_disk = mirror;
|
|
|
|
err = 0;
|
|
|
|
conf->fullsync = 1;
|
|
|
|
rcu_assign_pointer(p[conf->raid_disks].rdev, rdev);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2013-05-02 19:19:24 +00:00
|
|
|
if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
|
2018-03-08 01:10:10 +00:00
|
|
|
blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
|
2005-04-16 22:20:36 +00:00
|
|
|
print_conf(conf);
|
2008-06-27 22:31:33 +00:00
|
|
|
return err;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2011-12-22 23:17:51 +00:00
|
|
|
static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2011-10-11 05:49:05 +00:00
|
|
|
struct r1conf *conf = mddev->private;
|
2005-04-16 22:20:36 +00:00
|
|
|
int err = 0;
|
2011-12-22 23:17:51 +00:00
|
|
|
int number = rdev->raid_disk;
|
2012-07-31 00:03:52 +00:00
|
|
|
struct raid1_info *p = conf->mirrors + number;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2011-12-22 23:17:56 +00:00
|
|
|
if (rdev != p->rdev)
|
|
|
|
p = conf->mirrors + conf->raid_disks + number;
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
print_conf(conf);
|
2011-12-22 23:17:51 +00:00
|
|
|
if (rdev == p->rdev) {
|
2005-11-09 05:39:31 +00:00
|
|
|
if (test_bit(In_sync, &rdev->flags) ||
|
2005-04-16 22:20:36 +00:00
|
|
|
atomic_read(&rdev->nr_pending)) {
|
|
|
|
err = -EBUSY;
|
|
|
|
goto abort;
|
|
|
|
}
|
2010-10-26 04:46:20 +00:00
|
|
|
/* Only remove non-faulty devices if recovery
|
md: restart recovery cleanly after device failure.
When we get any IO error during a recovery (rebuilding a spare), we abort
the recovery and restart it.
For RAID6 (and multi-drive RAID1) it may not be best to restart at the
beginning: when multiple failures can be tolerated, the recovery may be
able to continue and re-doing all that has already been done doesn't make
sense.
We already have the infrastructure to record where a recovery is up to
and restart from there, but it is not being used properly.
This is because:
- We sometimes abort with MD_RECOVERY_ERR rather than just MD_RECOVERY_INTR,
which causes the recovery not to be checkpointed.
- We remove spares and then re-add them, which loses important state
information.
The distinction between MD_RECOVERY_ERR and MD_RECOVERY_INTR really isn't
needed. If there is an error, the relevant drive will be marked as
Faulty, and that is enough to ensure correct handling of the error. So we
first remove MD_RECOVERY_ERR, changing some of the uses of it to
MD_RECOVERY_INTR.
Then we cause the attempt to remove a non-faulty device from an array to
fail (unless recovery is impossible as the array is too degraded). Then
when remove_and_add_spares attempts to remove the devices on which
recovery can continue, it will fail, they will remain in place, and
recovery will continue on them as desired.
Issue: If we are halfway through rebuilding a spare and another drive
fails, and a new spare is immediately available, do we want to:
1/ complete the current rebuild, then go back and rebuild the new spare or
2/ restart the rebuild from the start and rebuild both devices in
parallel.
Both options can be argued for. The code currently takes option 2 as
a/ this requires least code change
b/ this results in a minimally-degraded array in minimal time.
Cc: "Eivind Sarto" <ivan@kasenna.com>
Signed-off-by: Neil Brown <neilb@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-05-23 20:04:39 +00:00
|
|
|
* is not possible.
|
|
|
|
*/
|
|
|
|
if (!test_bit(Faulty, &rdev->flags) &&
|
2011-07-27 01:00:36 +00:00
|
|
|
mddev->recovery_disabled != conf->recovery_disabled &&
|
md: restart recovery cleanly after device failure.
When we get any IO error during a recovery (rebuilding a spare), we abort
the recovery and restart it.
For RAID6 (and multi-drive RAID1) it may not be best to restart at the
beginning: when multiple failures can be tolerated, the recovery may be
able to continue and re-doing all that has already been done doesn't make
sense.
We already have the infrastructure to record where a recovery is up to
and restart from there, but it is not being used properly.
This is because:
- We sometimes abort with MD_RECOVERY_ERR rather than just MD_RECOVERY_INTR,
which causes the recovery not to be checkpointed.
- We remove spares and then re-add them, which loses important state
information.
The distinction between MD_RECOVERY_ERR and MD_RECOVERY_INTR really isn't
needed. If there is an error, the relevant drive will be marked as
Faulty, and that is enough to ensure correct handling of the error. So we
first remove MD_RECOVERY_ERR, changing some of the uses of it to
MD_RECOVERY_INTR.
Then we cause the attempt to remove a non-faulty device from an array to
fail (unless recovery is impossible as the array is too degraded). Then
when remove_and_add_spares attempts to remove the devices on which
recovery can continue, it will fail, they will remain in place, and
recovery will continue on them as desired.
Issue: If we are halfway through rebuilding a spare and another drive
fails, and a new spare is immediately available, do we want to:
1/ complete the current rebuild, then go back and rebuild the new spare or
2/ restart the rebuild from the start and rebuild both devices in
parallel.
Both options can be argued for. The code currently takes option 2 as
a/ this requires least code change
b/ this results in a minimally-degraded array in minimal time.
Cc: "Eivind Sarto" <ivan@kasenna.com>
Signed-off-by: Neil Brown <neilb@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-05-23 20:04:39 +00:00
|
|
|
mddev->degraded < conf->raid_disks) {
|
|
|
|
err = -EBUSY;
|
|
|
|
goto abort;
|
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
p->rdev = NULL;
|
2016-06-02 06:19:53 +00:00
|
|
|
if (!test_bit(RemoveSynchronized, &rdev->flags)) {
|
|
|
|
synchronize_rcu();
|
|
|
|
if (atomic_read(&rdev->nr_pending)) {
|
|
|
|
/* lost the race, try later */
|
|
|
|
err = -EBUSY;
|
|
|
|
p->rdev = rdev;
|
|
|
|
goto abort;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (conf->mirrors[conf->raid_disks + number].rdev) {
|
2011-12-22 23:17:57 +00:00
|
|
|
/* We just removed a device that is being replaced.
|
|
|
|
* Move down the replacement. We drain all IO before
|
|
|
|
* doing this to avoid confusion.
|
|
|
|
*/
|
|
|
|
struct md_rdev *repl =
|
|
|
|
conf->mirrors[conf->raid_disks + number].rdev;
|
2013-06-12 01:01:22 +00:00
|
|
|
freeze_array(conf, 0);
|
md/raid1: fix NULL pointer dereference
In handle_write_finished(), if r1_bio->bios[m] != NULL, it assumes
the corresponding conf->mirrors[m].rdev is also not NULL. But that
is not always true.
Even if some IO holds the replacement rdev (i.e. rdev->nr_pending.count
> 0), raid1_remove_disk() can still set the rdev to NULL. That means
bios[m] != NULL but mirrors[m].rdev is NULL, resulting in a NULL
pointer dereference in handle_write_finished and sync_request_write.
This patch can fix BUGs as follows:
BUG: unable to handle kernel NULL pointer dereference at 0000000000000140
IP: [<ffffffff815bbbbd>] raid1d+0x2bd/0xfc0
PGD 12ab52067 PUD 12f587067 PMD 0
Oops: 0000 [#1] SMP
CPU: 1 PID: 2008 Comm: md3_raid1 Not tainted 4.1.44+ #130
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-1.fc26 04/01/2014
Call Trace:
? schedule+0x37/0x90
? prepare_to_wait_event+0x83/0xf0
md_thread+0x144/0x150
? wake_atomic_t_function+0x70/0x70
? md_start_sync+0xf0/0xf0
kthread+0xd8/0xf0
? kthread_worker_fn+0x160/0x160
ret_from_fork+0x42/0x70
? kthread_worker_fn+0x160/0x160
BUG: unable to handle kernel NULL pointer dereference at 00000000000000b8
IP: sync_request_write+0x9e/0x980
PGD 800000007c518067 P4D 800000007c518067 PUD 8002b067 PMD 0
Oops: 0000 [#1] SMP PTI
CPU: 24 PID: 2549 Comm: md3_raid1 Not tainted 4.15.0+ #118
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-1.fc26 04/01/2014
Call Trace:
? sched_clock+0x5/0x10
? sched_clock_cpu+0xc/0xb0
? flush_pending_writes+0x3a/0xd0
? pick_next_task_fair+0x4d5/0x5f0
? __switch_to+0xa2/0x430
raid1d+0x65a/0x870
? find_pers+0x70/0x70
? find_pers+0x70/0x70
? md_thread+0x11c/0x160
md_thread+0x11c/0x160
? finish_wait+0x80/0x80
kthread+0x111/0x130
? kthread_create_worker_on_cpu+0x70/0x70
? do_syscall_64+0x6f/0x190
? SyS_exit_group+0x10/0x10
ret_from_fork+0x35/0x40
Reviewed-by: NeilBrown <neilb@suse.com>
Signed-off-by: Yufen Yu <yuyufen@huawei.com>
Signed-off-by: Shaohua Li <sh.li@alibaba-inc.com>
2018-02-24 04:05:56 +00:00
|
|
|
if (atomic_read(&repl->nr_pending)) {
|
|
|
|
/* It means that some queued IO of retry_list
|
|
|
|
* holds repl. Thus, we cannot set replacement
|
|
|
|
* as NULL, avoiding rdev NULL pointer
|
|
|
|
* dereference in sync_request_write and
|
|
|
|
* handle_write_finished.
|
|
|
|
*/
|
|
|
|
err = -EBUSY;
|
|
|
|
unfreeze_array(conf);
|
|
|
|
goto abort;
|
|
|
|
}
|
2011-12-22 23:17:57 +00:00
|
|
|
clear_bit(Replacement, &repl->flags);
|
|
|
|
p->rdev = repl;
|
|
|
|
conf->mirrors[conf->raid_disks + number].rdev = NULL;
|
2013-06-12 01:01:22 +00:00
|
|
|
unfreeze_array(conf);
|
2017-04-24 07:58:04 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
clear_bit(WantReplacement, &rdev->flags);
|
2011-03-17 10:11:05 +00:00
|
|
|
err = md_integrity_register(mddev);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
abort:
|
|
|
|
|
|
|
|
print_conf(conf);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2015-07-20 13:29:37 +00:00
|
|
|
static void end_sync_read(struct bio *bio)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2017-03-16 16:12:26 +00:00
|
|
|
struct r1bio *r1_bio = get_resync_r1bio(bio);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2011-10-07 03:22:55 +00:00
|
|
|
update_head_pos(r1_bio->read_disk, r1_bio);
|
2011-10-07 03:22:53 +00:00
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
|
|
|
|
* we have read a block, now it needs to be re-written,
|
|
|
|
* or re-read if the read failed.
|
|
|
|
* We don't do much here, just schedule handling by raid1d
|
|
|
|
*/
|
2017-06-03 07:38:06 +00:00
|
|
|
if (!bio->bi_status)
|
2005-04-16 22:20:36 +00:00
|
|
|
set_bit(R1BIO_Uptodate, &r1_bio->state);
|
2006-01-06 08:20:26 +00:00
|
|
|
|
|
|
|
if (atomic_dec_and_test(&r1_bio->remaining))
|
|
|
|
reschedule_retry(r1_bio);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2019-02-07 19:19:01 +00:00
|
|
|
static void abort_sync_write(struct mddev *mddev, struct r1bio *r1_bio)
|
|
|
|
{
|
|
|
|
sector_t sync_blocks = 0;
|
|
|
|
sector_t s = r1_bio->sector;
|
|
|
|
long sectors_to_go = r1_bio->sectors;
|
|
|
|
|
|
|
|
/* make sure these bits don't get cleared. */
|
|
|
|
do {
|
|
|
|
md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1);
|
|
|
|
s += sync_blocks;
|
|
|
|
sectors_to_go -= sync_blocks;
|
|
|
|
} while (sectors_to_go > 0);
|
|
|
|
}
|
|
|
|
|
2019-07-27 06:02:58 +00:00
|
|
|
static void put_sync_write_buf(struct r1bio *r1_bio, int uptodate)
|
|
|
|
{
|
|
|
|
if (atomic_dec_and_test(&r1_bio->remaining)) {
|
|
|
|
struct mddev *mddev = r1_bio->mddev;
|
|
|
|
int s = r1_bio->sectors;
|
|
|
|
|
|
|
|
if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
|
|
|
|
test_bit(R1BIO_WriteError, &r1_bio->state))
|
|
|
|
reschedule_retry(r1_bio);
|
|
|
|
else {
|
|
|
|
put_buf(r1_bio);
|
|
|
|
md_done_sync(mddev, s, uptodate);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-07-20 13:29:37 +00:00
|
|
|
static void end_sync_write(struct bio *bio)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2017-06-03 07:38:06 +00:00
|
|
|
int uptodate = !bio->bi_status;
|
2017-03-16 16:12:26 +00:00
|
|
|
struct r1bio *r1_bio = get_resync_r1bio(bio);
|
2011-10-11 05:47:53 +00:00
|
|
|
struct mddev *mddev = r1_bio->mddev;
|
2011-10-11 05:49:05 +00:00
|
|
|
struct r1conf *conf = mddev->private;
|
2011-07-28 01:31:49 +00:00
|
|
|
sector_t first_bad;
|
|
|
|
int bad_sectors;
|
2016-06-02 06:19:52 +00:00
|
|
|
struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev;
|
2011-10-07 03:22:53 +00:00
|
|
|
|
2006-03-31 10:31:57 +00:00
|
|
|
if (!uptodate) {
|
2019-02-07 19:19:01 +00:00
|
|
|
abort_sync_write(mddev, r1_bio);
|
2016-06-02 06:19:52 +00:00
|
|
|
set_bit(WriteErrorSeen, &rdev->flags);
|
|
|
|
if (!test_and_set_bit(WantReplacement, &rdev->flags))
|
2011-12-22 23:17:57 +00:00
|
|
|
set_bit(MD_RECOVERY_NEEDED, &
|
|
|
|
mddev->recovery);
|
2011-07-28 01:33:00 +00:00
|
|
|
set_bit(R1BIO_WriteError, &r1_bio->state);
|
2016-06-02 06:19:52 +00:00
|
|
|
} else if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
|
2011-07-28 01:33:42 +00:00
|
|
|
&first_bad, &bad_sectors) &&
|
|
|
|
!is_badblock(conf->mirrors[r1_bio->read_disk].rdev,
|
|
|
|
r1_bio->sector,
|
|
|
|
r1_bio->sectors,
|
|
|
|
&first_bad, &bad_sectors)
|
|
|
|
)
|
2011-07-28 01:31:49 +00:00
|
|
|
set_bit(R1BIO_MadeGood, &r1_bio->state);
|
2005-08-04 19:53:34 +00:00
|
|
|
|
2019-07-27 06:02:58 +00:00
|
|
|
put_sync_write_buf(r1_bio, uptodate);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2011-10-11 05:45:26 +00:00
|
|
|
static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector,
|
2011-07-28 01:33:00 +00:00
|
|
|
int sectors, struct page *page, int rw)
|
|
|
|
{
|
2016-06-05 19:32:07 +00:00
|
|
|
if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false))
|
2011-07-28 01:33:00 +00:00
|
|
|
/* success */
|
|
|
|
return 1;
|
2011-12-22 23:17:57 +00:00
|
|
|
if (rw == WRITE) {
|
2011-07-28 01:33:00 +00:00
|
|
|
set_bit(WriteErrorSeen, &rdev->flags);
|
2011-12-22 23:17:57 +00:00
|
|
|
if (!test_and_set_bit(WantReplacement,
|
|
|
|
&rdev->flags))
|
|
|
|
set_bit(MD_RECOVERY_NEEDED, &
|
|
|
|
rdev->mddev->recovery);
|
|
|
|
}
|
2011-07-28 01:33:00 +00:00
|
|
|
/* need to record an error - either for the block or the device */
|
|
|
|
if (!rdev_set_badblocks(rdev, sector, sectors, 0))
|
|
|
|
md_error(rdev->mddev, rdev);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2011-10-11 05:48:43 +00:00
|
|
|
static int fix_sync_read_error(struct r1bio *r1_bio)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2011-05-11 04:40:44 +00:00
|
|
|
/* Try some synchronous reads of other devices to get
|
|
|
|
* good data, much like with normal read errors. Only
|
|
|
|
* read into the pages we already have so we don't
|
|
|
|
* need to re-issue the read request.
|
|
|
|
* We don't need to freeze the array, because being in an
|
|
|
|
* active sync request, there is no normal IO, and
|
|
|
|
* no overlapping syncs.
|
2011-07-28 01:31:48 +00:00
|
|
|
* We don't need to check is_badblock() again as we
|
|
|
|
* made sure that anything with a bad block in range
|
|
|
|
* will have bi_end_io clear.
|
2011-05-11 04:40:44 +00:00
|
|
|
*/
|
2011-10-11 05:47:53 +00:00
|
|
|
struct mddev *mddev = r1_bio->mddev;
|
2011-10-11 05:49:05 +00:00
|
|
|
struct r1conf *conf = mddev->private;
|
2011-05-11 04:40:44 +00:00
|
|
|
struct bio *bio = r1_bio->bios[r1_bio->read_disk];
|
2017-03-16 16:12:27 +00:00
|
|
|
struct page **pages = get_resync_pages(bio)->pages;
|
2011-05-11 04:40:44 +00:00
|
|
|
sector_t sect = r1_bio->sector;
|
|
|
|
int sectors = r1_bio->sectors;
|
|
|
|
int idx = 0;
|
2016-11-18 05:16:12 +00:00
|
|
|
struct md_rdev *rdev;
|
|
|
|
|
|
|
|
rdev = conf->mirrors[r1_bio->read_disk].rdev;
|
|
|
|
if (test_bit(FailFast, &rdev->flags)) {
|
|
|
|
/* Don't try recovering from here - just fail it
|
|
|
|
* ... unless it is the last working device of course */
|
|
|
|
md_error(mddev, rdev);
|
|
|
|
if (test_bit(Faulty, &rdev->flags))
|
|
|
|
/* Don't try to read from here, but make sure
|
|
|
|
* put_buf does its thing
|
|
|
|
*/
|
|
|
|
bio->bi_end_io = end_sync_write;
|
|
|
|
}
|
2011-05-11 04:40:44 +00:00
|
|
|
|
|
|
|
while(sectors) {
|
|
|
|
int s = sectors;
|
|
|
|
int d = r1_bio->read_disk;
|
|
|
|
int success = 0;
|
2011-05-11 04:48:56 +00:00
|
|
|
int start;
|
2011-05-11 04:40:44 +00:00
|
|
|
|
|
|
|
if (s > (PAGE_SIZE>>9))
|
|
|
|
s = PAGE_SIZE >> 9;
|
|
|
|
do {
|
|
|
|
if (r1_bio->bios[d]->bi_end_io == end_sync_read) {
|
|
|
|
/* No rcu protection needed here; devices
|
|
|
|
* can only be removed when no resync is
|
|
|
|
* active, and resync is currently active
|
|
|
|
*/
|
|
|
|
rdev = conf->mirrors[d].rdev;
|
2011-07-27 01:00:36 +00:00
|
|
|
if (sync_page_io(rdev, sect, s<<9,
|
2017-03-16 16:12:27 +00:00
|
|
|
pages[idx],
|
2016-06-05 19:32:07 +00:00
|
|
|
REQ_OP_READ, 0, false)) {
|
2011-05-11 04:40:44 +00:00
|
|
|
success = 1;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
d++;
|
2011-12-22 23:17:56 +00:00
|
|
|
if (d == conf->raid_disks * 2)
|
2011-05-11 04:40:44 +00:00
|
|
|
d = 0;
|
|
|
|
} while (!success && d != r1_bio->read_disk);
|
|
|
|
|
2011-05-11 04:48:56 +00:00
|
|
|
if (!success) {
|
2011-05-11 04:40:44 +00:00
|
|
|
char b[BDEVNAME_SIZE];
|
2011-07-28 01:33:42 +00:00
|
|
|
int abort = 0;
|
|
|
|
/* Cannot read from anywhere, this block is lost.
|
|
|
|
* Record a bad block on each device. If that doesn't
|
|
|
|
* work just disable and interrupt the recovery.
|
|
|
|
* Don't fail devices as that won't really help.
|
|
|
|
*/
|
2016-11-02 03:16:50 +00:00
|
|
|
pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n",
|
2017-08-23 17:10:32 +00:00
|
|
|
mdname(mddev), bio_devname(bio, b),
|
2016-11-02 03:16:50 +00:00
|
|
|
(unsigned long long)r1_bio->sector);
|
2011-12-22 23:17:56 +00:00
|
|
|
for (d = 0; d < conf->raid_disks * 2; d++) {
|
2011-07-28 01:33:42 +00:00
|
|
|
rdev = conf->mirrors[d].rdev;
|
|
|
|
if (!rdev || test_bit(Faulty, &rdev->flags))
|
|
|
|
continue;
|
|
|
|
if (!rdev_set_badblocks(rdev, sect, s, 0))
|
|
|
|
abort = 1;
|
|
|
|
}
|
|
|
|
if (abort) {
|
2011-10-26 00:54:39 +00:00
|
|
|
conf->recovery_disabled =
|
|
|
|
mddev->recovery_disabled;
|
2011-07-28 01:33:42 +00:00
|
|
|
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
|
|
|
|
md_done_sync(mddev, r1_bio->sectors, 0);
|
|
|
|
put_buf(r1_bio);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
/* Try next page */
|
|
|
|
sectors -= s;
|
|
|
|
sect += s;
|
|
|
|
idx++;
|
|
|
|
continue;
|
2006-01-06 08:20:26 +00:00
|
|
|
}
|
2011-05-11 04:48:56 +00:00
|
|
|
|
|
|
|
start = d;
|
|
|
|
/* write it back and re-read */
|
|
|
|
while (d != r1_bio->read_disk) {
|
|
|
|
if (d == 0)
|
2011-12-22 23:17:56 +00:00
|
|
|
d = conf->raid_disks * 2;
|
2011-05-11 04:48:56 +00:00
|
|
|
d--;
|
|
|
|
if (r1_bio->bios[d]->bi_end_io != end_sync_read)
|
|
|
|
continue;
|
|
|
|
rdev = conf->mirrors[d].rdev;
|
2011-07-28 01:33:00 +00:00
|
|
|
if (r1_sync_page_io(rdev, sect, s,
|
2017-03-16 16:12:27 +00:00
|
|
|
pages[idx],
|
2011-07-28 01:33:00 +00:00
|
|
|
WRITE) == 0) {
|
2011-05-11 04:48:56 +00:00
|
|
|
r1_bio->bios[d]->bi_end_io = NULL;
|
|
|
|
rdev_dec_pending(rdev, mddev);
|
2011-07-27 01:00:36 +00:00
|
|
|
}
|
2011-05-11 04:48:56 +00:00
|
|
|
}
|
|
|
|
d = start;
|
|
|
|
while (d != r1_bio->read_disk) {
|
|
|
|
if (d == 0)
|
2011-12-22 23:17:56 +00:00
|
|
|
d = conf->raid_disks * 2;
|
2011-05-11 04:48:56 +00:00
|
|
|
d--;
|
|
|
|
if (r1_bio->bios[d]->bi_end_io != end_sync_read)
|
|
|
|
continue;
|
|
|
|
rdev = conf->mirrors[d].rdev;
|
2011-07-28 01:33:00 +00:00
|
|
|
if (r1_sync_page_io(rdev, sect, s,
|
2017-03-16 16:12:27 +00:00
|
|
|
pages[idx],
|
2011-07-28 01:33:00 +00:00
|
|
|
READ) != 0)
|
2011-07-27 01:00:36 +00:00
|
|
|
atomic_add(s, &rdev->corrected_errors);
|
2011-05-11 04:48:56 +00:00
|
|
|
}
|
2011-05-11 04:40:44 +00:00
|
|
|
sectors -= s;
|
|
|
|
sect += s;
|
|
|
|
idx ++;
|
|
|
|
}
|
2011-05-11 04:48:56 +00:00
|
|
|
set_bit(R1BIO_Uptodate, &r1_bio->state);
|
2017-06-03 07:38:06 +00:00
|
|
|
bio->bi_status = 0;
|
2011-05-11 04:40:44 +00:00
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
2014-09-09 03:54:11 +00:00
|
|
|
static void process_checks(struct r1bio *r1_bio)
|
2011-05-11 04:40:44 +00:00
|
|
|
{
|
|
|
|
/* We have read all readable devices. If we haven't
|
|
|
|
* got the block, then there is no hope left.
|
|
|
|
* If we have, then we want to do a comparison
|
|
|
|
* and skip the write if everything is the same.
|
|
|
|
* If any blocks failed to read, then we need to
|
|
|
|
* attempt an over-write
|
|
|
|
*/
|
2011-10-11 05:47:53 +00:00
|
|
|
struct mddev *mddev = r1_bio->mddev;
|
2011-10-11 05:49:05 +00:00
|
|
|
struct r1conf *conf = mddev->private;
|
2011-05-11 04:40:44 +00:00
|
|
|
int primary;
|
|
|
|
int i;
|
2012-04-12 06:04:47 +00:00
|
|
|
int vcnt;
|
2011-05-11 04:40:44 +00:00
|
|
|
|
2013-07-17 05:19:29 +00:00
|
|
|
/* Fix variable parts of all bios */
|
|
|
|
vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
|
|
|
|
for (i = 0; i < conf->raid_disks * 2; i++) {
|
2017-06-03 07:38:06 +00:00
|
|
|
blk_status_t status;
|
2013-07-17 05:19:29 +00:00
|
|
|
struct bio *b = r1_bio->bios[i];
|
2017-03-16 16:12:26 +00:00
|
|
|
struct resync_pages *rp = get_resync_pages(b);
|
2013-07-17 05:19:29 +00:00
|
|
|
if (b->bi_end_io != end_sync_read)
|
|
|
|
continue;
|
2015-07-20 13:29:37 +00:00
|
|
|
/* fixup the bio for reuse, but preserve errno */
|
2017-06-03 07:38:06 +00:00
|
|
|
status = b->bi_status;
|
2013-07-17 05:19:29 +00:00
|
|
|
bio_reset(b);
|
2017-06-03 07:38:06 +00:00
|
|
|
b->bi_status = status;
|
2013-10-11 22:44:27 +00:00
|
|
|
b->bi_iter.bi_sector = r1_bio->sector +
|
2013-07-17 05:19:29 +00:00
|
|
|
conf->mirrors[i].rdev->data_offset;
|
2017-08-23 17:10:32 +00:00
|
|
|
bio_set_dev(b, conf->mirrors[i].rdev->bdev);
|
2013-07-17 05:19:29 +00:00
|
|
|
b->bi_end_io = end_sync_read;
|
2017-03-16 16:12:26 +00:00
|
|
|
rp->raid_bio = r1_bio;
|
|
|
|
b->bi_private = rp;
|
2013-07-17 05:19:29 +00:00
|
|
|
|
2017-07-14 08:14:43 +00:00
|
|
|
/* initialize bvec table again */
|
|
|
|
md_bio_reset_resync_pages(b, rp, r1_bio->sectors << 9);
|
2013-07-17 05:19:29 +00:00
|
|
|
}
|
2011-12-22 23:17:56 +00:00
|
|
|
for (primary = 0; primary < conf->raid_disks * 2; primary++)
|
2011-05-11 04:40:44 +00:00
|
|
|
if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
|
2017-06-03 07:38:06 +00:00
|
|
|
!r1_bio->bios[primary]->bi_status) {
|
2011-05-11 04:40:44 +00:00
|
|
|
r1_bio->bios[primary]->bi_end_io = NULL;
|
|
|
|
rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
r1_bio->read_disk = primary;
|
2011-12-22 23:17:56 +00:00
|
|
|
for (i = 0; i < conf->raid_disks * 2; i++) {
|
2019-04-25 07:03:00 +00:00
|
|
|
int j = 0;
|
2011-05-11 04:48:56 +00:00
|
|
|
struct bio *pbio = r1_bio->bios[primary];
|
|
|
|
struct bio *sbio = r1_bio->bios[i];
|
2017-06-03 07:38:06 +00:00
|
|
|
blk_status_t status = sbio->bi_status;
|
2017-03-16 16:12:27 +00:00
|
|
|
struct page **ppages = get_resync_pages(pbio)->pages;
|
|
|
|
struct page **spages = get_resync_pages(sbio)->pages;
|
2017-03-16 16:12:28 +00:00
|
|
|
struct bio_vec *bi;
|
2017-03-28 08:17:55 +00:00
|
|
|
int page_len[RESYNC_PAGES] = { 0 };
|
2019-02-15 11:13:19 +00:00
|
|
|
struct bvec_iter_all iter_all;
|
2011-05-11 04:40:44 +00:00
|
|
|
|
2012-09-11 18:26:12 +00:00
|
|
|
if (sbio->bi_end_io != end_sync_read)
|
2011-05-11 04:48:56 +00:00
|
|
|
continue;
|
2015-07-20 13:29:37 +00:00
|
|
|
/* Now we can 'fixup' the error value */
|
2017-06-03 07:38:06 +00:00
|
|
|
sbio->bi_status = 0;
|
2011-05-11 04:48:56 +00:00
|
|
|
|
2019-04-25 07:03:00 +00:00
|
|
|
bio_for_each_segment_all(bi, sbio, iter_all)
|
|
|
|
page_len[j++] = bi->bv_len;
|
2017-03-16 16:12:28 +00:00
|
|
|
|
2017-06-03 07:38:06 +00:00
|
|
|
if (!status) {
|
2011-05-11 04:48:56 +00:00
|
|
|
for (j = vcnt; j-- ; ) {
|
2017-03-16 16:12:27 +00:00
|
|
|
if (memcmp(page_address(ppages[j]),
|
|
|
|
page_address(spages[j]),
|
2017-03-16 16:12:28 +00:00
|
|
|
page_len[j]))
|
2011-05-11 04:48:56 +00:00
|
|
|
break;
|
2006-01-06 08:20:22 +00:00
|
|
|
}
|
2011-05-11 04:48:56 +00:00
|
|
|
} else
|
|
|
|
j = 0;
|
|
|
|
if (j >= 0)
|
2012-10-11 03:17:59 +00:00
|
|
|
atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
|
2011-05-11 04:48:56 +00:00
|
|
|
if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
|
2017-06-03 07:38:06 +00:00
|
|
|
&& !status)) {
|
2011-05-11 04:48:56 +00:00
|
|
|
/* No need to write to this device. */
|
|
|
|
sbio->bi_end_io = NULL;
|
|
|
|
rdev_dec_pending(conf->mirrors[i].rdev, mddev);
|
|
|
|
continue;
|
|
|
|
}
|
2012-09-10 20:49:33 +00:00
|
|
|
|
|
|
|
bio_copy_data(sbio, pbio);
|
2011-05-11 04:48:56 +00:00
|
|
|
}
|
2011-05-11 04:40:44 +00:00
|
|
|
}
|
|
|
|
|
2011-10-11 05:48:43 +00:00
|
|
|
static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
|
2011-05-11 04:40:44 +00:00
|
|
|
{
|
2011-10-11 05:49:05 +00:00
|
|
|
struct r1conf *conf = mddev->private;
|
2011-05-11 04:40:44 +00:00
|
|
|
int i;
|
2011-12-22 23:17:56 +00:00
|
|
|
int disks = conf->raid_disks * 2;
|
2017-06-15 09:00:25 +00:00
|
|
|
struct bio *wbio;
|
2011-05-11 04:40:44 +00:00
|
|
|
|
|
|
|
if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
|
|
|
|
/* ouch - failed to read all of that. */
|
|
|
|
if (!fix_sync_read_error(r1_bio))
|
|
|
|
return;
|
2011-05-11 04:50:37 +00:00
|
|
|
|
|
|
|
if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
|
2014-09-09 03:54:11 +00:00
|
|
|
process_checks(r1_bio);
|
|
|
|
|
2006-01-06 08:20:26 +00:00
|
|
|
/*
|
|
|
|
* schedule writes
|
|
|
|
*/
|
2005-04-16 22:20:36 +00:00
|
|
|
atomic_set(&r1_bio->remaining, 1);
|
|
|
|
for (i = 0; i < disks ; i++) {
|
|
|
|
wbio = r1_bio->bios[i];
|
2006-01-06 08:20:21 +00:00
|
|
|
if (wbio->bi_end_io == NULL ||
|
|
|
|
(wbio->bi_end_io == end_sync_read &&
|
|
|
|
(i == r1_bio->read_disk ||
|
|
|
|
!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
|
2005-04-16 22:20:36 +00:00
|
|
|
continue;
|
2019-02-07 19:19:01 +00:00
|
|
|
if (test_bit(Faulty, &conf->mirrors[i].rdev->flags)) {
|
|
|
|
abort_sync_write(mddev, r1_bio);
|
2017-04-06 02:06:37 +00:00
|
|
|
continue;
|
2019-02-07 19:19:01 +00:00
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2016-06-05 19:32:07 +00:00
|
|
|
bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
|
2016-11-18 05:16:12 +00:00
|
|
|
if (test_bit(FailFast, &conf->mirrors[i].rdev->flags))
|
|
|
|
wbio->bi_opf |= MD_FAILFAST;
|
|
|
|
|
2006-01-06 08:20:21 +00:00
|
|
|
wbio->bi_end_io = end_sync_write;
|
2005-04-16 22:20:36 +00:00
|
|
|
atomic_inc(&r1_bio->remaining);
|
2013-02-05 23:19:29 +00:00
|
|
|
md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));
|
2005-06-22 00:17:23 +00:00
|
|
|
|
2020-07-01 08:59:44 +00:00
|
|
|
submit_bio_noacct(wbio);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2019-07-27 06:02:58 +00:00
|
|
|
put_sync_write_buf(r1_bio, 1);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This is a kernel thread which:
|
|
|
|
*
|
|
|
|
* 1. Retries failed read operations on working mirrors.
|
|
|
|
* 2. Updates the raid superblock when problems are encountered.
|
2011-07-28 01:31:48 +00:00
|
|
|
* 3. Performs writes following reads for array synchronising.
|
2005-04-16 22:20:36 +00:00
|
|
|
*/
|
|
|
|
|
2011-10-11 05:49:05 +00:00
|
|
|
static void fix_read_error(struct r1conf *conf, int read_disk,
|
2006-10-03 08:15:51 +00:00
|
|
|
sector_t sect, int sectors)
|
|
|
|
{
|
2011-10-11 05:47:53 +00:00
|
|
|
struct mddev *mddev = conf->mddev;
|
2006-10-03 08:15:51 +00:00
|
|
|
while(sectors) {
|
|
|
|
int s = sectors;
|
|
|
|
int d = read_disk;
|
|
|
|
int success = 0;
|
|
|
|
int start;
|
2011-10-11 05:45:26 +00:00
|
|
|
struct md_rdev *rdev;
|
2006-10-03 08:15:51 +00:00
|
|
|
|
|
|
|
if (s > (PAGE_SIZE>>9))
|
|
|
|
s = PAGE_SIZE >> 9;
|
|
|
|
|
|
|
|
do {
|
2011-07-28 01:31:48 +00:00
|
|
|
sector_t first_bad;
|
|
|
|
int bad_sectors;
|
|
|
|
|
2016-06-02 06:19:52 +00:00
|
|
|
rcu_read_lock();
|
|
|
|
rdev = rcu_dereference(conf->mirrors[d].rdev);
|
2006-10-03 08:15:51 +00:00
|
|
|
if (rdev &&
|
2012-05-22 03:55:03 +00:00
|
|
|
(test_bit(In_sync, &rdev->flags) ||
|
|
|
|
(!test_bit(Faulty, &rdev->flags) &&
|
|
|
|
rdev->recovery_offset >= sect + s)) &&
|
2011-07-28 01:31:48 +00:00
|
|
|
is_badblock(rdev, sect, s,
|
2016-06-02 06:19:52 +00:00
|
|
|
&first_bad, &bad_sectors) == 0) {
|
|
|
|
atomic_inc(&rdev->nr_pending);
|
|
|
|
rcu_read_unlock();
|
|
|
|
if (sync_page_io(rdev, sect, s<<9,
|
2016-06-05 19:32:07 +00:00
|
|
|
conf->tmppage, REQ_OP_READ, 0, false))
|
2016-06-02 06:19:52 +00:00
|
|
|
success = 1;
|
|
|
|
rdev_dec_pending(rdev, mddev);
|
|
|
|
if (success)
|
|
|
|
break;
|
|
|
|
} else
|
|
|
|
rcu_read_unlock();
|
|
|
|
d++;
|
|
|
|
if (d == conf->raid_disks * 2)
|
|
|
|
d = 0;
|
2006-10-03 08:15:51 +00:00
|
|
|
} while (!success && d != read_disk);
|
|
|
|
|
|
|
|
if (!success) {
|
2011-07-28 01:33:00 +00:00
|
|
|
/* Cannot read from anywhere - mark it bad */
|
2011-10-11 05:45:26 +00:00
|
|
|
struct md_rdev *rdev = conf->mirrors[read_disk].rdev;
|
2011-07-28 01:33:00 +00:00
|
|
|
if (!rdev_set_badblocks(rdev, sect, s, 0))
|
|
|
|
md_error(mddev, rdev);
|
2006-10-03 08:15:51 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
/* write it back and re-read */
|
|
|
|
start = d;
|
|
|
|
while (d != read_disk) {
|
|
|
|
if (d==0)
|
2011-12-22 23:17:56 +00:00
|
|
|
d = conf->raid_disks * 2;
|
2006-10-03 08:15:51 +00:00
|
|
|
d--;
|
2016-06-02 06:19:52 +00:00
|
|
|
rcu_read_lock();
|
|
|
|
rdev = rcu_dereference(conf->mirrors[d].rdev);
|
2006-10-03 08:15:51 +00:00
|
|
|
if (rdev &&
|
2016-06-02 06:19:52 +00:00
|
|
|
!test_bit(Faulty, &rdev->flags)) {
|
|
|
|
atomic_inc(&rdev->nr_pending);
|
|
|
|
rcu_read_unlock();
|
2011-07-28 01:33:00 +00:00
|
|
|
r1_sync_page_io(rdev, sect, s,
|
|
|
|
conf->tmppage, WRITE);
|
2016-06-02 06:19:52 +00:00
|
|
|
rdev_dec_pending(rdev, mddev);
|
|
|
|
} else
|
|
|
|
rcu_read_unlock();
|
2006-10-03 08:15:51 +00:00
|
|
|
}
|
|
|
|
d = start;
|
|
|
|
while (d != read_disk) {
|
|
|
|
char b[BDEVNAME_SIZE];
|
|
|
|
if (d==0)
|
2011-12-22 23:17:56 +00:00
|
|
|
d = conf->raid_disks * 2;
|
2006-10-03 08:15:51 +00:00
|
|
|
d--;
|
2016-06-02 06:19:52 +00:00
|
|
|
rcu_read_lock();
|
|
|
|
rdev = rcu_dereference(conf->mirrors[d].rdev);
|
2006-10-03 08:15:51 +00:00
|
|
|
if (rdev &&
|
2014-09-18 01:09:04 +00:00
|
|
|
!test_bit(Faulty, &rdev->flags)) {
|
2016-06-02 06:19:52 +00:00
|
|
|
atomic_inc(&rdev->nr_pending);
|
|
|
|
rcu_read_unlock();
|
2011-07-28 01:33:00 +00:00
|
|
|
if (r1_sync_page_io(rdev, sect, s,
|
|
|
|
conf->tmppage, READ)) {
|
2006-10-03 08:15:51 +00:00
|
|
|
atomic_add(s, &rdev->corrected_errors);
|
2016-11-02 03:16:50 +00:00
|
|
|
pr_info("md/raid1:%s: read error corrected (%d sectors at %llu on %s)\n",
|
|
|
|
mdname(mddev), s,
|
|
|
|
(unsigned long long)(sect +
|
|
|
|
rdev->data_offset),
|
|
|
|
bdevname(rdev->bdev, b));
|
2006-10-03 08:15:51 +00:00
|
|
|
}
|
2016-06-02 06:19:52 +00:00
|
|
|
rdev_dec_pending(rdev, mddev);
|
|
|
|
} else
|
|
|
|
rcu_read_unlock();
|
2006-10-03 08:15:51 +00:00
|
|
|
}
|
|
|
|
sectors -= s;
|
|
|
|
sect += s;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-10-11 05:48:43 +00:00
|
|
|
static int narrow_write_error(struct r1bio *r1_bio, int i)
|
2011-07-28 01:32:41 +00:00
|
|
|
{
|
2011-10-11 05:47:53 +00:00
|
|
|
struct mddev *mddev = r1_bio->mddev;
|
2011-10-11 05:49:05 +00:00
|
|
|
struct r1conf *conf = mddev->private;
|
2011-10-11 05:45:26 +00:00
|
|
|
struct md_rdev *rdev = conf->mirrors[i].rdev;
|
2011-07-28 01:32:41 +00:00
|
|
|
|
|
|
|
/* bio has the data to be written to device 'i' where
|
|
|
|
* we just recently had a write error.
|
|
|
|
* We repeatedly clone the bio and trim down to one block,
|
|
|
|
* then try the write. Where the write fails we record
|
|
|
|
* a bad block.
|
|
|
|
* It is conceivable that the bio doesn't exactly align with
|
|
|
|
* blocks. We must handle this somehow.
|
|
|
|
*
|
|
|
|
* We currently own a reference on the rdev.
|
|
|
|
*/
|
|
|
|
|
|
|
|
int block_sectors;
|
|
|
|
sector_t sector;
|
|
|
|
int sectors;
|
|
|
|
int sect_to_write = r1_bio->sectors;
|
|
|
|
int ok = 1;
|
|
|
|
|
|
|
|
if (rdev->badblocks.shift < 0)
|
|
|
|
return 0;
|
|
|
|
|
2015-02-12 17:02:09 +00:00
|
|
|
block_sectors = roundup(1 << rdev->badblocks.shift,
|
|
|
|
bdev_logical_block_size(rdev->bdev) >> 9);
|
2011-07-28 01:32:41 +00:00
|
|
|
sector = r1_bio->sector;
|
|
|
|
sectors = ((sector + block_sectors)
|
|
|
|
& ~(sector_t)(block_sectors - 1))
|
|
|
|
- sector;
|
|
|
|
|
|
|
|
while (sect_to_write) {
|
|
|
|
struct bio *wbio;
|
|
|
|
if (sectors > sect_to_write)
|
|
|
|
sectors = sect_to_write;
|
|
|
|
/* Write at 'sector' for 'sectors'*/
|
|
|
|
|
2012-09-10 22:17:11 +00:00
|
|
|
if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
|
2017-03-16 16:12:31 +00:00
|
|
|
wbio = bio_clone_fast(r1_bio->behind_master_bio,
|
|
|
|
GFP_NOIO,
|
2018-05-20 22:25:52 +00:00
|
|
|
&mddev->bio_set);
|
2012-09-10 22:17:11 +00:00
|
|
|
} else {
|
2017-02-14 15:29:03 +00:00
|
|
|
wbio = bio_clone_fast(r1_bio->master_bio, GFP_NOIO,
|
2018-05-20 22:25:52 +00:00
|
|
|
&mddev->bio_set);
|
2012-09-10 22:17:11 +00:00
|
|
|
}
|
|
|
|
|
2016-06-05 19:32:07 +00:00
|
|
|
bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
|
2013-10-11 22:44:27 +00:00
|
|
|
wbio->bi_iter.bi_sector = r1_bio->sector;
|
|
|
|
wbio->bi_iter.bi_size = r1_bio->sectors << 9;
|
2011-07-28 01:32:41 +00:00
|
|
|
|
2013-08-07 18:14:32 +00:00
|
|
|
bio_trim(wbio, sector - r1_bio->sector, sectors);
|
2013-10-11 22:44:27 +00:00
|
|
|
wbio->bi_iter.bi_sector += rdev->data_offset;
|
2017-08-23 17:10:32 +00:00
|
|
|
bio_set_dev(wbio, rdev->bdev);
|
2016-06-05 19:31:41 +00:00
|
|
|
|
|
|
|
if (submit_bio_wait(wbio) < 0)
|
2011-07-28 01:32:41 +00:00
|
|
|
/* failure! */
|
|
|
|
ok = rdev_set_badblocks(rdev, sector,
|
|
|
|
sectors, 0)
|
|
|
|
&& ok;
|
|
|
|
|
|
|
|
bio_put(wbio);
|
|
|
|
sect_to_write -= sectors;
|
|
|
|
sector += sectors;
|
|
|
|
sectors = block_sectors;
|
|
|
|
}
|
|
|
|
return ok;
|
|
|
|
}
|
|
|
|
|
2011-10-11 05:49:05 +00:00
|
|
|
static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
|
2011-07-28 01:38:13 +00:00
|
|
|
{
|
|
|
|
int m;
|
|
|
|
int s = r1_bio->sectors;
|
2011-12-22 23:17:56 +00:00
|
|
|
for (m = 0; m < conf->raid_disks * 2 ; m++) {
|
2011-10-11 05:45:26 +00:00
|
|
|
struct md_rdev *rdev = conf->mirrors[m].rdev;
|
2011-07-28 01:38:13 +00:00
|
|
|
struct bio *bio = r1_bio->bios[m];
|
|
|
|
if (bio->bi_end_io == NULL)
|
|
|
|
continue;
|
2017-06-03 07:38:06 +00:00
|
|
|
if (!bio->bi_status &&
|
2011-07-28 01:38:13 +00:00
|
|
|
test_bit(R1BIO_MadeGood, &r1_bio->state)) {
|
2012-05-20 23:27:00 +00:00
|
|
|
rdev_clear_badblocks(rdev, r1_bio->sector, s, 0);
|
2011-07-28 01:38:13 +00:00
|
|
|
}
|
2017-06-03 07:38:06 +00:00
|
|
|
if (bio->bi_status &&
|
2011-07-28 01:38:13 +00:00
|
|
|
test_bit(R1BIO_WriteError, &r1_bio->state)) {
|
|
|
|
if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0))
|
|
|
|
md_error(conf->mddev, rdev);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
put_buf(r1_bio);
|
|
|
|
md_done_sync(conf->mddev, s, 1);
|
|
|
|
}
|
|
|
|
|
2011-10-11 05:49:05 +00:00
|
|
|
static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
|
2011-07-28 01:38:13 +00:00
|
|
|
{
|
RAID1: a new I/O barrier implementation to remove resync window
'Commit 79ef3a8aa1cb ("raid1: Rewrite the implementation of iobarrier.")'
introduces a sliding resync window for the raid1 I/O barrier; this idea
limits I/O barriers to happen only inside a sliding resync window, so
regular I/Os outside this resync window don't need to wait for the
barrier any more. On a large raid1 device, it helps a lot to improve
parallel writing I/O throughput when there are background resync I/Os
performing at the same time.
The idea of the sliding resync window is awesome, but the code
complexity is a challenge. The sliding resync window requires several
variables to work collectively; this is complex and very hard to make
work correctly. Just grep "Fixes: 79ef3a8aa1" in the kernel git log:
there are 8 more patches fixing the original resync window patch. This
is not the end; any further related modification may easily introduce
more regressions.
Therefore I decided to implement a much simpler raid1 I/O barrier; by
removing the resync window code, I believe life will be much easier.
The brief idea of the simpler barrier is,
- Do not maintain a global unique resync window
- Use multiple hash buckets to reduce I/O barrier conflicts; regular
I/O only has to wait for a resync I/O when both of them have the same
barrier bucket index, and vice versa
- I/O barrier conflicts can be reduced to an acceptable number if there
are enough barrier buckets
Here I explain how the barrier buckets are designed,
- BARRIER_UNIT_SECTOR_SIZE
The whole LBA address space of a raid1 device is divided into multiple
barrier units, by the size of BARRIER_UNIT_SECTOR_SIZE.
Bio requests won't go across border of barrier unit size, that means
maximum bio size is BARRIER_UNIT_SECTOR_SIZE<<9 (64MB) in bytes.
For random I/O 64MB is large enough for both read and write requests,
for sequential I/O considering underlying block layer may merge them
into larger requests, 64MB is still good enough.
Neil also points out that for resync operation, "we want the resync to
move from region to region fairly quickly so that the slowness caused
by having to synchronize with the resync is averaged out over a fairly
small time frame". For full speed resync, 64MB should take less then 1
second. When resync is competing with other I/O, it could take up a few
minutes. Therefore 64MB size is fairly good range for resync.
- BARRIER_BUCKETS_NR
There are BARRIER_BUCKETS_NR buckets in total, which is defined by,
#define BARRIER_BUCKETS_NR_BITS (PAGE_SHIFT - 2)
#define BARRIER_BUCKETS_NR (1<<BARRIER_BUCKETS_NR_BITS)
this patch makes the bellowed members of struct r1conf from integer
to array of integers,
- int nr_pending;
- int nr_waiting;
- int nr_queued;
- int barrier;
+ int *nr_pending;
+ int *nr_waiting;
+ int *nr_queued;
+ int *barrier;
number of the array elements is defined as BARRIER_BUCKETS_NR. For 4KB
kernel space page size, (PAGE_SHIFT - 2) indecates there are 1024 I/O
barrier buckets, and each array of integers occupies single memory page.
1024 means for a request which is smaller than the I/O barrier unit size
has ~0.1% chance to wait for resync to pause, which is quite a small
enough fraction. Also requesting single memory page is more friendly to
kernel page allocator than larger memory size.
- I/O barrier bucket is indexed by bio start sector
If multiple I/O requests hit different I/O barrier units, they only need
to compete I/O barrier with other I/Os which hit the same I/O barrier
bucket index with each other. The index of a barrier bucket which a
bio should look for is calculated by sector_to_idx() which is defined
in raid1.h as an inline function,
static inline int sector_to_idx(sector_t sector)
{
return hash_long(sector >> BARRIER_UNIT_SECTOR_BITS,
BARRIER_BUCKETS_NR_BITS);
}
Here sector_nr is the start sector number of a bio.
- Single bio won't go across boundary of a I/O barrier unit
If a request goes across boundary of barrier unit, it will be split. A
bio may be split in raid1_make_request() or raid1_sync_request(), if
sectors returned by align_to_barrier_unit_end() is smaller than
original bio size.
Comparing to single sliding resync window,
- Currently resync I/O grows linearly, therefore regular and resync I/O
will conflict within a single barrier units. So the I/O behavior is
similar to single sliding resync window.
- But a barrier unit bucket is shared by all barrier units with identical
barrier uinit index, the probability of conflict might be higher
than single sliding resync window, in condition that writing I/Os
always hit barrier units which have identical barrier bucket indexs with
the resync I/Os. This is a very rare condition in real I/O work loads,
I cannot imagine how it could happen in practice.
- Therefore we can achieve a good enough low conflict rate with much
simpler barrier algorithm and implementation.
There are two changes should be noticed,
- In raid1d(), I change the code to decrease conf->nr_pending[idx] into
single loop, it looks like this,
spin_lock_irqsave(&conf->device_lock, flags);
conf->nr_queued[idx]--;
spin_unlock_irqrestore(&conf->device_lock, flags);
This change generates more spin lock operations, but in next patch of
this patch set, it will be replaced by a single line code,
atomic_dec(&conf->nr_queueud[idx]);
So we don't need to worry about spin lock cost here.
- Mainline raid1 code split original raid1_make_request() into
raid1_read_request() and raid1_write_request(). If the original bio
goes across an I/O barrier unit size, this bio will be split before
calling raid1_read_request() or raid1_write_request(), this change
the code logic more simple and clear.
- In this patch wait_barrier() is moved from raid1_make_request() to
raid1_write_request(). In raid_read_request(), original wait_barrier()
is replaced by raid1_read_request().
The differnece is wait_read_barrier() only waits if array is frozen,
using different barrier function in different code path makes the code
more clean and easy to read.
Changelog
V4:
- Add alloc_r1bio() to remove redundant r1bio memory allocation code.
- Fix many typos in patch comments.
- Use (PAGE_SHIFT - ilog2(sizeof(int))) to define BARRIER_BUCKETS_NR_BITS.
V3:
- Rebase the patch against latest upstream kernel code.
- Many fixes by review comments from Neil,
- Back to use pointers to replace arraries in struct r1conf
- Remove total_barriers from struct r1conf
- Add more patch comments to explain how/why the values of
BARRIER_UNIT_SECTOR_SIZE and BARRIER_BUCKETS_NR are decided.
- Use get_unqueued_pending() to replace get_all_pendings() and
get_all_queued()
- Increase bucket number from 512 to 1024
- Change code comments format by review from Shaohua.
V2:
- Use bio_split() to split the orignal bio if it goes across barrier unit
bounday, to make the code more simple, by suggestion from Shaohua and
Neil.
- Use hash_long() to replace original linear hash, to avoid a possible
confilict between resync I/O and sequential write I/O, by suggestion from
Shaohua.
- Add conf->total_barriers to record barrier depth, which is used to
control number of parallel sync I/O barriers, by suggestion from Shaohua.
- In V1 patch the bellowed barrier buckets related members in r1conf are
allocated in memory page. To make the code more simple, V2 patch moves
the memory space into struct r1conf, like this,
- int nr_pending;
- int nr_waiting;
- int nr_queued;
- int barrier;
+ int nr_pending[BARRIER_BUCKETS_NR];
+ int nr_waiting[BARRIER_BUCKETS_NR];
+ int nr_queued[BARRIER_BUCKETS_NR];
+ int barrier[BARRIER_BUCKETS_NR];
This change is by the suggestion from Shaohua.
- Remove some inrelavent code comments, by suggestion from Guoqing.
- Add a missing wait_barrier() before jumping to retry_write, in
raid1_make_write_request().
V1:
- Original RFC patch for comments
Signed-off-by: Coly Li <colyli@suse.de>
Cc: Johannes Thumshirn <jthumshirn@suse.de>
Cc: Guoqing Jiang <gqjiang@suse.com>
Reviewed-by: Neil Brown <neilb@suse.de>
Signed-off-by: Shaohua Li <shli@fb.com>
2017-02-17 19:05:56 +00:00
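To make the bucket indexing above concrete, here is a small userspace
sketch of sector_to_idx() under stated assumptions: 4KB pages,
BARRIER_UNIT_SECTOR_BITS = 17 (64MB units), and a golden-ratio
multiplicative hash standing in for the kernel's hash_long(). It is an
illustration only, not the kernel code itself.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT               12               /* assume 4KB pages */
#define BARRIER_UNIT_SECTOR_BITS 17               /* 1<<17 sectors = 64MB */
#define BARRIER_BUCKETS_NR_BITS  (PAGE_SHIFT - 2) /* 10 -> 1024 buckets */
#define BARRIER_BUCKETS_NR       (1 << BARRIER_BUCKETS_NR_BITS)

/* Approximation of the kernel's 64-bit hash_long(). */
static unsigned int hash_long(uint64_t val, unsigned int bits)
{
        return (unsigned int)((val * 0x61C8864680B583EBull) >> (64 - bits));
}

static int sector_to_idx(uint64_t sector)
{
        return hash_long(sector >> BARRIER_UNIT_SECTOR_BITS,
                         BARRIER_BUCKETS_NR_BITS);
}

int main(void)
{
        /* Sectors in the same 64MB unit share a bucket; the next unit
         * usually hashes to a different one. */
        printf("idx(0)      = %d\n", sector_to_idx(0));
        printf("idx(131071) = %d\n", sector_to_idx(131071)); /* same unit */
        printf("idx(131072) = %d\n", sector_to_idx(131072)); /* next unit */
        return 0;
}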
|
|
|
int m, idx;
|
2015-08-14 01:11:10 +00:00
|
|
|
bool fail = false;
|
2017-02-17 19:05:56 +00:00
|
|
|
|
2011-12-22 23:17:56 +00:00
|
|
|
for (m = 0; m < conf->raid_disks * 2 ; m++)
|
2011-07-28 01:38:13 +00:00
|
|
|
if (r1_bio->bios[m] == IO_MADE_GOOD) {
|
2011-10-11 05:45:26 +00:00
|
|
|
struct md_rdev *rdev = conf->mirrors[m].rdev;
|
2011-07-28 01:38:13 +00:00
|
|
|
rdev_clear_badblocks(rdev,
|
|
|
|
r1_bio->sector,
|
2012-05-20 23:27:00 +00:00
|
|
|
r1_bio->sectors, 0);
|
2011-07-28 01:38:13 +00:00
|
|
|
rdev_dec_pending(rdev, conf->mddev);
|
|
|
|
} else if (r1_bio->bios[m] != NULL) {
|
|
|
|
/* This drive got a write error. We need to
|
|
|
|
* narrow down and record precise write
|
|
|
|
* errors.
|
|
|
|
*/
|
2015-08-14 01:11:10 +00:00
|
|
|
fail = true;
|
2011-07-28 01:38:13 +00:00
|
|
|
if (!narrow_write_error(r1_bio, m)) {
|
|
|
|
md_error(conf->mddev,
|
|
|
|
conf->mirrors[m].rdev);
|
|
|
|
/* an I/O failed, we can't clear the bitmap */
|
|
|
|
set_bit(R1BIO_Degraded, &r1_bio->state);
|
|
|
|
}
|
|
|
|
rdev_dec_pending(conf->mirrors[m].rdev,
|
|
|
|
conf->mddev);
|
|
|
|
}
|
2015-08-14 01:11:10 +00:00
|
|
|
if (fail) {
|
|
|
|
spin_lock_irq(&conf->device_lock);
|
|
|
|
list_add(&r1_bio->retry_list, &conf->bio_end_io_list);
|
2017-02-17 19:05:56 +00:00
|
|
|
idx = sector_to_idx(r1_bio->sector);
|
RAID1: avoid unnecessary spin locks in I/O barrier code
When I ran a parallel read performance test on an md raid1 device with
two NVMe SSDs, I observed surprisingly bad throughput: with fio at 64KB
block size, 40 sequential read I/O jobs and 128 iodepth, the overall
throughput was only 2.7GB/s, around 50% of the ideal performance number.
perf reports that the lock contention happens in the allow_barrier() and
wait_barrier() code,
- 41.41% fio [kernel.kallsyms] [k] _raw_spin_lock_irqsave
- _raw_spin_lock_irqsave
+ 89.92% allow_barrier
+ 9.34% __wake_up
- 37.30% fio [kernel.kallsyms] [k] _raw_spin_lock_irq
- _raw_spin_lock_irq
- 100.00% wait_barrier
The reason is that these I/O barrier related functions,
- raise_barrier()
- lower_barrier()
- wait_barrier()
- allow_barrier()
always take conf->resync_lock first, even when there is only regular
read I/O and no resync I/O at all. This is a huge performance penalty.
The solution is a lockless-like algorithm in the I/O barrier code, which
only takes conf->resync_lock when it has to.
The original idea is from Hannes Reinecke, and Neil Brown provided
comments to improve it. I continued to work on it and brought the patch
into its current form.
In the new, simpler raid1 I/O barrier implementation, there are two
wait-barrier functions,
- wait_barrier()
Which calls _wait_barrier(), is used for regular write I/O. If there is
resync I/O happening on the same I/O barrier bucket, or the whole array
is frozen, the task will wait until there is no barrier on the same
barrier bucket, or the whole array is unfrozen.
- wait_read_barrier()
Since regular read I/O won't interfere with resync I/O (read_balance()
will make sure only uptodate data is read out), it is unnecessary for
regular read I/Os to wait for the barrier; waiting is only necessary
when the whole array is frozen.
The operations on conf->nr_pending[idx], conf->nr_waiting[idx] and
conf->barrier[idx] are very carefully designed in raise_barrier(),
lower_barrier(), _wait_barrier() and wait_read_barrier(), in order to
avoid unnecessary spin locks in these functions. Once
conf->nr_pending[idx] is increased, a resync I/O with the same barrier
bucket index has to wait in raise_barrier(). Then in _wait_barrier(), if
no barrier is raised on the same barrier bucket index and the array is
not frozen, the regular I/O doesn't need to hold conf->resync_lock; it
can just increase conf->nr_pending[idx] and return to its caller.
wait_read_barrier() is very similar to _wait_barrier(); the only
difference is that it only waits when the array is frozen. For heavy
parallel read I/O, the lockless I/O barrier code gets rid of almost all
spin lock cost.
This patch significantly improves raid1 read performance. In my testing,
a raid1 device built from two NVMe SSDs, running fio with 64KB blocksize,
40 sequential read I/O jobs and 128 iodepth, improves its overall
throughput from 2.7GB/s to 4.6GB/s (+70%).
Changelog
V4:
- Change conf->nr_queued[] to atomic_t.
- Define BARRIER_BUCKETS_NR_BITS by (PAGE_SHIFT - ilog2(sizeof(atomic_t)))
V3:
- Add smp_mb__after_atomic() as Shaohua and Neil suggested.
- Change conf->nr_queued[] from atomic_t to int.
- Change conf->array_frozen from atomic_t back to int, and use
READ_ONCE(conf->array_frozen) to check the value of conf->array_frozen
in _wait_barrier() and wait_read_barrier().
- In _wait_barrier() and wait_read_barrier(), add a call to
wake_up(&conf->wait_barrier) after atomic_dec(&conf->nr_pending[idx]),
to fix a deadlock between _wait_barrier()/wait_read_barrier() and
freeze_array().
V2:
- Remove a spin_lock/unlock pair in raid1d().
- Add more code comments to explain why there is no race when checking
two atomic_t variables at the same time.
V1:
- Original RFC patch for comments.
Signed-off-by: Coly Li <colyli@suse.de>
Cc: Shaohua Li <shli@fb.com>
Cc: Hannes Reinecke <hare@suse.com>
Cc: Johannes Thumshirn <jthumshirn@suse.de>
Cc: Guoqing Jiang <gqjiang@suse.com>
Reviewed-by: Neil Brown <neilb@suse.de>
Signed-off-by: Shaohua Li <shli@fb.com>
2017-02-17 19:05:57 +00:00
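A minimal userspace sketch of the lockless fast path described above,
using C11 atomics. The struct and function names mirror the patch but
are simplifications written for illustration: there are no wait queues,
and the slow path that sleeps under conf->resync_lock is not shown.

#include <stdio.h>
#include <stdatomic.h>
#include <stdbool.h>

#define BARRIER_BUCKETS_NR 1024

struct r1conf_sketch {
        atomic_int  nr_pending[BARRIER_BUCKETS_NR];
        atomic_int  barrier[BARRIER_BUCKETS_NR];
        atomic_bool array_frozen;
};

/* Fast path of _wait_barrier(): publish nr_pending[idx] first, then
 * check for a raised barrier on the same bucket. No lock is taken
 * unless a conflict is seen, in which case we back out and the caller
 * must fall back to the locked, sleeping slow path. */
static bool try_wait_barrier(struct r1conf_sketch *conf, int idx)
{
        atomic_fetch_add(&conf->nr_pending[idx], 1);
        atomic_thread_fence(memory_order_seq_cst); /* pairs with raise_barrier() */

        if (!atomic_load(&conf->array_frozen) &&
            atomic_load(&conf->barrier[idx]) == 0)
                return true;                    /* proceed without the lock */

        atomic_fetch_sub(&conf->nr_pending[idx], 1); /* back out, go slow path */
        return false;
}

int main(void)
{
        static struct r1conf_sketch conf;       /* zero-initialized */

        printf("no barrier raised: %d\n", try_wait_barrier(&conf, 7));
        atomic_store(&conf.barrier[7], 1);      /* resync raises bucket 7 */
        printf("barrier raised:    %d\n", try_wait_barrier(&conf, 7));
        return 0;
}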
|
|
|
atomic_inc(&conf->nr_queued[idx]);
|
2015-08-14 01:11:10 +00:00
|
|
|
spin_unlock_irq(&conf->device_lock);
|
2017-02-17 19:05:57 +00:00
|
|
|
/*
|
|
|
|
* In case freeze_array() is waiting for the condition
|
|
|
|
* get_unqueued_pending() == extra to be true.
|
|
|
|
*/
|
|
|
|
wake_up(&conf->wait_barrier);
|
2015-08-14 01:11:10 +00:00
|
|
|
md_wakeup_thread(conf->mddev->thread);
|
2015-10-24 05:02:16 +00:00
|
|
|
} else {
|
|
|
|
if (test_bit(R1BIO_WriteError, &r1_bio->state))
|
|
|
|
close_write(r1_bio);
|
2015-08-14 01:11:10 +00:00
|
|
|
raid_end_bio_io(r1_bio);
|
2015-10-24 05:02:16 +00:00
|
|
|
}
|
2011-07-28 01:38:13 +00:00
|
|
|
}
|
|
|
|
|
2011-10-11 05:49:05 +00:00
|
|
|
static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
|
2011-07-28 01:38:13 +00:00
|
|
|
{
|
2011-10-11 05:47:53 +00:00
|
|
|
struct mddev *mddev = conf->mddev;
|
2011-07-28 01:38:13 +00:00
|
|
|
struct bio *bio;
|
2011-10-11 05:45:26 +00:00
|
|
|
struct md_rdev *rdev;
|
2011-07-28 01:38:13 +00:00
|
|
|
|
|
|
|
clear_bit(R1BIO_ReadError, &r1_bio->state);
|
|
|
|
/* we got a read error. Maybe the drive is bad. Maybe just
|
|
|
|
* the block and we can fix it.
|
|
|
|
* We freeze all other IO, and try reading the block from
|
|
|
|
* other devices. When we find one, we re-write
|
|
|
|
* and check that this fixes the read error.
|
|
|
|
* This is all done synchronously while the array is
|
|
|
|
* frozen
|
|
|
|
*/
|
2016-10-28 12:45:58 +00:00
|
|
|
|
|
|
|
bio = r1_bio->bios[r1_bio->read_disk];
|
|
|
|
bio_put(bio);
|
|
|
|
r1_bio->bios[r1_bio->read_disk] = NULL;
|
|
|
|
|
2016-11-18 05:16:12 +00:00
|
|
|
rdev = conf->mirrors[r1_bio->read_disk].rdev;
|
|
|
|
if (mddev->ro == 0
|
|
|
|
&& !test_bit(FailFast, &rdev->flags)) {
|
2013-06-12 01:01:22 +00:00
|
|
|
freeze_array(conf, 1);
|
2011-07-28 01:38:13 +00:00
|
|
|
fix_read_error(conf, r1_bio->read_disk,
|
|
|
|
r1_bio->sector, r1_bio->sectors);
|
|
|
|
unfreeze_array(conf);
|
2018-05-02 11:08:11 +00:00
|
|
|
} else if (mddev->ro == 0 && test_bit(FailFast, &rdev->flags)) {
|
|
|
|
md_error(mddev, rdev);
|
2016-10-28 12:45:58 +00:00
|
|
|
} else {
|
|
|
|
r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED;
|
|
|
|
}
|
|
|
|
|
2016-11-18 05:16:12 +00:00
|
|
|
rdev_dec_pending(rdev, conf->mddev);
|
md/raid1: simplify handle_read_error().
handle_read_error() duplicates a lot of the work that raid1_read_request()
does, so it makes sense to just use that function.
This doesn't quite work, as handle_read_error() relies on the same r1bio
being re-used so that, in the case of a read-only array, setting
IO_BLOCKED in r1bio->bios[] ensures read_balance() won't re-use
that device.
So we need to allow an r1bio to be passed to raid1_read_request(), and to
have that function mostly initialise the r1bio, but leave the bios[]
array untouched.
Two things in handle_read_error() that need to be preserved are the
warning messages it prints, so they are conditionally added to
raid1_read_request().
Note that this highlights a minor bug in alloc_r1bio(): it doesn't
initialise the bios[] array, so it is possible that old content is there,
which might cause read_balance() to ignore some devices for no good
reason.
With this change, we no longer need inc_pending(), or the sectors_handled
arg to alloc_r1bio().
As handle_read_error() is called from raid1d() and allocates memory,
there is a tiny chance of a deadlock: all elements of the various pools
could be queued waiting for raid1 to handle them, and there may be no
extra memory free.
Achieving guaranteed forward progress would probably require a second
thread and another mempool. Instead of that complexity, add
__GFP_HIGH to any allocations made when raid1_read_request() is called
from raid1d.
Signed-off-by: NeilBrown <neilb@suse.com>
Signed-off-by: Shaohua Li <shli@fb.com>
2017-04-05 04:05:50 +00:00
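As a minimal illustration of the allocation-flag choice described above:
gfp_t, GFP_NOIO and __GFP_HIGH are real kernel symbols, but the helper
below is hypothetical; raid1.c passes the flags directly rather than
through such a wrapper.

#include <linux/gfp.h>
#include <linux/types.h>

/* Hypothetical helper: when the caller is the raid1d thread, add
 * __GFP_HIGH so the allocation may dip into emergency reserves.
 * raid1d must keep making progress for queued r1bios to be retired,
 * so blocking it on a plain GFP_NOIO allocation risks the deadlock
 * described above. */
static inline gfp_t r1bio_gfp(bool from_raid1d)
{
        return from_raid1d ? (GFP_NOIO | __GFP_HIGH) : GFP_NOIO;
}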
|
|
|
allow_barrier(conf, r1_bio->sector);
|
|
|
|
bio = r1_bio->master_bio;
|
2011-07-28 01:38:13 +00:00
|
|
|
|
2017-04-05 04:05:50 +00:00
|
|
|
/* Reuse the old r1_bio so that the IO_BLOCKED settings are preserved */
|
|
|
|
r1_bio->state = 0;
|
|
|
|
raid1_read_request(mddev, bio, r1_bio->sectors, r1_bio);
|
2011-07-28 01:38:13 +00:00
|
|
|
}
|
|
|
|
|
2012-10-11 02:34:00 +00:00
|
|
|
static void raid1d(struct md_thread *thread)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2012-10-11 02:34:00 +00:00
|
|
|
struct mddev *mddev = thread->mddev;
|
2011-10-11 05:48:43 +00:00
|
|
|
struct r1bio *r1_bio;
|
2005-04-16 22:20:36 +00:00
|
|
|
unsigned long flags;
|
2011-10-11 05:49:05 +00:00
|
|
|
struct r1conf *conf = mddev->private;
|
2005-04-16 22:20:36 +00:00
|
|
|
struct list_head *head = &conf->retry_list;
|
2011-04-18 08:25:41 +00:00
|
|
|
struct blk_plug plug;
|
2017-02-17 19:05:56 +00:00
|
|
|
int idx;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
md_check_recovery(mddev);
|
2011-04-18 08:25:41 +00:00
|
|
|
|
2015-08-14 01:11:10 +00:00
|
|
|
if (!list_empty_careful(&conf->bio_end_io_list) &&
|
2016-12-08 23:48:19 +00:00
|
|
|
!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
|
2015-08-14 01:11:10 +00:00
|
|
|
LIST_HEAD(tmp);
|
|
|
|
spin_lock_irqsave(&conf->device_lock, flags);
|
2017-02-17 19:05:56 +00:00
|
|
|
if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
|
|
|
|
list_splice_init(&conf->bio_end_io_list, &tmp);
|
2015-08-14 01:11:10 +00:00
|
|
|
spin_unlock_irqrestore(&conf->device_lock, flags);
|
|
|
|
while (!list_empty(&tmp)) {
|
2015-10-01 19:17:43 +00:00
|
|
|
r1_bio = list_first_entry(&tmp, struct r1bio,
|
|
|
|
retry_list);
|
2015-08-14 01:11:10 +00:00
|
|
|
list_del(&r1_bio->retry_list);
|
RAID1: a new I/O barrier implementation to remove resync window
Commit 79ef3a8aa1cb ("raid1: Rewrite the implementation of iobarrier.")
introduced a sliding resync window for the raid1 I/O barrier. This idea
limits I/O barriers to happen only inside a sliding resync window; regular
I/Os outside this resync window don't need to wait for the barrier any
more. On a large raid1 device, it helps a lot to improve parallel writing
I/O throughput when there are background resync I/Os performing at the
same time.
The idea of the sliding resync window is awesome, but the code complexity
is a challenge. The sliding resync window requires several variables to
work collectively; this is complex and very hard to make work correctly.
Just grep "Fixes: 79ef3a8aa1" in the kernel git log: there are 8 more
patches fixing the original resync window patch. And this is not the end;
any further related modification may easily introduce more regressions.
Therefore I decided to implement a much simpler raid1 I/O barrier; by
removing the resync window code, I believe life will be much easier.
The brief idea of the simpler barrier is,
- Do not maintain a globally unique resync window
- Use multiple hash buckets to reduce I/O barrier conflicts; a regular
  I/O only has to wait for a resync I/O when both of them have the same
  barrier bucket index, and vice versa.
- I/O barriers can be reduced to an acceptable number if there are enough
  barrier buckets
Here is how the barrier buckets are designed,
- BARRIER_UNIT_SECTOR_SIZE
  The whole LBA address space of a raid1 device is divided into multiple
  barrier units, by the size of BARRIER_UNIT_SECTOR_SIZE.
  Bio requests won't cross the border of a barrier unit, which means the
  maximum bio size is BARRIER_UNIT_SECTOR_SIZE<<9 (64MB) in bytes.
  For random I/O 64MB is large enough for both read and write requests;
  for sequential I/O, considering that the underlying block layer may
  merge them into larger requests, 64MB is still good enough.
  Neil also points out that for the resync operation, "we want the resync
  to move from region to region fairly quickly so that the slowness caused
  by having to synchronize with the resync is averaged out over a fairly
  small time frame". At full speed, resyncing 64MB should take less than 1
  second. When resync is competing with other I/O, it could take up to a
  few minutes. Therefore 64MB is a fairly good unit size for resync.
- BARRIER_BUCKETS_NR
  There are BARRIER_BUCKETS_NR buckets in total, defined by,
        #define BARRIER_BUCKETS_NR_BITS   (PAGE_SHIFT - 2)
        #define BARRIER_BUCKETS_NR        (1<<BARRIER_BUCKETS_NR_BITS)
  This patch changes the following members of struct r1conf from integers
  to arrays of integers,
        -       int                     nr_pending;
        -       int                     nr_waiting;
        -       int                     nr_queued;
        -       int                     barrier;
        +       int                     *nr_pending;
        +       int                     *nr_waiting;
        +       int                     *nr_queued;
        +       int                     *barrier;
  The number of array elements is defined as BARRIER_BUCKETS_NR. For a 4KB
  kernel page size, (PAGE_SHIFT - 2) indicates there are 1024 I/O barrier
  buckets, and each array of integers occupies a single memory page.
  With 1024 buckets, a request smaller than the I/O barrier unit size has
  a ~0.1% (1/1024) chance of waiting for resync to pause, which is a small
  enough fraction. Also, requesting a single memory page is friendlier to
  the kernel page allocator than a larger memory size.
- I/O barrier bucket is indexed by bio start sector
  If multiple I/O requests hit different I/O barrier units, they only need
  to compete for the I/O barrier with other I/Os that hit the same I/O
  barrier bucket index. The index of the barrier bucket which a bio should
  look for is calculated by sector_to_idx(), defined in raid1.h as an
  inline function,
        static inline int sector_to_idx(sector_t sector)
        {
                return hash_long(sector >> BARRIER_UNIT_SECTOR_BITS,
                                 BARRIER_BUCKETS_NR_BITS);
        }
  Here sector is the start sector number of the bio (see the standalone
  sketch after these notes).
- A single bio won't cross the boundary of an I/O barrier unit
  If a request crosses the boundary of a barrier unit, it will be split. A
  bio may be split in raid1_make_request() or raid1_sync_request(), if the
  sector count returned by align_to_barrier_unit_end() is smaller than the
  original bio size.
Compared to the single sliding resync window,
- Currently resync I/O grows linearly, so regular and resync I/O will
  conflict within a single barrier unit. In this respect the I/O behavior
  is similar to the single sliding resync window.
- But a barrier bucket is shared by all barrier units with an identical
  barrier bucket index, so the probability of conflict might be higher
  than with the single sliding resync window, in the case that writing
  I/Os always hit barrier units which have identical barrier bucket
  indexes to the resync I/Os. This is a very rare condition in real I/O
  workloads; I cannot imagine how it could happen in practice.
- Therefore we can achieve a good enough low conflict rate with a much
  simpler barrier algorithm and implementation.
There are a few changes that should be noticed,
- In raid1d(), I changed the code that decreases conf->nr_queued[idx] into
  a single loop; it looks like this,
        spin_lock_irqsave(&conf->device_lock, flags);
        conf->nr_queued[idx]--;
        spin_unlock_irqrestore(&conf->device_lock, flags);
  This change generates more spin lock operations, but in the next patch
  of this patch set it will be replaced by a single line of code,
        atomic_dec(&conf->nr_queued[idx]);
  so we don't need to worry about the spin lock cost here.
- Mainline raid1 code splits the original raid1_make_request() into
  raid1_read_request() and raid1_write_request(). If the original bio
  crosses an I/O barrier unit boundary, the bio will be split before
  calling raid1_read_request() or raid1_write_request(); this change makes
  the code logic simpler and clearer.
- In this patch wait_barrier() is moved from raid1_make_request() to
  raid1_write_request(). In raid1_read_request(), the original
  wait_barrier() is replaced by wait_read_barrier().
  The difference is that wait_read_barrier() only waits if the array is
  frozen; using a different barrier function in each code path makes the
  code cleaner and easier to read.
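For illustration only, below is a minimal, self-contained userspace sketch
of the bucket indexing and barrier-unit alignment described above. It is
not the kernel code: hash_long_model() and its golden-ratio constant stand
in for the kernel's hash_long(), align_to_barrier_unit_end() here is an
assumed reconstruction of the helper's behavior, and
BARRIER_UNIT_SECTOR_BITS = 17 is derived from the 64MB unit size stated
above.

        /* Userspace model of the barrier-bucket math; sketch only. */
        #include <stdint.h>
        #include <stdio.h>

        #define PAGE_SHIFT                12   /* assume 4KB pages */
        #define BARRIER_BUCKETS_NR_BITS   (PAGE_SHIFT - 2)   /* 1024 buckets */
        #define BARRIER_BUCKETS_NR        (1UL << BARRIER_BUCKETS_NR_BITS)
        #define BARRIER_UNIT_SECTOR_BITS  17   /* 2^17 sectors == 64MB */
        #define BARRIER_UNIT_SECTOR_SIZE  (1UL << BARRIER_UNIT_SECTOR_BITS)

        typedef uint64_t sector_t;

        /* Multiplicative hash in the spirit of the kernel's hash_long(). */
        static unsigned int hash_long_model(uint64_t val, unsigned int bits)
        {
                return (unsigned int)((val * 0x9E3779B97F4A7C15ULL) >> (64 - bits));
        }

        static int sector_to_idx(sector_t sector)
        {
                return hash_long_model(sector >> BARRIER_UNIT_SECTOR_BITS,
                                       BARRIER_BUCKETS_NR_BITS);
        }

        /* Assumed behavior: clamp a request so it never crosses a
         * 64MB barrier-unit boundary. */
        static sector_t align_to_barrier_unit_end(sector_t start, sector_t sectors)
        {
                sector_t len = BARRIER_UNIT_SECTOR_SIZE -
                               (start & (BARRIER_UNIT_SECTOR_SIZE - 1));
                return sectors < len ? sectors : len;
        }

        int main(void)
        {
                sector_t start = 130000, sectors = 4096;

                printf("bucket idx = %d\n", sector_to_idx(start));
                printf("sectors until unit boundary = %llu\n",
                       (unsigned long long)align_to_barrier_unit_end(start, sectors));
                return 0;
        }

Two bios that land in different 64MB units will usually hash to different
buckets, so a write in one unit rarely has to wait for a resync pass that
is busy in another bucket.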
Changelog
V4:
- Add alloc_r1bio() to remove redundant r1bio memory allocation code.
- Fix many typos in patch comments.
- Use (PAGE_SHIFT - ilog2(sizeof(int))) to define BARRIER_BUCKETS_NR_BITS.
V3:
- Rebase the patch against the latest upstream kernel code.
- Many fixes from review comments by Neil,
  - Go back to using pointers instead of arrays in struct r1conf
  - Remove total_barriers from struct r1conf
  - Add more patch comments to explain how/why the values of
    BARRIER_UNIT_SECTOR_SIZE and BARRIER_BUCKETS_NR are decided.
  - Use get_unqueued_pending() to replace get_all_pendings() and
    get_all_queued()
  - Increase bucket number from 512 to 1024
- Change code comments format per review from Shaohua.
V2:
- Use bio_split() to split the original bio if it crosses a barrier unit
  boundary, to make the code simpler, by suggestion from Shaohua and
  Neil.
- Use hash_long() to replace the original linear hash, to avoid a possible
  conflict between resync I/O and sequential write I/O, by suggestion from
  Shaohua.
- Add conf->total_barriers to record barrier depth, which is used to
  control the number of parallel sync I/O barriers, by suggestion from
  Shaohua.
- In the V1 patch the barrier-bucket related members of r1conf listed below
  were allocated in a memory page. To make the code simpler, the V2 patch
  moves the memory space into struct r1conf, like this,
        -       int                     nr_pending;
        -       int                     nr_waiting;
        -       int                     nr_queued;
        -       int                     barrier;
        +       int                     nr_pending[BARRIER_BUCKETS_NR];
        +       int                     nr_waiting[BARRIER_BUCKETS_NR];
        +       int                     nr_queued[BARRIER_BUCKETS_NR];
        +       int                     barrier[BARRIER_BUCKETS_NR];
  This change is by suggestion from Shaohua.
- Remove some irrelevant code comments, by suggestion from Guoqing.
- Add a missing wait_barrier() before jumping to retry_write, in
raid1_make_write_request().
V1:
- Original RFC patch for comments
Signed-off-by: Coly Li <colyli@suse.de>
Cc: Johannes Thumshirn <jthumshirn@suse.de>
Cc: Guoqing Jiang <gqjiang@suse.com>
Reviewed-by: Neil Brown <neilb@suse.de>
Signed-off-by: Shaohua Li <shli@fb.com>
2017-02-17 19:05:56 +00:00
|
|
|
idx = sector_to_idx(r1_bio->sector);
|
RAID1: avoid unnecessary spin locks in I/O barrier code
When I ran a parallel reading performance test on an md raid1 device with
two NVMe SSDs, I observed surprisingly bad throughput: with fio at 64KB
block size, 40 seq read I/O jobs and 128 iodepth, overall throughput was
only 2.7GB/s, around 50% of the ideal performance number.
The perf report shows that locking contention happens in allow_barrier()
and wait_barrier(),
   - 41.41%  fio [kernel.kallsyms]     [k] _raw_spin_lock_irqsave
     - _raw_spin_lock_irqsave
       + 89.92% allow_barrier
       + 9.34% __wake_up
   - 37.30%  fio [kernel.kallsyms]     [k] _raw_spin_lock_irq
     - _raw_spin_lock_irq
       - 100.00% wait_barrier
The reason is that these I/O barrier related functions,
 - raise_barrier()
 - lower_barrier()
 - wait_barrier()
 - allow_barrier()
always take conf->resync_lock first, even when there are only regular
reading I/Os and no resync I/O at all. This is a huge performance penalty.
The solution is a lockless-like algorithm in the I/O barrier code, holding
conf->resync_lock only when it has to.
The original idea is from Hannes Reinecke, and Neil Brown provided
comments to improve it. I continued to work on it and brought the patch
into its current form.
In the new, simpler raid1 I/O barrier implementation, there are two
wait-barrier functions,
 - wait_barrier()
   Which calls _wait_barrier(), is used for regular write I/O. If there is
   resync I/O happening on the same I/O barrier bucket, or the whole array
   is frozen, the task will wait until there is no barrier on the same
   barrier bucket, or the whole array is unfrozen.
 - wait_read_barrier()
   Since regular read I/O won't interfere with resync I/O (read_balance()
   will make sure only up-to-date data is read out), it is unnecessary to
   wait for a barrier in regular read I/Os; waiting is only necessary when
   the whole array is frozen.
The operations on conf->nr_pending[idx], conf->nr_waiting[idx] and
conf->barrier[idx] are very carefully designed in raise_barrier(),
lower_barrier(), _wait_barrier() and wait_read_barrier(), in order to
avoid unnecessary spin locks in these functions. Once conf->nr_pending[idx]
is increased, a resync I/O with the same barrier bucket index has to wait
in raise_barrier(). Then in _wait_barrier(), if no barrier is raised on
the same barrier bucket index and the array is not frozen, the regular I/O
doesn't need to hold conf->resync_lock; it can just increase
conf->nr_pending[idx] and return to its caller. wait_read_barrier() is
very similar to _wait_barrier(); the only difference is that it only waits
when the array is frozen. For heavy parallel reading I/Os, the lockless
I/O barrier code gets rid of almost all spin lock cost.
This patch significantly improves raid1 reading performance. In my
testing, a raid1 device built from two NVMe SSDs, running fio with 64KB
blocksize, 40 seq read I/O jobs and 128 iodepth, sees overall throughput
increase from 2.7GB/s to 4.6GB/s (+70%).
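As a hedged illustration of that fast path, here is a small userspace
sketch using C11 atomics in place of the kernel's atomic_t and wait
queues. The slow path is reduced to a stub, seq_cst ordering stands in
for the explicit smp_mb__after_atomic() ordering the patch relies on, and
struct r1conf_model is invented for the sketch; this is not the kernel
implementation.

        #include <stdatomic.h>

        #define BARRIER_BUCKETS_NR 1024

        struct r1conf_model {
                atomic_int  nr_pending[BARRIER_BUCKETS_NR];
                atomic_int  barrier[BARRIER_BUCKETS_NR];
                atomic_bool array_frozen;
        };

        /* Stub: the kernel takes conf->resync_lock here, bumps
         * nr_waiting[idx] and sleeps until the barrier is lowered. */
        static void slow_path_wait(struct r1conf_model *conf, int idx)
        {
                (void)conf;
                (void)idx;
        }

        static void wait_barrier_model(struct r1conf_model *conf, int idx)
        {
                /* Fast path: advertise the pending I/O first... */
                atomic_fetch_add(&conf->nr_pending[idx], 1);
                /* ...then check for a raised barrier or a frozen array. */
                if (!atomic_load(&conf->barrier[idx]) &&
                    !atomic_load(&conf->array_frozen))
                        return;     /* no conflict: no lock was taken */

                /* Conflict: undo the optimistic increment, go slow. */
                atomic_fetch_sub(&conf->nr_pending[idx], 1);
                slow_path_wait(conf, idx);
        }

The point of the ordering is that a resync thread in raise_barrier() that
has already set barrier[idx] is guaranteed to observe the incremented
nr_pending[idx], or the regular I/O is guaranteed to observe the raised
barrier; neither side can miss the other.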
Changelog
V4:
- Change conf->nr_queued[] to atomic_t.
- Define BARRIER_BUCKETS_NR_BITS by (PAGE_SHIFT - ilog2(sizeof(atomic_t)))
V3:
- Add smp_mb__after_atomic() as Shaohua and Neil suggested.
- Change conf->nr_queued[] from atomic_t to int.
- Change conf->array_frozen from atomic_t back to int, and use
READ_ONCE(conf->array_frozen) to check value of conf->array_frozen
in _wait_barrier() and wait_read_barrier().
- In _wait_barrier() and wait_read_barrier(), add a call to
wake_up(&conf->wait_barrier) after atomic_dec(&conf->nr_pending[idx]),
to fix a deadlock between _wait_barrier()/wait_read_barrier() and
freeze_array().
V2:
- Remove a spin_lock/unlock pair in raid1d().
- Add more code comments to explain why there is no race when checking two
  atomic_t variables at the same time.
V1:
- Original RFC patch for comments.
Signed-off-by: Coly Li <colyli@suse.de>
Cc: Shaohua Li <shli@fb.com>
Cc: Hannes Reinecke <hare@suse.com>
Cc: Johannes Thumshirn <jthumshirn@suse.de>
Cc: Guoqing Jiang <gqjiang@suse.com>
Reviewed-by: Neil Brown <neilb@suse.de>
Signed-off-by: Shaohua Li <shli@fb.com>
2017-02-17 19:05:57 +00:00
|
|
|
atomic_dec(&conf->nr_queued[idx]);
|
2015-10-24 05:02:16 +00:00
|
|
|
if (mddev->degraded)
|
|
|
|
set_bit(R1BIO_Degraded, &r1_bio->state);
|
|
|
|
if (test_bit(R1BIO_WriteError, &r1_bio->state))
|
|
|
|
close_write(r1_bio);
|
2015-08-14 01:11:10 +00:00
|
|
|
raid_end_bio_io(r1_bio);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-04-18 08:25:41 +00:00
|
|
|
blk_start_plug(&plug);
|
2005-04-16 22:20:36 +00:00
|
|
|
for (;;) {
|
2005-06-22 00:17:23 +00:00
|
|
|
|
2012-07-31 07:08:14 +00:00
|
|
|
flush_pending_writes(conf);
|
2005-06-22 00:17:23 +00:00
|
|
|
|
2008-03-04 22:29:29 +00:00
|
|
|
spin_lock_irqsave(&conf->device_lock, flags);
|
|
|
|
if (list_empty(head)) {
|
|
|
|
spin_unlock_irqrestore(&conf->device_lock, flags);
|
2005-04-16 22:20:36 +00:00
|
|
|
break;
|
2008-03-04 22:29:29 +00:00
|
|
|
}
|
2011-10-11 05:48:43 +00:00
|
|
|
r1_bio = list_entry(head->prev, struct r1bio, retry_list);
|
2005-04-16 22:20:36 +00:00
|
|
|
list_del(head->prev);
|
RAID1: a new I/O barrier implementation to remove resync window
2017-02-17 19:05:56 +00:00
|
|
|
idx = sector_to_idx(r1_bio->sector);
|
RAID1: avoid unnecessary spin locks in I/O barrier code
2017-02-17 19:05:57 +00:00
|
|
|
atomic_dec(&conf->nr_queued[idx]);
|
2005-04-16 22:20:36 +00:00
|
|
|
spin_unlock_irqrestore(&conf->device_lock, flags);
|
|
|
|
|
|
|
|
mddev = r1_bio->mddev;
|
2009-06-16 06:54:21 +00:00
|
|
|
conf = mddev->private;
|
2011-07-28 01:31:49 +00:00
|
|
|
if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
|
2011-07-28 01:33:00 +00:00
|
|
|
if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
|
2011-07-28 01:38:13 +00:00
|
|
|
test_bit(R1BIO_WriteError, &r1_bio->state))
|
|
|
|
handle_sync_write_finished(conf, r1_bio);
|
|
|
|
else
|
2011-07-28 01:31:49 +00:00
|
|
|
sync_request_write(mddev, r1_bio);
|
2011-07-28 01:32:41 +00:00
|
|
|
} else if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
|
2011-07-28 01:38:13 +00:00
|
|
|
test_bit(R1BIO_WriteError, &r1_bio->state))
|
|
|
|
handle_write_finished(conf, r1_bio);
|
|
|
|
else if (test_bit(R1BIO_ReadError, &r1_bio->state))
|
|
|
|
handle_read_error(conf, r1_bio);
|
|
|
|
else
|
md/raid1: simplify the splitting of requests.
raid1 currently splits requests in two different ways for
two different reasons.
First, bio_split() is used to ensure the bio fits within a
resync accounting region.
Second, multiple r1bios are allocated for each bio to handle
the possibility of known bad blocks on some devices.
This can be simplified to just use bio_split() once, and not
use multiple r1bios.
We delay the split until we know the maximum bio size that can
be handled with a single r1bio, and then split the bio and
queue the remainder for later handling.
This avoids all loops inside raid1.c request handling. Just
a single read, or a single set of writes, is submitted to
lower-level devices for each bio that comes from
generic_make_request().
When the bio needs to be split, generic_make_request() will
do the necessary looping and call md_make_request() multiple
times.
raid1_make_request() no longer queues requests for raid1 to handle,
so we can remove that branch from the 'if'.
This patch also creates a new private bio_set
(conf->bio_split) for splitting bios. Using fs_bio_set
is wrong, as it is meant to be used by filesystems, not
block devices. Using it inside md can lead to deadlocks
under high memory pressure.
Delete unused variable in raid1_write_request() (Shaohua)
Signed-off-by: NeilBrown <neilb@suse.com>
Signed-off-by: Shaohua Li <shli@fb.com>
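As a rough illustration of the delayed-split idea (not the md code
itself), the toy model below splits a request once at a computed limit
and pushes the tail back on a submission queue, the way raid1 splits with
bio_split()/bio_chain() and resubmits through generic_make_request();
struct bio_model, the queue and the numbers are invented for the sketch.

        #include <stdio.h>
        #include <stdint.h>

        typedef uint64_t sector_t;

        struct bio_model {
                sector_t start;
                sector_t sectors;
        };

        /* Resubmission queue standing in for generic_make_request()
         * looping over split remainders. */
        static struct bio_model queue[16];
        static int queue_len;

        static void submit(struct bio_model b)
        {
                queue[queue_len++] = b;
        }

        /* Handle one bio: take at most max_sectors now ("bio_split()"),
         * requeue the remainder for a later pass. */
        static void handle_bio(struct bio_model b, sector_t max_sectors)
        {
                if (b.sectors > max_sectors) {
                        struct bio_model tail = {
                                .start   = b.start + max_sectors,
                                .sectors = b.sectors - max_sectors,
                        };
                        b.sectors = max_sectors;
                        submit(tail);
                }
                printf("issue r1bio: start=%llu len=%llu\n",
                       (unsigned long long)b.start,
                       (unsigned long long)b.sectors);
        }

        int main(void)
        {
                submit((struct bio_model){ .start = 0, .sectors = 300 });
                for (int i = 0; i < queue_len; i++)
                        handle_bio(queue[i], 128); /* e.g. bad-block limit */
                return 0;
        }

Each pass issues exactly one r1bio-sized chunk; a 300-sector request comes
out as 128 + 128 + 44, with no splitting loop inside the handler itself.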
2017-04-05 04:05:50 +00:00
|
|
|
WARN_ON_ONCE(1);
|
2011-07-28 01:38:13 +00:00
|
|
|
|
2009-10-16 04:55:32 +00:00
|
|
|
cond_resched();
|
2016-12-08 23:48:19 +00:00
|
|
|
if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING))
|
2011-07-28 01:31:48 +00:00
|
|
|
md_check_recovery(mddev);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
2011-04-18 08:25:41 +00:00
|
|
|
blk_finish_plug(&plug);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2011-10-11 05:49:05 +00:00
|
|
|
static int init_resync(struct r1conf *conf)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
|
|
|
int buffs;
|
|
|
|
|
|
|
|
buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
|
2018-05-20 22:25:52 +00:00
|
|
|
BUG_ON(mempool_initialized(&conf->r1buf_pool));
|
|
|
|
|
|
|
|
return mempool_init(&conf->r1buf_pool, buffs, r1buf_pool_alloc,
|
|
|
|
r1buf_pool_free, conf->poolinfo);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2017-08-25 00:50:40 +00:00
|
|
|
static struct r1bio *raid1_alloc_init_r1buf(struct r1conf *conf)
|
|
|
|
{
|
2018-05-20 22:25:52 +00:00
|
|
|
struct r1bio *r1bio = mempool_alloc(&conf->r1buf_pool, GFP_NOIO);
|
2017-08-25 00:50:40 +00:00
|
|
|
struct resync_pages *rps;
|
|
|
|
struct bio *bio;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = conf->poolinfo->raid_disks; i--; ) {
|
|
|
|
bio = r1bio->bios[i];
|
|
|
|
rps = bio->bi_private;
|
|
|
|
bio_reset(bio);
|
|
|
|
bio->bi_private = rps;
|
|
|
|
}
|
|
|
|
r1bio->master_bio = NULL;
|
|
|
|
return r1bio;
|
|
|
|
}
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
|
|
|
|
* perform a "sync" on one "block"
|
|
|
|
*
|
|
|
|
* We need to make sure that no normal I/O request - particularly write
|
|
|
|
* requests - conflict with active sync requests.
|
|
|
|
*
|
|
|
|
* This is achieved by tracking pending requests and a 'barrier' concept
|
|
|
|
* that can be installed to exclude normal IO requests.
|
|
|
|
*/
|
|
|
|
|
2016-01-20 21:52:20 +00:00
|
|
|
static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
|
|
|
|
int *skipped)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2011-10-11 05:49:05 +00:00
|
|
|
struct r1conf *conf = mddev->private;
|
2011-10-11 05:48:43 +00:00
|
|
|
struct r1bio *r1_bio;
|
2005-04-16 22:20:36 +00:00
|
|
|
struct bio *bio;
|
|
|
|
sector_t max_sector, nr_sectors;
|
2006-01-06 08:20:21 +00:00
|
|
|
int disk = -1;
|
2005-04-16 22:20:36 +00:00
|
|
|
int i;
|
2006-01-06 08:20:21 +00:00
|
|
|
int wonly = -1;
|
|
|
|
int write_targets = 0, read_targets = 0;
|
2010-10-18 23:03:39 +00:00
|
|
|
sector_t sync_blocks;
|
2005-08-04 19:53:34 +00:00
|
|
|
int still_degraded = 0;
|
2011-07-28 01:31:48 +00:00
|
|
|
int good_sectors = RESYNC_SECTORS;
|
|
|
|
int min_bad = 0; /* number of sectors that are bad in all devices */
|
RAID1: a new I/O barrier implementation to remove resync window
2017-02-17 19:05:56 +00:00
|
|
|
int idx = sector_to_idx(sector_nr);
|
2017-07-14 08:14:42 +00:00
|
|
|
int page_idx = 0;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2018-05-20 22:25:52 +00:00
|
|
|
if (!mempool_initialized(&conf->r1buf_pool))
|
2005-04-16 22:20:36 +00:00
|
|
|
if (init_resync(conf))
|
2005-06-22 00:17:13 +00:00
|
|
|
return 0;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2009-03-31 03:33:13 +00:00
|
|
|
max_sector = mddev->dev_sectors;
|
2005-04-16 22:20:36 +00:00
|
|
|
if (sector_nr >= max_sector) {
|
2005-06-22 00:17:23 +00:00
|
|
|
/* If we aborted, we need to abort the
|
|
|
|
* sync on the 'current' bitmap chunk (there will
|
|
|
|
* only be one in raid1 resync.)
|
|
|
|
* We can find the current address in mddev->curr_resync
|
|
|
|
*/
|
2005-07-15 10:56:35 +00:00
|
|
|
if (mddev->curr_resync < max_sector) /* aborted */
|
2018-08-01 22:20:50 +00:00
|
|
|
md_bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
|
|
|
|
&sync_blocks, 1);
|
2005-07-15 10:56:35 +00:00
|
|
|
else /* completed sync */
|
2005-06-22 00:17:23 +00:00
|
|
|
conf->fullsync = 0;
|
2005-07-15 10:56:35 +00:00
|
|
|
|
2018-08-01 22:20:50 +00:00
|
|
|
md_bitmap_close_sync(mddev->bitmap);
|
2005-04-16 22:20:36 +00:00
|
|
|
close_sync(conf);
|
2015-08-18 22:14:42 +00:00
|
|
|
|
|
|
|
if (mddev_is_clustered(mddev)) {
|
|
|
|
conf->cluster_sync_low = 0;
|
|
|
|
conf->cluster_sync_high = 0;
|
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2006-06-26 07:27:56 +00:00
|
|
|
if (mddev->bitmap == NULL &&
|
|
|
|
mddev->recovery_cp == MaxSector &&
|
2006-08-27 08:23:50 +00:00
|
|
|
!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
|
2006-06-26 07:27:56 +00:00
|
|
|
conf->fullsync == 0) {
|
|
|
|
*skipped = 1;
|
|
|
|
return max_sector - sector_nr;
|
|
|
|
}
|
2006-08-27 08:23:50 +00:00
|
|
|
/* before building a request, check if we can skip these blocks..
|
|
|
|
* This call to bitmap_start_sync doesn't actually record anything
|
|
|
|
*/
|
2018-08-01 22:20:50 +00:00
|
|
|
if (!md_bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
|
2005-11-09 05:39:38 +00:00
|
|
|
!conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
|
2005-06-22 00:17:23 +00:00
|
|
|
/* We can skip this block, and probably several more */
|
|
|
|
*skipped = 1;
|
|
|
|
return sync_blocks;
|
|
|
|
}
|
2006-01-06 08:20:12 +00:00
|
|
|
|
2016-06-13 13:51:19 +00:00
|
|
|
/*
|
|
|
|
* If there is non-resync activity waiting for a turn, then let it
|
|
|
|
* through before starting on this new sync request.
|
|
|
|
*/
|
RAID1: avoid unnecessary spin locks in I/O barrier code
2017-02-17 19:05:57 +00:00
|
|
|
if (atomic_read(&conf->nr_waiting[idx]))
|
2016-06-13 13:51:19 +00:00
|
|
|
schedule_timeout_uninterruptible(1);
|
|
|
|
|
2015-08-18 22:14:42 +00:00
|
|
|
/* we are incrementing sector_nr below. To be safe, we check against
|
|
|
|
* sector_nr + two times RESYNC_SECTORS
|
|
|
|
*/
|
|
|
|
|
2018-08-01 22:20:50 +00:00
|
|
|
md_bitmap_cond_end_sync(mddev->bitmap, sector_nr,
|
2015-08-18 22:14:42 +00:00
|
|
|
mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
|
2006-01-06 08:20:12 +00:00
|
|
|
|
2018-04-09 01:50:44 +00:00
|
|
|
|
|
|
|
if (raise_barrier(conf, sector_nr))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
r1_bio = raid1_alloc_init_r1buf(conf);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2006-01-06 08:20:21 +00:00
|
|
|
rcu_read_lock();
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
|
2006-01-06 08:20:21 +00:00
|
|
|
* If we get a correctably read error during resync or recovery,
|
|
|
|
* we might want to read from a different device. So we
|
|
|
|
* flag all drives that could conceivably be read from for READ,
|
|
|
|
* and any others (which will be non-In_sync devices) for WRITE.
|
|
|
|
* If a read fails, we try reading from something else for which READ
|
|
|
|
* is OK.
|
2005-04-16 22:20:36 +00:00
|
|
|
*/
|
|
|
|
|
|
|
|
r1_bio->mddev = mddev;
|
|
|
|
r1_bio->sector = sector_nr;
|
2005-06-22 00:17:23 +00:00
|
|
|
r1_bio->state = 0;
|
2005-04-16 22:20:36 +00:00
|
|
|
set_bit(R1BIO_IsSync, &r1_bio->state);
|
RAID1: a new I/O barrier implementation to remove resync window
2017-02-17 19:05:56 +00:00
|
|
|
/* make sure good_sectors won't go across barrier unit boundary */
|
|
|
|
good_sectors = align_to_barrier_unit_end(sector_nr, good_sectors);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2011-12-22 23:17:56 +00:00
|
|
|
for (i = 0; i < conf->raid_disks * 2; i++) {
|
2011-10-11 05:45:26 +00:00
|
|
|
struct md_rdev *rdev;
|
2005-04-16 22:20:36 +00:00
|
|
|
bio = r1_bio->bios[i];
|
|
|
|
|
2006-01-06 08:20:21 +00:00
|
|
|
rdev = rcu_dereference(conf->mirrors[i].rdev);
|
|
|
|
if (rdev == NULL ||
|
2011-07-28 01:31:48 +00:00
|
|
|
test_bit(Faulty, &rdev->flags)) {
|
2011-12-22 23:17:56 +00:00
|
|
|
if (i < conf->raid_disks)
|
|
|
|
still_degraded = 1;
|
2006-01-06 08:20:21 +00:00
|
|
|
} else if (!test_bit(In_sync, &rdev->flags)) {
|
2016-06-05 19:32:07 +00:00
|
|
|
bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
|
2005-04-16 22:20:36 +00:00
|
|
|
bio->bi_end_io = end_sync_write;
|
|
|
|
write_targets++;
|
2006-01-06 08:20:21 +00:00
|
|
|
} else {
|
|
|
|
/* may need to read from here */
|
2011-07-28 01:31:48 +00:00
|
|
|
sector_t first_bad = MaxSector;
|
|
|
|
int bad_sectors;
|
|
|
|
|
|
|
|
if (is_badblock(rdev, sector_nr, good_sectors,
|
|
|
|
&first_bad, &bad_sectors)) {
|
|
|
|
if (first_bad > sector_nr)
|
|
|
|
good_sectors = first_bad - sector_nr;
|
|
|
|
else {
|
|
|
|
bad_sectors -= (sector_nr - first_bad);
|
|
|
|
if (min_bad == 0 ||
|
|
|
|
min_bad > bad_sectors)
|
|
|
|
min_bad = bad_sectors;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (sector_nr < first_bad) {
|
|
|
|
if (test_bit(WriteMostly, &rdev->flags)) {
|
|
|
|
if (wonly < 0)
|
|
|
|
wonly = i;
|
|
|
|
} else {
|
|
|
|
if (disk < 0)
|
|
|
|
disk = i;
|
|
|
|
}
|
2016-06-05 19:32:07 +00:00
|
|
|
bio_set_op_attrs(bio, REQ_OP_READ, 0);
|
2011-07-28 01:31:48 +00:00
|
|
|
bio->bi_end_io = end_sync_read;
|
|
|
|
read_targets++;
|
2012-07-17 10:17:55 +00:00
|
|
|
} else if (!test_bit(WriteErrorSeen, &rdev->flags) &&
|
|
|
|
test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
|
|
|
|
!test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
|
|
|
|
/*
|
|
|
|
* The device is suitable for reading (InSync),
|
|
|
|
* but has bad block(s) here. Let's try to correct them,
|
|
|
|
* if we are doing resync or repair. Otherwise, leave
|
|
|
|
* this device alone for this sync request.
|
|
|
|
*/
|
2016-06-05 19:32:07 +00:00
|
|
|
bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
|
2012-07-17 10:17:55 +00:00
|
|
|
bio->bi_end_io = end_sync_write;
|
|
|
|
write_targets++;
|
2006-01-06 08:20:21 +00:00
|
|
|
}
|
|
|
|
}
|
2019-12-10 02:42:25 +00:00
|
|
|
if (rdev && bio->bi_end_io) {
|
2011-07-28 01:31:48 +00:00
|
|
|
atomic_inc(&rdev->nr_pending);
|
2013-10-11 22:44:27 +00:00
|
|
|
bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
|
2017-08-23 17:10:32 +00:00
|
|
|
bio_set_dev(bio, rdev->bdev);
|
2016-11-18 05:16:12 +00:00
|
|
|
if (test_bit(FailFast, &rdev->flags))
|
|
|
|
bio->bi_opf |= MD_FAILFAST;
|
2011-07-28 01:31:48 +00:00
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
2006-01-06 08:20:21 +00:00
|
|
|
rcu_read_unlock();
|
|
|
|
if (disk < 0)
|
|
|
|
disk = wonly;
|
|
|
|
r1_bio->read_disk = disk;
|
2005-06-22 00:17:23 +00:00
|
|
|
|
2011-07-28 01:31:48 +00:00
|
|
|
if (read_targets == 0 && min_bad > 0) {
|
|
|
|
/* These sectors are bad on all InSync devices, so we
|
|
|
|
* need to mark them bad on all write targets
|
|
|
|
*/
|
|
|
|
int ok = 1;
|
2011-12-22 23:17:56 +00:00
|
|
|
for (i = 0 ; i < conf->raid_disks * 2 ; i++)
|
2011-07-28 01:31:48 +00:00
|
|
|
if (r1_bio->bios[i]->bi_end_io == end_sync_write) {
|
2012-04-01 15:04:19 +00:00
|
|
|
struct md_rdev *rdev = conf->mirrors[i].rdev;
|
2011-07-28 01:31:48 +00:00
|
|
|
ok = rdev_set_badblocks(rdev, sector_nr,
|
|
|
|
min_bad, 0
|
|
|
|
) && ok;
|
|
|
|
}
|
2016-12-08 23:48:19 +00:00
|
|
|
set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
|
2011-07-28 01:31:48 +00:00
|
|
|
*skipped = 1;
|
|
|
|
put_buf(r1_bio);
|
|
|
|
|
|
|
|
if (!ok) {
|
|
|
|
/* Cannot record the badblocks, so need to
|
|
|
|
* abort the resync.
|
|
|
|
* If there are multiple read targets, could just
|
|
|
|
* fail the really bad ones ???
|
|
|
|
*/
|
|
|
|
conf->recovery_disabled = mddev->recovery_disabled;
|
|
|
|
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
|
|
|
|
return 0;
|
|
|
|
} else
|
|
|
|
return min_bad;
|
|
|
|
|
|
|
|
}
|
|
|
|
if (min_bad > 0 && min_bad < good_sectors) {
|
|
|
|
/* only resync enough to reach the next bad->good
|
|
|
|
* transition */
|
|
|
|
good_sectors = min_bad;
|
|
|
|
}
|
|
|
|
|
2006-01-06 08:20:21 +00:00
|
|
|
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0)
|
|
|
|
/* extra read targets are also write targets */
|
|
|
|
write_targets += read_targets-1;
	if (write_targets == 0 || read_targets == 0) {
		/* There is nowhere to write, so all non-sync
		 * drives must be failed - so we are finished
		 */
		sector_t rv;
		if (min_bad > 0)
			max_sector = sector_nr + min_bad;
		rv = max_sector - sector_nr;
		*skipped = 1;
		put_buf(r1_bio);
		return rv;
	}
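
	/*
	 * Trim the range so we neither cross the user-set resync_max nor
	 * run past the good sectors found above.
	 */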
	if (max_sector > mddev->resync_max)
		max_sector = mddev->resync_max; /* Don't do IO beyond here */
	if (max_sector > sector_nr + good_sectors)
		max_sector = sector_nr + good_sectors;
	nr_sectors = 0;
	sync_blocks = 0;
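	/*
	 * Attach pages to every selected bio, one PAGE_SIZE chunk per
	 * iteration, until we hit max_sector, a region the bitmap says is
	 * already clean, or the RESYNC_PAGES limit of the preallocated
	 * resync page pool.
	 */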
	do {
		struct page *page;
		int len = PAGE_SIZE;
		if (sector_nr + (len>>9) > max_sector)
			len = (max_sector - sector_nr) << 9;
		if (len == 0)
			break;
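		/*
		 * Ask the bitmap whether this block still needs syncing; if
		 * not (and this is neither a full sync nor a user-requested
		 * check/repair), stop extending the request here.
		 */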
		if (sync_blocks == 0) {
			if (!md_bitmap_start_sync(mddev->bitmap, sector_nr,
						  &sync_blocks, still_degraded) &&
			    !conf->fullsync &&
			    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
				break;
			if ((len >> 9) > sync_blocks)
				len = sync_blocks<<9;
		}

		for (i = 0 ; i < conf->raid_disks * 2; i++) {
			struct resync_pages *rp;

			bio = r1_bio->bios[i];
			rp = get_resync_pages(bio);
			if (bio->bi_end_io) {
				page = resync_fetch_page(rp, page_idx);

				/*
				 * won't fail because the vec table is big
				 * enough to hold all these pages
				 */
				bio_add_page(bio, page, len, 0);
			}
		}
		nr_sectors += len>>9;
		sector_nr += len>>9;
		sync_blocks -= (len>>9);
	} while (++page_idx < RESYNC_PAGES);

	r1_bio->sectors = nr_sectors;
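
	/*
	 * For clustered md, advertise the region being resynced (advanced
	 * in CLUSTER_RESYNC_WINDOW_SECTORS steps) so the other nodes can
	 * keep clear of it.
	 */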
	if (mddev_is_clustered(mddev) &&
	    conf->cluster_sync_high < sector_nr + nr_sectors) {
		conf->cluster_sync_low = mddev->curr_resync_completed;
		conf->cluster_sync_high = conf->cluster_sync_low + CLUSTER_RESYNC_WINDOW_SECTORS;
		/* Send resync message */
		md_cluster_ops->resync_info_update(mddev,
				conf->cluster_sync_low,
				conf->cluster_sync_high);
	}

	/* For a user-requested sync, we read all readable devices and do a
	 * compare
	 */
	if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
		atomic_set(&r1_bio->remaining, read_targets);
		for (i = 0; i < conf->raid_disks * 2 && read_targets; i++) {
			bio = r1_bio->bios[i];
			if (bio->bi_end_io == end_sync_read) {
				read_targets--;
				md_sync_acct_bio(bio, nr_sectors);
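				/*
				 * Once we are down to a single remaining
				 * read source there is no other copy to
				 * fall back on, so don't fail fast on it.
				 */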
				if (read_targets == 1)
					bio->bi_opf &= ~MD_FAILFAST;
				submit_bio_noacct(bio);
			}
		}
	} else {
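		/*
		 * Normal resync/recovery: read from the single chosen
		 * device; the read completion path then schedules the
		 * writes to the out-of-sync targets.
		 */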
		atomic_set(&r1_bio->remaining, 1);
		bio = r1_bio->bios[r1_bio->read_disk];
		md_sync_acct_bio(bio, nr_sectors);
		if (read_targets == 1)
			bio->bi_opf &= ~MD_FAILFAST;
		submit_bio_noacct(bio);
	}
	return nr_sectors;
}
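
/*
 * md personality .size method: a 'sectors' argument of zero means "use
 * the current per-device size".  RAID1 mirrors the same data on every
 * member, so the array size is one component's worth regardless of
 * raid_disks.
 */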
static sector_t raid1_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
	if (sectors)
		return sectors;

	return mddev->dev_sectors;
}
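
/*
 * Build the r1conf for this array.  Every allocation failure funnels
 * through the abort label (later in the function), which unwinds and
 * returns an ERR_PTR.
 */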
static struct r1conf *setup_conf(struct mddev *mddev)
{
	struct r1conf *conf;
	int i;
	struct raid1_info *disk;
	struct md_rdev *rdev;
	int err = -ENOMEM;

	conf = kzalloc(sizeof(struct r1conf), GFP_KERNEL);
	if (!conf)
		goto abort;
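
	/*
	 * Barrier bookkeeping is per "barrier bucket" rather than global:
	 * the array's LBA space is split into 64MB barrier units, and each
	 * unit hashes (sector_to_idx() in raid1.h) into one of
	 * BARRIER_BUCKETS_NR buckets.  Regular I/O then only contends with
	 * resync I/O that maps to the same bucket.  Each counter array
	 * below is one atomic_t per bucket and fits in a single page.
	 */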
	conf->nr_pending = kcalloc(BARRIER_BUCKETS_NR,
				   sizeof(atomic_t), GFP_KERNEL);
	if (!conf->nr_pending)
		goto abort;

	conf->nr_waiting = kcalloc(BARRIER_BUCKETS_NR,
				   sizeof(atomic_t), GFP_KERNEL);
	if (!conf->nr_waiting)
		goto abort;

	conf->nr_queued = kcalloc(BARRIER_BUCKETS_NR,
				  sizeof(atomic_t), GFP_KERNEL);
	if (!conf->nr_queued)
		goto abort;

	conf->barrier = kcalloc(BARRIER_BUCKETS_NR,
				sizeof(atomic_t), GFP_KERNEL);
RAID1: a new I/O barrier implementation to remove resync window
'Commit 79ef3a8aa1cb ("raid1: Rewrite the implementation of iobarrier.")'
introduces a sliding resync window for raid1 I/O barrier, this idea limits
I/O barriers to happen only inside a slidingresync window, for regular
I/Os out of this resync window they don't need to wait for barrier any
more. On large raid1 device, it helps a lot to improve parallel writing
I/O throughput when there are background resync I/Os performing at
same time.
The idea of sliding resync widow is awesome, but code complexity is a
challenge. Sliding resync window requires several variables to work
collectively, this is complexed and very hard to make it work correctly.
Just grep "Fixes: 79ef3a8aa1" in kernel git log, there are 8 more patches
to fix the original resync window patch. This is not the end, any further
related modification may easily introduce more regreassion.
Therefore I decide to implement a much simpler raid1 I/O barrier, by
removing resync window code, I believe life will be much easier.
The brief idea of the simpler barrier is,
- Do not maintain a global unique resync window
- Use multiple hash buckets to reduce I/O barrier conflicts, regular
I/O only has to wait for a resync I/O when both them have same barrier
bucket index, vice versa.
- I/O barrier can be reduced to an acceptable number if there are enough
barrier buckets
Here I explain how the barrier buckets are designed,
- BARRIER_UNIT_SECTOR_SIZE
The whole LBA address space of a raid1 device is divided into multiple
barrier units, by the size of BARRIER_UNIT_SECTOR_SIZE.
Bio requests won't go across border of barrier unit size, that means
maximum bio size is BARRIER_UNIT_SECTOR_SIZE<<9 (64MB) in bytes.
For random I/O 64MB is large enough for both read and write requests,
for sequential I/O considering underlying block layer may merge them
into larger requests, 64MB is still good enough.
Neil also points out that for resync operation, "we want the resync to
move from region to region fairly quickly so that the slowness caused
by having to synchronize with the resync is averaged out over a fairly
small time frame". For full speed resync, 64MB should take less then 1
second. When resync is competing with other I/O, it could take up a few
minutes. Therefore 64MB size is fairly good range for resync.
- BARRIER_BUCKETS_NR
There are BARRIER_BUCKETS_NR buckets in total, which is defined by,
#define BARRIER_BUCKETS_NR_BITS (PAGE_SHIFT - 2)
#define BARRIER_BUCKETS_NR (1<<BARRIER_BUCKETS_NR_BITS)
this patch makes the bellowed members of struct r1conf from integer
to array of integers,
- int nr_pending;
- int nr_waiting;
- int nr_queued;
- int barrier;
+ int *nr_pending;
+ int *nr_waiting;
+ int *nr_queued;
+ int *barrier;
number of the array elements is defined as BARRIER_BUCKETS_NR. For 4KB
kernel space page size, (PAGE_SHIFT - 2) indecates there are 1024 I/O
barrier buckets, and each array of integers occupies single memory page.
1024 means for a request which is smaller than the I/O barrier unit size
has ~0.1% chance to wait for resync to pause, which is quite a small
enough fraction. Also requesting single memory page is more friendly to
kernel page allocator than larger memory size.
- I/O barrier bucket is indexed by bio start sector
If multiple I/O requests hit different I/O barrier units, they only need
to compete for the I/O barrier with other I/Os that hit the same I/O
barrier bucket index. The index of the barrier bucket which a bio should
look at is calculated by sector_to_idx(), defined in raid1.h as an
inline function,
static inline int sector_to_idx(sector_t sector)
{
return hash_long(sector >> BARRIER_UNIT_SECTOR_BITS,
BARRIER_BUCKETS_NR_BITS);
}
Here sector is the start sector number of a bio.
- A single bio won't cross the boundary of an I/O barrier unit
If a request crosses a barrier unit boundary, it will be split. A
bio may be split in raid1_make_request() or raid1_sync_request(), if
the sector count returned by align_to_barrier_unit_end() is smaller
than the original bio size.
Compared to a single sliding resync window,
- Currently resync I/O grows linearly, therefore regular and resync I/O
will conflict within a single barrier unit. So the I/O behavior is
similar to a single sliding resync window.
- But a barrier bucket is shared by all barrier units with an identical
barrier bucket index, so the probability of conflict might be higher
than with a single sliding resync window, in the case that write I/Os
always hit barrier units which have the same barrier bucket index as
the resync I/Os. This is a very rare condition in real I/O workloads;
I cannot imagine how it could happen in practice.
- Therefore we can achieve a low enough conflict rate with a much
simpler barrier algorithm and implementation.
There are a few changes that should be noted,
- In raid1d(), I change the code that decreases conf->nr_queued[idx] into a
single loop; it looks like this,
spin_lock_irqsave(&conf->device_lock, flags);
conf->nr_queued[idx]--;
spin_unlock_irqrestore(&conf->device_lock, flags);
This change generates more spin lock operations, but in the next patch of
this patch set it will be replaced by a single line of code,
atomic_dec(&conf->nr_queued[idx]);
So we don't need to worry about the spin lock cost here.
- Mainline raid1 code splits the original raid1_make_request() into
raid1_read_request() and raid1_write_request(). If the original bio
crosses an I/O barrier unit boundary, the bio will be split before
calling raid1_read_request() or raid1_write_request(); this makes
the code logic simpler and clearer.
- In this patch wait_barrier() is moved from raid1_make_request() to
raid1_write_request(). In raid1_read_request(), the original
wait_barrier() is replaced by wait_read_barrier().
The difference is that wait_read_barrier() only waits if the array is
frozen; using different barrier functions in different code paths makes
the code cleaner and easier to read.
Changelog
V4:
- Add alloc_r1bio() to remove redundant r1bio memory allocation code.
- Fix many typos in patch comments.
- Use (PAGE_SHIFT - ilog2(sizeof(int))) to define BARRIER_BUCKETS_NR_BITS.
V3:
- Rebase the patch against latest upstream kernel code.
- Many fixes by review comments from Neil,
- Go back to using pointers to replace arrays in struct r1conf
- Remove total_barriers from struct r1conf
- Add more patch comments to explain how/why the values of
BARRIER_UNIT_SECTOR_SIZE and BARRIER_BUCKETS_NR are decided.
- Use get_unqueued_pending() to replace get_all_pendings() and
get_all_queued()
- Increase bucket number from 512 to 1024
- Change code comments format by review from Shaohua.
V2:
- Use bio_split() to split the original bio if it crosses a barrier unit
boundary, to make the code simpler, as suggested by Shaohua and
Neil.
- Use hash_long() to replace the original linear hash, to avoid a possible
conflict between resync I/O and sequential write I/O, as suggested by
Shaohua.
- Add conf->total_barriers to record barrier depth, which is used to
control number of parallel sync I/O barriers, by suggestion from Shaohua.
- In the V1 patch the following barrier-bucket-related members of r1conf
were allocated in a memory page. To make the code simpler, the V2 patch
moves the memory into struct r1conf, like this,
- int nr_pending;
- int nr_waiting;
- int nr_queued;
- int barrier;
+ int nr_pending[BARRIER_BUCKETS_NR];
+ int nr_waiting[BARRIER_BUCKETS_NR];
+ int nr_queued[BARRIER_BUCKETS_NR];
+ int barrier[BARRIER_BUCKETS_NR];
This change is by the suggestion from Shaohua.
- Remove some irrelevant code comments, as suggested by Guoqing.
- Add a missing wait_barrier() before jumping to retry_write, in
raid1_make_write_request().
V1:
- Original RFC patch for comments
Signed-off-by: Coly Li <colyli@suse.de>
Cc: Johannes Thumshirn <jthumshirn@suse.de>
Cc: Guoqing Jiang <gqjiang@suse.com>
Reviewed-by: Neil Brown <neilb@suse.de>
Signed-off-by: Shaohua Li <shli@fb.com>
2017-02-17 19:05:56 +00:00
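To see the bucket mapping in action, here is a minimal userspace model of the
design described above; it is a sketch under stated assumptions, not kernel
code. It assumes a 4KB page (hence 1024 buckets) and a 64MB barrier unit
(BARRIER_UNIT_SECTOR_BITS = 17 in 512-byte sectors), and stands in for the
kernel's hash_long() with the golden-ratio multiply used by hash_64(); the
model_* names are illustrative, not kernel symbols.
#include <stdint.h>
#include <stdio.h>

#define BARRIER_UNIT_SECTOR_BITS 17                /* 2^17 sectors == 64MB */
#define BARRIER_BUCKETS_NR_BITS  10                /* PAGE_SHIFT(12) - 2 */
#define BARRIER_BUCKETS_NR       (1 << BARRIER_BUCKETS_NR_BITS)

/* Stand-in for the kernel's 64-bit hash_long(): multiply by the 64-bit
 * golden ratio constant and keep the top bits. */
static unsigned int model_hash_long(uint64_t val, unsigned int bits)
{
        return (unsigned int)((val * 0x61C8864680B583EBULL) >> (64 - bits));
}

static int model_sector_to_idx(uint64_t sector)
{
        return model_hash_long(sector >> BARRIER_UNIT_SECTOR_BITS,
                               BARRIER_BUCKETS_NR_BITS);
}

int main(void)
{
        /* Sectors 0 and 131071 sit in the same 64MB unit, so they share a
         * bucket; sector 131072 starts the next unit and (almost always)
         * hashes elsewhere. A bio therefore only contends with resync when
         * both map to the same one of the 1024 buckets (~0.1% chance). */
        printf("%d %d %d\n", model_sector_to_idx(0),
               model_sector_to_idx(131071), model_sector_to_idx(131072));
        return 0;
}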
|
|
|
if (!conf->barrier)
|
|
|
|
goto abort;
|
|
|
|
|
treewide: kzalloc() -> kcalloc()
The kzalloc() function has a 2-factor argument form, kcalloc(). This
patch replaces cases of:
kzalloc(a * b, gfp)
with:
kcalloc(a, b, gfp)
as well as handling cases of:
kzalloc(a * b * c, gfp)
with:
kzalloc(array3_size(a, b, c), gfp)
as it's slightly less ugly than:
kzalloc_array(array_size(a, b), c, gfp)
This does, however, attempt to ignore constant size factors like:
kzalloc(4 * 1024, gfp)
though any constants defined via macros get caught up in the conversion.
Any factors with a sizeof() of "unsigned char", "char", and "u8" were
dropped, since they're redundant.
The Coccinelle script used for this was:
// Fix redundant parens around sizeof().
@@
type TYPE;
expression THING, E;
@@
(
kzalloc(
- (sizeof(TYPE)) * E
+ sizeof(TYPE) * E
, ...)
|
kzalloc(
- (sizeof(THING)) * E
+ sizeof(THING) * E
, ...)
)
// Drop single-byte sizes and redundant parens.
@@
expression COUNT;
typedef u8;
typedef __u8;
@@
(
kzalloc(
- sizeof(u8) * (COUNT)
+ COUNT
, ...)
|
kzalloc(
- sizeof(__u8) * (COUNT)
+ COUNT
, ...)
|
kzalloc(
- sizeof(char) * (COUNT)
+ COUNT
, ...)
|
kzalloc(
- sizeof(unsigned char) * (COUNT)
+ COUNT
, ...)
|
kzalloc(
- sizeof(u8) * COUNT
+ COUNT
, ...)
|
kzalloc(
- sizeof(__u8) * COUNT
+ COUNT
, ...)
|
kzalloc(
- sizeof(char) * COUNT
+ COUNT
, ...)
|
kzalloc(
- sizeof(unsigned char) * COUNT
+ COUNT
, ...)
)
// 2-factor product with sizeof(type/expression) and identifier or constant.
@@
type TYPE;
expression THING;
identifier COUNT_ID;
constant COUNT_CONST;
@@
(
- kzalloc
+ kcalloc
(
- sizeof(TYPE) * (COUNT_ID)
+ COUNT_ID, sizeof(TYPE)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(TYPE) * COUNT_ID
+ COUNT_ID, sizeof(TYPE)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(TYPE) * (COUNT_CONST)
+ COUNT_CONST, sizeof(TYPE)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(TYPE) * COUNT_CONST
+ COUNT_CONST, sizeof(TYPE)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(THING) * (COUNT_ID)
+ COUNT_ID, sizeof(THING)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(THING) * COUNT_ID
+ COUNT_ID, sizeof(THING)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(THING) * (COUNT_CONST)
+ COUNT_CONST, sizeof(THING)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(THING) * COUNT_CONST
+ COUNT_CONST, sizeof(THING)
, ...)
)
// 2-factor product, only identifiers.
@@
identifier SIZE, COUNT;
@@
- kzalloc
+ kcalloc
(
- SIZE * COUNT
+ COUNT, SIZE
, ...)
// 3-factor product with 1 sizeof(type) or sizeof(expression), with
// redundant parens removed.
@@
expression THING;
identifier STRIDE, COUNT;
type TYPE;
@@
(
kzalloc(
- sizeof(TYPE) * (COUNT) * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kzalloc(
- sizeof(TYPE) * (COUNT) * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kzalloc(
- sizeof(TYPE) * COUNT * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kzalloc(
- sizeof(TYPE) * COUNT * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kzalloc(
- sizeof(THING) * (COUNT) * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kzalloc(
- sizeof(THING) * (COUNT) * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kzalloc(
- sizeof(THING) * COUNT * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kzalloc(
- sizeof(THING) * COUNT * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
)
// 3-factor product with 2 sizeof(variable), with redundant parens removed.
@@
expression THING1, THING2;
identifier COUNT;
type TYPE1, TYPE2;
@@
(
kzalloc(
- sizeof(TYPE1) * sizeof(TYPE2) * COUNT
+ array3_size(COUNT, sizeof(TYPE1), sizeof(TYPE2))
, ...)
|
kzalloc(
- sizeof(TYPE1) * sizeof(TYPE2) * (COUNT)
+ array3_size(COUNT, sizeof(TYPE1), sizeof(TYPE2))
, ...)
|
kzalloc(
- sizeof(THING1) * sizeof(THING2) * COUNT
+ array3_size(COUNT, sizeof(THING1), sizeof(THING2))
, ...)
|
kzalloc(
- sizeof(THING1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(THING1), sizeof(THING2))
, ...)
|
kzalloc(
- sizeof(TYPE1) * sizeof(THING2) * COUNT
+ array3_size(COUNT, sizeof(TYPE1), sizeof(THING2))
, ...)
|
kzalloc(
- sizeof(TYPE1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(TYPE1), sizeof(THING2))
, ...)
)
// 3-factor product, only identifiers, with redundant parens removed.
@@
identifier STRIDE, SIZE, COUNT;
@@
(
kzalloc(
- (COUNT) * STRIDE * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- COUNT * (STRIDE) * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- COUNT * STRIDE * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- (COUNT) * (STRIDE) * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- COUNT * (STRIDE) * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- (COUNT) * STRIDE * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- (COUNT) * (STRIDE) * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- COUNT * STRIDE * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
)
// Any remaining multi-factor products, first at least 3-factor products,
// when they're not all constants...
@@
expression E1, E2, E3;
constant C1, C2, C3;
@@
(
kzalloc(C1 * C2 * C3, ...)
|
kzalloc(
- (E1) * E2 * E3
+ array3_size(E1, E2, E3)
, ...)
|
kzalloc(
- (E1) * (E2) * E3
+ array3_size(E1, E2, E3)
, ...)
|
kzalloc(
- (E1) * (E2) * (E3)
+ array3_size(E1, E2, E3)
, ...)
|
kzalloc(
- E1 * E2 * E3
+ array3_size(E1, E2, E3)
, ...)
)
// And then all remaining 2 factors products when they're not all constants,
// keeping sizeof() as the second factor argument.
@@
expression THING, E1, E2;
type TYPE;
constant C1, C2, C3;
@@
(
kzalloc(sizeof(THING) * C2, ...)
|
kzalloc(sizeof(TYPE) * C2, ...)
|
kzalloc(C1 * C2 * C3, ...)
|
kzalloc(C1 * C2, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(TYPE) * (E2)
+ E2, sizeof(TYPE)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(TYPE) * E2
+ E2, sizeof(TYPE)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(THING) * (E2)
+ E2, sizeof(THING)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(THING) * E2
+ E2, sizeof(THING)
, ...)
|
- kzalloc
+ kcalloc
(
- (E1) * E2
+ E1, E2
, ...)
|
- kzalloc
+ kcalloc
(
- (E1) * (E2)
+ E1, E2
, ...)
|
- kzalloc
+ kcalloc
(
- E1 * E2
+ E1, E2
, ...)
)
Signed-off-by: Kees Cook <keescook@chromium.org>
2018-06-12 21:03:40 +00:00
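To make the rationale concrete, here is a hypothetical before/after sketch
(struct item and alloc_items() are illustrative names, not from this file):
#include <linux/slab.h>
#include <linux/types.h>

struct item {
        u64 key;
        u64 val;
};

static struct item *alloc_items(size_t n)
{
        /* Before: kzalloc(n * sizeof(struct item), GFP_KERNEL). If the
         * product overflows, it silently wraps and a too-small buffer is
         * returned. kcalloc() instead returns NULL when n * size would
         * overflow, and zeroes the memory just like kzalloc(). */
        return kcalloc(n, sizeof(struct item), GFP_KERNEL);
}

/* For three factors, array3_size() (from <linux/overflow.h>) saturates to
 * SIZE_MAX on overflow, so kzalloc(array3_size(a, b, c), GFP_KERNEL) fails
 * cleanly rather than allocating a wrapped-around size. */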
|
|
|
conf->mirrors = kzalloc(array3_size(sizeof(struct raid1_info),
|
|
|
|
mddev->raid_disks, 2),
|
|
|
|
GFP_KERNEL);
|
2005-04-16 22:20:36 +00:00
|
|
|
if (!conf->mirrors)
|
2009-12-14 01:49:51 +00:00
|
|
|
goto abort;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2006-01-06 08:20:19 +00:00
|
|
|
conf->tmppage = alloc_page(GFP_KERNEL);
|
|
|
|
if (!conf->tmppage)
|
2009-12-14 01:49:51 +00:00
|
|
|
goto abort;
|
2006-01-06 08:20:19 +00:00
|
|
|
|
2009-12-14 01:49:51 +00:00
|
|
|
conf->poolinfo = kzalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
|
2005-04-16 22:20:36 +00:00
|
|
|
if (!conf->poolinfo)
|
2009-12-14 01:49:51 +00:00
|
|
|
goto abort;
|
2011-12-22 23:17:56 +00:00
|
|
|
conf->poolinfo->raid_disks = mddev->raid_disks * 2;
|
2019-06-14 22:41:04 +00:00
|
|
|
err = mempool_init(&conf->r1bio_pool, NR_RAID_BIOS, r1bio_pool_alloc,
|
2019-06-14 22:41:10 +00:00
|
|
|
rbio_pool_free, conf->poolinfo);
|
2018-05-20 22:25:52 +00:00
|
|
|
if (err)
|
2009-12-14 01:49:51 +00:00
|
|
|
goto abort;
|
|
|
|
|
2018-05-20 22:25:52 +00:00
|
|
|
err = bioset_init(&conf->bio_split, BIO_POOL_SIZE, 0, 0);
|
|
|
|
if (err)
|
md/raid1: simplify the splitting of requests.
raid1 currently splits requests in two different ways for
two different reasons.
First, bio_split() is used to ensure the bio fits within a
resync accounting region.
Second, multiple r1bios are allocated for each bio to handle
the possibility of known bad blocks on some devices.
This can be simplified to just use bio_split() once, and not
use multiple r1bios.
We delay the split until we know a maximum bio size that can
be handled with a single r1bio, and then split the bio and
queue the remainder for later handling.
This avoids all loops inside raid1.c request handling. Just
a single read, or a single set of writes, is submitted to
lower-level devices for each bio that comes from
generic_make_request().
When the bio needs to be split, generic_make_request() will
do the necessary looping and call md_make_request() multiple
times.
raid1_make_request() no longer queues requests for raid1 to handle,
so we can remove that branch from the 'if'.
This patch also creates a new private bio_set
(conf->bio_split) for splitting bios. Using fs_bio_set
is wrong, as it is meant to be used by filesystems, not
block devices. Using it inside md can lead to deadlocks
under high memory pressure.
Delete unused variable in raid1_write_request() (Shaohua)
Signed-off-by: NeilBrown <neilb@suse.com>
Signed-off-by: Shaohua Li <shli@fb.com>
2017-04-05 04:05:50 +00:00
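As a rough sketch of that pattern (split_for_r1bio() is an assumed helper
name, and the arguments mirror what raid1 computes rather than quoting the
exact upstream signature):
#include <linux/bio.h>
#include <linux/blkdev.h>

/* Sketch: cut 'bio' down to something one r1bio can cover. 'max_sectors'
 * is whatever limit was just computed; 'bs' is the private bio_set
 * (conf->bio_split). Using fs_bio_set here could deadlock under memory
 * pressure, which is why the patch adds a private one. */
static struct bio *split_for_r1bio(struct bio *bio, int max_sectors,
                                   struct bio_set *bs)
{
        if (bio_sectors(bio) > max_sectors) {
                struct bio *split = bio_split(bio, max_sectors,
                                              GFP_NOIO, bs);

                bio_chain(split, bio);     /* remainder completes after split */
                generic_make_request(bio); /* requeue the remainder */
                bio = split;
        }
        return bio;                        /* now fits a single r1bio */
}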
|
|
|
goto abort;
|
|
|
|
|
2009-10-16 04:55:44 +00:00
|
|
|
conf->poolinfo->mddev = mddev;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2011-12-22 23:17:57 +00:00
|
|
|
err = -EINVAL;
|
2008-05-14 23:05:54 +00:00
|
|
|
spin_lock_init(&conf->device_lock);
|
2012-03-19 01:46:39 +00:00
|
|
|
rdev_for_each(rdev, mddev) {
|
2009-12-14 01:49:51 +00:00
|
|
|
int disk_idx = rdev->raid_disk;
|
2005-04-16 22:20:36 +00:00
|
|
|
if (disk_idx >= mddev->raid_disks
|
|
|
|
|| disk_idx < 0)
|
|
|
|
continue;
|
2011-12-22 23:17:57 +00:00
|
|
|
if (test_bit(Replacement, &rdev->flags))
|
2012-10-31 00:42:03 +00:00
|
|
|
disk = conf->mirrors + mddev->raid_disks + disk_idx;
|
2011-12-22 23:17:57 +00:00
|
|
|
else
|
|
|
|
disk = conf->mirrors + disk_idx;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2011-12-22 23:17:57 +00:00
|
|
|
if (disk->rdev)
|
|
|
|
goto abort;
|
2005-04-16 22:20:36 +00:00
|
|
|
disk->rdev = rdev;
|
|
|
|
disk->head_position = 0;
|
md/raid1: prevent merging too large request
For SSDs, once the request size exceeds a specific value (the optimal io
size), request size no longer matters for bandwidth. In that situation, if
making the request size bigger leaves some disks idle, the total throughput
will actually drop. A good example is doing readahead in a two-disk raid1
setup.
So when should we split big requests? We absolutely don't want to split big
requests into very small ones; even on SSDs, big request transfers are more
efficient. This patch only considers requests larger than the optimal io size.
If all disks are busy, is it worth doing a split? Say the optimal io size is
16k, with two 32k requests and two disks. We can let each disk run one 32k
request, or split the requests into four 16k requests with each disk running
two. It's hard to say which case is better; it depends on the hardware.
So only consider the case where there are idle disks. For readahead, splitting
is always better in this case, and in my tests the patch below improves
throughput by more than 30%. Not 100%, because the disks aren't 100% busy.
Such a case can happen not just for readahead but also, for example, for
direct I/O. But I suppose direct I/O usually has a bigger I/O depth and keeps
all disks busy, so I ignored it.
Note: if the raid uses any hard disk, we don't prevent merging, since that
would make performance worse.
Signed-off-by: Shaohua Li <shli@fusionio.com>
Signed-off-by: NeilBrown <neilb@suse.de>
2012-07-31 00:03:53 +00:00
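The policy above can be condensed into a predicate roughly like this sketch
(model_worth_splitting() and its boolean inputs are illustrative; in the
driver they come from the queue's optimal io size, the rotational flags, and
per-mirror pending I/O counts):
#include <linux/types.h>

static bool model_worth_splitting(unsigned int req_sectors,
                                  unsigned int opt_io_sectors,
                                  bool all_disks_rotational,
                                  bool have_idle_disk)
{
        /* Hard disks in the array: never prevent merging. */
        if (all_disks_rotational)
                return false;
        /* No advertised optimal io size, or a small request: keep it whole. */
        if (opt_io_sectors == 0 || req_sectors <= opt_io_sectors)
                return false;
        /* Only split when a mirror would otherwise sit idle. */
        return have_idle_disk;
}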
|
|
|
disk->seq_start = MaxSector;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
conf->raid_disks = mddev->raid_disks;
|
|
|
|
conf->mddev = mddev;
|
|
|
|
INIT_LIST_HEAD(&conf->retry_list);
|
2015-08-14 01:11:10 +00:00
|
|
|
INIT_LIST_HEAD(&conf->bio_end_io_list);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
spin_lock_init(&conf->resync_lock);
|
2006-01-06 08:20:12 +00:00
|
|
|
init_waitqueue_head(&conf->wait_barrier);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2005-06-22 00:17:23 +00:00
|
|
|
bio_list_init(&conf->pending_bio_list);
|
2011-10-11 05:50:01 +00:00
|
|
|
conf->pending_count = 0;
|
2011-10-26 00:54:39 +00:00
|
|
|
conf->recovery_disabled = mddev->recovery_disabled - 1;
|
2005-06-22 00:17:23 +00:00
|
|
|
|
2011-12-22 23:17:57 +00:00
|
|
|
err = -EIO;
|
2011-12-22 23:17:56 +00:00
|
|
|
for (i = 0; i < conf->raid_disks * 2; i++) {
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
disk = conf->mirrors + i;
|
|
|
|
|
2011-12-22 23:17:57 +00:00
|
|
|
if (i < conf->raid_disks &&
|
|
|
|
disk[conf->raid_disks].rdev) {
|
|
|
|
/* This slot has a replacement. */
|
|
|
|
if (!disk->rdev) {
|
|
|
|
/* No original, just make the replacement
|
|
|
|
* a recovering spare
|
|
|
|
*/
|
|
|
|
disk->rdev =
|
|
|
|
disk[conf->raid_disks].rdev;
|
|
|
|
disk[conf->raid_disks].rdev = NULL;
|
|
|
|
} else if (!test_bit(In_sync, &disk->rdev->flags))
|
|
|
|
/* Original is not in_sync - bad */
|
|
|
|
goto abort;
|
|
|
|
}
|
|
|
|
|
2006-06-26 07:27:40 +00:00
|
|
|
if (!disk->rdev ||
|
|
|
|
!test_bit(In_sync, &disk->rdev->flags)) {
|
2005-04-16 22:20:36 +00:00
|
|
|
disk->head_position = 0;
|
2012-05-22 03:55:31 +00:00
|
|
|
if (disk->rdev &&
|
|
|
|
(disk->rdev->saved_raid_disk < 0))
|
2007-08-22 21:01:52 +00:00
|
|
|
conf->fullsync = 1;
|
2012-07-31 00:03:53 +00:00
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
2009-12-14 01:49:51 +00:00
|
|
|
|
|
|
|
err = -ENOMEM;
|
2012-07-03 05:56:52 +00:00
|
|
|
conf->thread = md_register_thread(raid1d, mddev, "raid1");
|
2016-11-02 03:16:50 +00:00
|
|
|
if (!conf->thread)
|
2009-12-14 01:49:51 +00:00
|
|
|
goto abort;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2009-12-14 01:49:51 +00:00
|
|
|
return conf;
|
|
|
|
|
|
|
|
abort:
|
|
|
|
if (conf) {
|
2018-05-20 22:25:52 +00:00
|
|
|
mempool_exit(&conf->r1bio_pool);
|
2009-12-14 01:49:51 +00:00
|
|
|
kfree(conf->mirrors);
|
|
|
|
safe_put_page(conf->tmppage);
|
|
|
|
kfree(conf->poolinfo);
|
RAID1: a new I/O barrier implementation to remove resync window
2017-02-17 19:05:56 +00:00
|
|
|
kfree(conf->nr_pending);
|
|
|
|
kfree(conf->nr_waiting);
|
|
|
|
kfree(conf->nr_queued);
|
|
|
|
kfree(conf->barrier);
|
2018-05-20 22:25:52 +00:00
|
|
|
bioset_exit(&conf->bio_split);
|
2009-12-14 01:49:51 +00:00
|
|
|
kfree(conf);
|
|
|
|
}
|
|
|
|
return ERR_PTR(err);
|
|
|
|
}
|
|
|
|
|
2014-12-15 01:56:58 +00:00
|
|
|
static void raid1_free(struct mddev *mddev, void *priv);
|
2016-01-20 21:52:20 +00:00
|
|
|
static int raid1_run(struct mddev *mddev)
|
2009-12-14 01:49:51 +00:00
|
|
|
{
|
2011-10-11 05:49:05 +00:00
|
|
|
struct r1conf *conf;
|
2009-12-14 01:49:51 +00:00
|
|
|
int i;
|
2011-10-11 05:45:26 +00:00
|
|
|
struct md_rdev *rdev;
|
2012-04-01 23:48:38 +00:00
|
|
|
int ret;
|
2012-10-11 02:28:54 +00:00
|
|
|
bool discard_supported = false;
|
2009-12-14 01:49:51 +00:00
|
|
|
|
|
|
|
if (mddev->level != 1) {
|
2016-11-02 03:16:50 +00:00
|
|
|
pr_warn("md/raid1:%s: raid level not set to mirroring (%d)\n",
|
|
|
|
mdname(mddev), mddev->level);
|
2009-12-14 01:49:51 +00:00
|
|
|
return -EIO;
|
|
|
|
}
|
|
|
|
if (mddev->reshape_position != MaxSector) {
|
2016-11-02 03:16:50 +00:00
|
|
|
pr_warn("md/raid1:%s: reshape_position set but not supported\n",
|
|
|
|
mdname(mddev));
|
2009-12-14 01:49:51 +00:00
|
|
|
return -EIO;
|
|
|
|
}
|
2017-06-05 06:05:13 +00:00
|
|
|
if (mddev_init_writes_pending(mddev) < 0)
|
|
|
|
return -ENOMEM;
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
|
2009-12-14 01:49:51 +00:00
|
|
|
* copy the already verified devices into our private RAID1
|
|
|
|
* bookkeeping area. [whatever we allocate in run(),
|
2014-12-15 01:56:58 +00:00
|
|
|
* should be freed in raid1_free()]
|
2005-04-16 22:20:36 +00:00
|
|
|
*/
|
2009-12-14 01:49:51 +00:00
|
|
|
if (mddev->private == NULL)
|
|
|
|
conf = setup_conf(mddev);
|
|
|
|
else
|
|
|
|
conf = mddev->private;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2009-12-14 01:49:51 +00:00
|
|
|
if (IS_ERR(conf))
|
|
|
|
return PTR_ERR(conf);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2017-04-05 17:21:03 +00:00
|
|
|
if (mddev->queue) {
|
md/raid1,5,10: Disable WRITE SAME until a recovery strategy is in place
There are cases where the kernel will believe that the WRITE SAME
command is supported by a block device which does not, in fact,
support WRITE SAME. This currently happens for SATA drives behind a
SAS controller, but there are probably a hundred other ways that can
happen, including drive firmware bugs.
After receiving an error for WRITE SAME the block layer will retry the
request as a plain write of zeroes, but mdraid will consider the
failure as fatal and consider the drive failed. This has the effect
that all the mirrors containing a specific set of data are each
offlined in very rapid succession resulting in data loss.
However, just bouncing the request back up to the block layer isn't
ideal either, because the whole initial request-retry sequence should
be inside the write bitmap fence, which probably means that md needs
to do its own conversion of WRITE SAME to write zero.
Until the failure scenario has been sorted out, disable WRITE SAME for
raid1, raid5, and raid10.
[neilb: added raid5]
This patch is appropriate for any -stable since 3.7 when write_same
support was added.
Cc: stable@vger.kernel.org
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Signed-off-by: NeilBrown <neilb@suse.de>
2013-06-12 14:37:43 +00:00
|
|
|
blk_queue_max_write_same_sectors(mddev->queue, 0);
|
2017-04-05 17:21:03 +00:00
|
|
|
blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
|
|
|
|
}
|
md/raid1,5,10: Disable WRITE SAME until a recovery strategy is in place
2013-06-12 14:37:43 +00:00
|
|
|
|
2012-03-19 01:46:39 +00:00
|
|
|
rdev_for_each(rdev, mddev) {
|
2011-06-07 22:50:35 +00:00
|
|
|
if (!mddev->gendisk)
|
|
|
|
continue;
|
2009-12-14 01:49:51 +00:00
|
|
|
disk_stack_limits(mddev->gendisk, rdev->bdev,
|
|
|
|
rdev->data_offset << 9);
|
2012-10-11 02:28:54 +00:00
|
|
|
if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
|
|
|
|
discard_supported = true;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
2005-06-22 00:17:23 +00:00
|
|
|
|
2009-12-14 01:49:51 +00:00
|
|
|
mddev->degraded = 0;
|
2019-06-14 22:41:08 +00:00
|
|
|
for (i = 0; i < conf->raid_disks; i++)
|
2009-12-14 01:49:51 +00:00
|
|
|
if (conf->mirrors[i].rdev == NULL ||
|
|
|
|
!test_bit(In_sync, &conf->mirrors[i].rdev->flags) ||
|
|
|
|
test_bit(Faulty, &conf->mirrors[i].rdev->flags))
|
|
|
|
mddev->degraded++;
|
2019-09-03 13:12:41 +00:00
|
|
|
/*
|
|
|
|
* RAID1 needs at least one active disk
|
|
|
|
*/
|
|
|
|
if (conf->raid_disks - mddev->degraded < 1) {
|
|
|
|
ret = -EINVAL;
|
|
|
|
goto abort;
|
|
|
|
}
|
2009-12-14 01:49:51 +00:00
|
|
|
|
|
|
|
if (conf->raid_disks - mddev->degraded == 1)
|
|
|
|
mddev->recovery_cp = MaxSector;
|
|
|
|
|
2009-06-17 22:48:06 +00:00
|
|
|
if (mddev->recovery_cp != MaxSector)
|
2016-11-02 03:16:50 +00:00
|
|
|
pr_info("md/raid1:%s: not clean -- starting background reconstruction\n",
|
|
|
|
mdname(mddev));
|
|
|
|
pr_info("md/raid1:%s: active with %d out of %d mirrors\n",
|
2014-09-30 04:23:59 +00:00
|
|
|
mdname(mddev), mddev->raid_disks - mddev->degraded,
|
2005-04-16 22:20:36 +00:00
|
|
|
mddev->raid_disks);
|
2009-12-14 01:49:51 +00:00
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
|
|
|
|
* Ok, everything is just fine now
|
|
|
|
*/
|
2009-12-14 01:49:51 +00:00
|
|
|
mddev->thread = conf->thread;
|
|
|
|
conf->thread = NULL;
|
|
|
|
mddev->private = conf;
|
2016-11-18 05:16:11 +00:00
|
|
|
set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
|
2009-12-14 01:49:51 +00:00
|
|
|
|
2009-03-31 03:59:03 +00:00
|
|
|
md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2011-06-07 22:50:35 +00:00
|
|
|
if (mddev->queue) {
|
2012-10-11 02:28:54 +00:00
|
|
|
if (discard_supported)
|
2018-03-08 01:10:10 +00:00
|
|
|
blk_queue_flag_set(QUEUE_FLAG_DISCARD,
|
2012-10-11 02:28:54 +00:00
|
|
|
mddev->queue);
|
|
|
|
else
|
2018-03-08 01:10:10 +00:00
|
|
|
blk_queue_flag_clear(QUEUE_FLAG_DISCARD,
|
2012-10-11 02:28:54 +00:00
|
|
|
mddev->queue);
|
2011-06-07 22:50:35 +00:00
|
|
|
}
|
2012-04-01 23:48:38 +00:00
|
|
|
|
2019-06-14 22:41:08 +00:00
|
|
|
ret = md_integrity_register(mddev);
|
2014-12-15 01:56:57 +00:00
|
|
|
if (ret) {
|
|
|
|
md_unregister_thread(&mddev->thread);
|
2019-09-03 13:12:41 +00:00
|
|
|
goto abort;
|
2014-12-15 01:56:57 +00:00
|
|
|
}
|
2019-09-03 13:12:41 +00:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
abort:
|
|
|
|
raid1_free(mddev, conf);
|
2012-04-01 23:48:38 +00:00
|
|
|
return ret;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2014-12-15 01:56:58 +00:00
|
|
|
static void raid1_free(struct mddev *mddev, void *priv)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2014-12-15 01:56:58 +00:00
|
|
|
struct r1conf *conf = priv;
|
2009-03-31 03:39:39 +00:00
|
|
|
|
2018-05-20 22:25:52 +00:00
|
|
|
mempool_exit(&conf->r1bio_pool);
|
2005-06-22 00:17:30 +00:00
|
|
|
kfree(conf->mirrors);
|
2013-04-24 01:42:44 +00:00
|
|
|
safe_put_page(conf->tmppage);
|
2005-06-22 00:17:30 +00:00
|
|
|
kfree(conf->poolinfo);
|
RAID1: a new I/O barrier implementation to remove resync window
2017-02-17 19:05:56 +00:00
|
|
|
kfree(conf->nr_pending);
|
|
|
|
kfree(conf->nr_waiting);
|
|
|
|
kfree(conf->nr_queued);
|
|
|
|
kfree(conf->barrier);
|
2018-05-20 22:25:52 +00:00
|
|
|
bioset_exit(&conf->bio_split);
|
2005-04-16 22:20:36 +00:00
|
|
|
kfree(conf);
|
|
|
|
}
|
|
|
|
|
2011-10-11 05:47:53 +00:00
|
|
|
static int raid1_resize(struct mddev *mddev, sector_t sectors)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
|
|
|
/* no resync is happening, and there is enough space
|
|
|
|
* on all devices, so we can resize.
|
|
|
|
* We need to make sure resync covers any new space.
|
|
|
|
* If the array is shrinking we should possibly wait until
|
|
|
|
* any io in the removed space completes, but it hardly seems
|
|
|
|
* worth it.
|
|
|
|
*/
|
2012-05-22 03:55:27 +00:00
|
|
|
sector_t newsize = raid1_size(mddev, sectors, 0);
|
|
|
|
if (mddev->external_size &&
|
|
|
|
mddev->array_sectors > newsize)
|
2009-03-31 04:00:31 +00:00
|
|
|
return -EINVAL;
|
2012-05-22 03:55:27 +00:00
|
|
|
if (mddev->bitmap) {
|
2018-08-01 22:20:50 +00:00
|
|
|
int ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0);
|
2012-05-22 03:55:27 +00:00
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
md_set_array_sectors(mddev, newsize);
|
2009-03-31 04:00:31 +00:00
|
|
|
if (sectors > mddev->dev_sectors &&
|
2011-05-11 05:52:21 +00:00
|
|
|
mddev->recovery_cp > mddev->dev_sectors) {
|
2009-03-31 03:33:13 +00:00
|
|
|
mddev->recovery_cp = mddev->dev_sectors;
|
2005-04-16 22:20:36 +00:00
|
|
|
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
|
|
|
|
}
|
2009-03-31 04:00:31 +00:00
|
|
|
mddev->dev_sectors = sectors;
|
2005-07-27 18:43:28 +00:00
|
|
|
mddev->resync_max_sectors = sectors;
|
2005-04-16 22:20:36 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2011-10-11 05:47:53 +00:00
|
|
|
static int raid1_reshape(struct mddev *mddev)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
|
|
|
/* We need to:
|
|
|
|
* 1/ resize the r1bio_pool
|
|
|
|
* 2/ resize conf->mirrors
|
|
|
|
*
|
|
|
|
* We allocate a new r1bio_pool if we can.
|
|
|
|
* Then raise a device barrier and wait until all IO stops.
|
|
|
|
* Then resize conf->mirrors and swap in the new r1bio pool.
|
2005-06-22 00:17:09 +00:00
|
|
|
*
|
|
|
|
* At the same time, we "pack" the devices so that all the missing
|
|
|
|
* devices have the higher raid_disk numbers.
|
2005-04-16 22:20:36 +00:00
|
|
|
*/
|
2018-05-20 22:25:52 +00:00
|
|
|
mempool_t newpool, oldpool;
|
2005-04-16 22:20:36 +00:00
|
|
|
struct pool_info *newpoolinfo;
|
2012-07-31 00:03:52 +00:00
|
|
|
struct raid1_info *newmirrors;
|
2011-10-11 05:49:05 +00:00
|
|
|
struct r1conf *conf = mddev->private;
|
2006-03-27 09:18:13 +00:00
|
|
|
int cnt, raid_disks;
|
2006-10-03 08:15:53 +00:00
|
|
|
unsigned long flags;
|
2017-05-08 09:56:55 +00:00
|
|
|
int d, d2;
|
2018-05-20 22:25:52 +00:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
memset(&newpool, 0, sizeof(newpool));
|
|
|
|
memset(&oldpool, 0, sizeof(oldpool));
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2006-03-27 09:18:13 +00:00
|
|
|
/* Cannot change chunk_size, layout, or level */
|
2009-06-17 22:45:27 +00:00
|
|
|
if (mddev->chunk_sectors != mddev->new_chunk_sectors ||
|
2006-03-27 09:18:13 +00:00
|
|
|
mddev->layout != mddev->new_layout ||
|
|
|
|
mddev->level != mddev->new_level) {
|
2009-06-17 22:45:27 +00:00
|
|
|
mddev->new_chunk_sectors = mddev->chunk_sectors;
|
2006-03-27 09:18:13 +00:00
|
|
|
mddev->new_layout = mddev->layout;
|
|
|
|
mddev->new_level = mddev->level;
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2017-05-08 09:56:55 +00:00
|
|
|
if (!mddev_is_clustered(mddev))
|
|
|
|
md_allow_write(mddev);
|
2007-01-26 08:57:11 +00:00
|
|
|
|
2006-03-27 09:18:13 +00:00
|
|
|
raid_disks = mddev->raid_disks + mddev->delta_disks;
|
|
|
|
|
2005-06-22 00:17:09 +00:00
|
|
|
if (raid_disks < conf->raid_disks) {
|
|
|
|
cnt=0;
|
|
|
|
for (d= 0; d < conf->raid_disks; d++)
|
|
|
|
if (conf->mirrors[d].rdev)
|
|
|
|
cnt++;
|
|
|
|
if (cnt > raid_disks)
|
2005-04-16 22:20:36 +00:00
|
|
|
return -EBUSY;
|
2005-06-22 00:17:09 +00:00
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
newpoolinfo = kmalloc(sizeof(*newpoolinfo), GFP_KERNEL);
|
|
|
|
if (!newpoolinfo)
|
|
|
|
return -ENOMEM;
|
|
|
|
newpoolinfo->mddev = mddev;
|
2011-12-22 23:17:56 +00:00
|
|
|
newpoolinfo->raid_disks = raid_disks * 2;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2019-06-14 22:41:04 +00:00
|
|
|
ret = mempool_init(&newpool, NR_RAID_BIOS, r1bio_pool_alloc,
|
2019-06-14 22:41:10 +00:00
|
|
|
rbio_pool_free, newpoolinfo);
|
2018-05-20 22:25:52 +00:00
|
|
|
if (ret) {
|
2005-04-16 22:20:36 +00:00
|
|
|
kfree(newpoolinfo);
|
2018-05-20 22:25:52 +00:00
|
|
|
return ret;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
treewide: kzalloc() -> kcalloc()
2018-06-12 21:03:40 +00:00
|
|
|
newmirrors = kzalloc(array3_size(sizeof(struct raid1_info),
|
|
|
|
raid_disks, 2),
|
2011-12-22 23:17:56 +00:00
|
|
|
GFP_KERNEL);
|
2005-04-16 22:20:36 +00:00
|
|
|
if (!newmirrors) {
|
|
|
|
kfree(newpoolinfo);
|
2018-05-20 22:25:52 +00:00
|
|
|
mempool_exit(&newpool);
|
2005-04-16 22:20:36 +00:00
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
2013-06-12 01:01:22 +00:00
|
|
|
freeze_array(conf, 0);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
/* ok, everything is stopped */
|
|
|
|
oldpool = conf->r1bio_pool;
|
|
|
|
conf->r1bio_pool = newpool;
|
2005-06-22 00:17:09 +00:00
|
|
|
|
2007-08-22 21:01:53 +00:00
|
|
|
for (d = d2 = 0; d < conf->raid_disks; d++) {
|
2011-10-11 05:45:26 +00:00
|
|
|
struct md_rdev *rdev = conf->mirrors[d].rdev;
|
2007-08-22 21:01:53 +00:00
|
|
|
if (rdev && rdev->raid_disk != d2) {
|
2011-07-27 01:00:36 +00:00
|
|
|
sysfs_unlink_rdev(mddev, rdev);
|
2007-08-22 21:01:53 +00:00
|
|
|
rdev->raid_disk = d2;
|
2011-07-27 01:00:36 +00:00
|
|
|
sysfs_unlink_rdev(mddev, rdev);
|
|
|
|
if (sysfs_link_rdev(mddev, rdev))
|
2016-11-02 03:16:50 +00:00
|
|
|
pr_warn("md/raid1:%s: cannot register rd%d\n",
|
|
|
|
mdname(mddev), rdev->raid_disk);
|
2005-06-22 00:17:09 +00:00
|
|
|
}
|
2007-08-22 21:01:53 +00:00
|
|
|
if (rdev)
|
|
|
|
newmirrors[d2++].rdev = rdev;
|
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
kfree(conf->mirrors);
|
|
|
|
conf->mirrors = newmirrors;
|
|
|
|
kfree(conf->poolinfo);
|
|
|
|
conf->poolinfo = newpoolinfo;
|
|
|
|
|
2006-10-03 08:15:53 +00:00
|
|
|
spin_lock_irqsave(&conf->device_lock, flags);
|
2005-04-16 22:20:36 +00:00
|
|
|
mddev->degraded += (raid_disks - conf->raid_disks);
|
2006-10-03 08:15:53 +00:00
|
|
|
spin_unlock_irqrestore(&conf->device_lock, flags);
|
2005-04-16 22:20:36 +00:00
|
|
|
conf->raid_disks = mddev->raid_disks = raid_disks;
|
2006-03-27 09:18:13 +00:00
|
|
|
mddev->delta_disks = 0;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2013-06-12 01:01:22 +00:00
|
|
|
unfreeze_array(conf);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2015-07-06 02:26:57 +00:00
|
|
|
set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
|
2005-04-16 22:20:36 +00:00
|
|
|
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
|
|
|
|
md_wakeup_thread(mddev->thread);
|
|
|
|
|
2018-05-20 22:25:52 +00:00
|
|
|
mempool_exit(&oldpool);
|
2005-04-16 22:20:36 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-10-19 01:49:15 +00:00
|
|
|
static void raid1_quiesce(struct mddev *mddev, int quiesce)
|
2005-09-09 23:23:45 +00:00
|
|
|
{
|
2011-10-11 05:49:05 +00:00
|
|
|
struct r1conf *conf = mddev->private;
|
2005-09-09 23:23:45 +00:00
|
|
|
|
2017-10-19 01:49:15 +00:00
|
|
|
if (quiesce)
|
2013-11-14 04:16:18 +00:00
|
|
|
freeze_array(conf, 0);
|
2017-10-19 01:49:15 +00:00
|
|
|
else
|
2013-11-14 04:16:18 +00:00
|
|
|
unfreeze_array(conf);
|
2005-09-09 23:23:45 +00:00
|
|
|
}
|
|
|
|
|
2011-10-11 05:47:53 +00:00
|
|
|
static void *raid1_takeover(struct mddev *mddev)
|
2009-12-14 01:49:51 +00:00
|
|
|
{
|
|
|
|
/* raid1 can take over:
|
|
|
|
* raid5 with 2 devices, any layout or chunk size
|
|
|
|
*/
|
|
|
|
if (mddev->level == 5 && mddev->raid_disks == 2) {
|
2011-10-11 05:49:05 +00:00
|
|
|
struct r1conf *conf;
|
2009-12-14 01:49:51 +00:00
|
|
|
mddev->new_level = 1;
|
|
|
|
mddev->new_layout = 0;
|
|
|
|
mddev->new_chunk_sectors = 0;
|
|
|
|
conf = setup_conf(mddev);
|
2016-12-08 23:48:17 +00:00
|
|
|
if (!IS_ERR(conf)) {
|
2013-11-14 04:16:18 +00:00
|
|
|
/* Array must appear to be quiesced */
|
|
|
|
conf->array_frozen = 1;
|
2017-01-05 00:10:19 +00:00
|
|
|
mddev_clear_unsupported_flags(mddev,
|
|
|
|
UNSUPPORTED_MDDEV_FLAGS);
|
2016-12-08 23:48:17 +00:00
|
|
|
}
|
2009-12-14 01:49:51 +00:00
|
|
|
return conf;
|
|
|
|
}
|
|
|
|
return ERR_PTR(-EINVAL);
|
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2011-10-11 05:49:58 +00:00
|
|
|
static struct md_personality raid1_personality =
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
|
|
|
.name = "raid1",
|
2006-01-06 08:20:36 +00:00
|
|
|
.level = 1,
|
2005-04-16 22:20:36 +00:00
|
|
|
.owner = THIS_MODULE,
|
2016-01-20 21:52:20 +00:00
|
|
|
.make_request = raid1_make_request,
|
|
|
|
.run = raid1_run,
|
2014-12-15 01:56:58 +00:00
|
|
|
.free = raid1_free,
|
2016-01-20 21:52:20 +00:00
|
|
|
.status = raid1_status,
|
|
|
|
.error_handler = raid1_error,
|
2005-04-16 22:20:36 +00:00
|
|
|
.hot_add_disk = raid1_add_disk,
|
|
|
|
.hot_remove_disk= raid1_remove_disk,
|
|
|
|
.spare_active = raid1_spare_active,
|
2016-01-20 21:52:20 +00:00
|
|
|
.sync_request = raid1_sync_request,
|
2005-04-16 22:20:36 +00:00
|
|
|
.resize = raid1_resize,
|
2009-03-18 01:10:40 +00:00
|
|
|
.size = raid1_size,
|
2006-03-27 09:18:13 +00:00
|
|
|
.check_reshape = raid1_reshape,
|
2005-09-09 23:23:45 +00:00
|
|
|
.quiesce = raid1_quiesce,
|
2009-12-14 01:49:51 +00:00
|
|
|
.takeover = raid1_takeover,
|
2005-04-16 22:20:36 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
static int __init raid_init(void)
|
|
|
|
{
|
2006-01-06 08:20:36 +00:00
|
|
|
return register_md_personality(&raid1_personality);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void raid_exit(void)
|
|
|
|
{
|
2006-01-06 08:20:36 +00:00
|
|
|
unregister_md_personality(&raid1_personality);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
module_init(raid_init);
|
|
|
|
module_exit(raid_exit);
|
|
|
|
MODULE_LICENSE("GPL");
|
2009-12-14 01:49:58 +00:00
|
|
|
MODULE_DESCRIPTION("RAID1 (mirroring) personality for MD");
|
2005-04-16 22:20:36 +00:00
|
|
|
MODULE_ALIAS("md-personality-3"); /* RAID1 */
|
2006-01-06 08:20:51 +00:00
|
|
|
MODULE_ALIAS("md-raid1");
|
2006-01-06 08:20:36 +00:00
|
|
|
MODULE_ALIAS("md-level-1");
|
2011-10-11 05:50:01 +00:00
|
|
|
|
|
|
|
module_param(max_queued_requests, int, S_IRUGO|S_IWUSR);
|