md/raid1: use bucket based mechanism for IO serialization

Since raid1 already uses a bucket based mechanism to reduce
conflicts between write IO and resync IO, we can speed up IO
serialization by reusing the same mechanism.

To align with the barrier bucket mechanism, we create arrays
(sized to the same BARRIER_BUCKETS_NR) for the spinlock, rb
tree and waitqueue. Multiple spinlocks reduce lock contention,
multiple rb trees speed up lookups, and multiple waitqueues
mitigate the thundering herd problem.
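
For reference, each bucket bundles the three per-bucket primitives
named above. Below is a minimal sketch of the layout and of how a
request could be mapped to its bucket; struct serial_in_rdev and its
fields come from this commit, while sector_to_serial() is a
hypothetical helper and sector_to_idx() is assumed to be the hash
that raid1's barrier code already uses:

	/* One serialization bucket (fields as initialized in
	 * rdev_init_serial() below). */
	struct serial_in_rdev {
		struct rb_root_cached serial_rb;  /* ranges of in-flight IO */
		spinlock_t serial_lock;           /* protects serial_rb */
		wait_queue_head_t serial_io_wait; /* waiters on this bucket */
	};

	/* Hypothetical helper: hash a sector to its bucket, so a request
	 * only contends on one of the BARRIER_BUCKETS_NR locks, trees
	 * and waitqueues. */
	static inline struct serial_in_rdev *
	sector_to_serial(struct md_rdev *rdev, sector_t sector)
	{
		return &rdev->serial[sector_to_idx(sector)];
	}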

Signed-off-by: Guoqing Jiang <guoqing.jiang@cloud.ionos.com>
Signed-off-by: Song Liu <songliubraving@fb.com>
commit 025471f9f5 (parent 69b00b5bb2)
Author: Guoqing Jiang
Date:   2019-12-23 10:49:01 +01:00
Committed-by: Song Liu
2 changed files with 19 additions and 8 deletions

--- a/drivers/md/md.c
+++ b/drivers/md/md.c

@@ -130,7 +130,7 @@ static void rdev_uninit_serial(struct md_rdev *rdev)
 	if (!test_and_clear_bit(CollisionCheck, &rdev->flags))
 		return;
 
-	kfree(rdev->serial);
+	kvfree(rdev->serial);
 	rdev->serial = NULL;
 }
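
The kfree() -> kvfree() change above pairs with the kvmalloc() below:
kvmalloc() may satisfy a large bucket-array allocation from vmalloc
space, and such memory must be freed with kvfree(), which handles both
kmalloc- and vmalloc-backed pointers. A generic sketch of the pattern,
not taken from this commit:

	p = kvmalloc(n * sizeof(*p), GFP_KERNEL); /* kmalloc, else vmalloc */
	if (!p)
		return -ENOMEM;
	...
	kvfree(p); /* correct for either backing allocator */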
@@ -144,18 +144,26 @@ static void rdevs_uninit_serial(struct mddev *mddev)
 static int rdev_init_serial(struct md_rdev *rdev)
 {
+	/* serial_nums equals with BARRIER_BUCKETS_NR */
+	int i, serial_nums = 1 << ((PAGE_SHIFT - ilog2(sizeof(atomic_t))));
 	struct serial_in_rdev *serial = NULL;
 
 	if (test_bit(CollisionCheck, &rdev->flags))
 		return 0;
 
-	serial = kmalloc(sizeof(struct serial_in_rdev), GFP_KERNEL);
+	serial = kvmalloc(sizeof(struct serial_in_rdev) * serial_nums,
+			  GFP_KERNEL);
 	if (!serial)
 		return -ENOMEM;
 
-	spin_lock_init(&serial->serial_lock);
-	serial->serial_rb = RB_ROOT_CACHED;
-	init_waitqueue_head(&serial->serial_io_wait);
+	for (i = 0; i < serial_nums; i++) {
+		struct serial_in_rdev *serial_tmp = &serial[i];
+
+		spin_lock_init(&serial_tmp->serial_lock);
+		serial_tmp->serial_rb = RB_ROOT_CACHED;
+		init_waitqueue_head(&serial_tmp->serial_io_wait);
+	}
 
 	rdev->serial = serial;
 	set_bit(CollisionCheck, &rdev->flags);
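
With the common values PAGE_SHIFT = 12 (4 KiB pages) and
sizeof(atomic_t) = 4 bytes (both architecture-dependent assumptions),
the serial_nums expression above works out to the same bucket count
raid1 uses for its barriers:

	/* serial_nums = 1 << (PAGE_SHIFT - ilog2(sizeof(atomic_t)))
	 *             = 1 << (12 - 2)
	 *             = 1024  == BARRIER_BUCKETS_NR
	 * i.e. one bucket per atomic_t that fits in a page, matching
	 * the sizing of raid1's per-bucket barrier counters.
	 */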