From: Shaohua Li
Sometimes a disk can have broken tracks where the data is inaccessible,
but data in other parts can be accessed in normal way. MD RAID supports
such disks. But we don't have a good way to test it, because we can't
control which part of a physical disk is bad. For a virtual disk, this
can be easily controlled.
This patch adds a new 'badblock' attribute. Configure it in this way:
echo "+1-100" > xxx/badblock, this will make sector [1-100] as bad
blocks.
echo "-20-30" > xxx/badblock, this will make sector [20-30] good
Signed-off-by: Shaohua Li
---
drivers/block/test_blk.c | 206 +--
1 file changed, 200 insertions(+), 6 deletions(-)
diff --git a/drivers/block/test_blk.c b/drivers/block/test_blk.c
index 631dae4..54647e9 100644
--- a/drivers/block/test_blk.c
+++ b/drivers/block/test_blk.c
@@ -16,6 +16,7 @@
#include
#include
#include
+#include <linux/interval_tree_generic.h>
#define SECTOR_SHIFT 9
#define PAGE_SECTORS_SHIFT (PAGE_SHIFT - SECTOR_SHIFT)
@@ -112,6 +113,7 @@ struct testb_device {
struct testb *testb;
struct radix_tree_root pages;
struct radix_tree_root cache;
+ struct rb_root badblock_tree;
unsigned long flags;
unsigned int curr_cache;
@@ -238,6 +240,185 @@ static ssize_t testb_device_power_store(struct config_item *item,
CONFIGFS_ATTR(testb_device_, power);
+/*
+ * One bad-block range [start, end], inclusive, in 512-byte sectors
+ * (SECTOR_SHIFT is 9).  Ranges live in the per-device interval tree
+ * rooted at testb_device->badblock_tree.
+ */
+struct testb_bb_range {
+ struct rb_node rb;
+ sector_t start;
+ sector_t end;
+ sector_t __subtree_last; /* cache maintained by the interval-tree code */
+};
+#define START(node) ((node)->start)
+#define LAST(node) ((node)->end)
+
+/* Instantiates static testb_bb_{insert,remove,iter_first,iter_next}(). */
+INTERVAL_TREE_DEFINE(struct testb_bb_range, rb, sector_t, __subtree_last,
+ START, LAST, static, testb_bb);
+
+/*
+ * Re-key an existing range to [start, end].  The node must be removed
+ * from the tree before its endpoints change and re-inserted afterwards,
+ * so the interval tree's cached __subtree_last values stay consistent.
+ */
+static void testb_bb_adjust_range(struct rb_root *root,
+ struct testb_bb_range *range, sector_t start, sector_t end)
+{
+ testb_bb_remove(range, root);
+ range->start = start;
+ range->end = end;
+ testb_bb_insert(range, root);
+}
+
+/*
+ * Mark sectors [start, end] as bad.  Any existing ranges overlapping
+ * the new interval are merged into one covering range.  Returns 0 on
+ * success or -ENOMEM.  GFP_ATOMIC suggests callers hold a spinlock
+ * (presumably dev->lock) -- NOTE(review): confirm against callers.
+ */
+static int testb_bb_insert_range(struct testb_device *dev, sector_t start,
+ sector_t end)
+{
+ struct rb_root *root = &dev->badblock_tree;
+ struct testb_bb_range *first, *next;
+ sector_t nstart, nend;
+
+ first = testb_bb_iter_first(root, start, end);
+ /* No overlap with an existing range: insert a fresh node. */
+ if (!first) {
+ first = kmalloc(sizeof(*first), GFP_ATOMIC);
+ if (!first)
+ return -ENOMEM;
+ first->start = start;
+ first->end = end;
+ testb_bb_insert(first, root);
+ return 0;
+ }
+
+ /* Grow [nstart, nend] over every overlapping range, freeing extras. */
+ nstart = min(start, first->start);
+ nend = max(first->end, end);
+ while (true) {
+ next = testb_bb_iter_next(first, start, end);
+ if (!next)
+ break;
+ nend = max(nend, next->end);
+ testb_bb_remove(next, root);
+ kfree(next);
+ }
+ /* Reuse the first node to hold the merged interval. */
+ testb_bb_adjust_range(root, first, nstart, nend);
+ return 0;
+}
+
+/*
+ * Clear bad-block state for sectors [start, end].  Overlapping ranges
+ * are trimmed, deleted, or split as needed.  Returns 0, or -ENOMEM if
+ * splitting a range requires an allocation that fails.
+ */
+static int testb_bb_remove_range(struct testb_device *dev, sector_t start,
+ sector_t end)
+{
+ struct testb_bb_range *first;
+ struct rb_root *root = &dev->badblock_tree;
+
+ first = testb_bb_iter_first(root, start, end);
+ while (first) {
+ /* [start, end] lies strictly inside: split into two ranges. */
+ if (first->start < start && first->end > end) {
+ sector_t tmp = first->end;
+
+ testb_bb_adjust_range(root, first, first->start,
+ start - 1);
+ return testb_bb_insert_range(dev, end + 1, tmp);
+ }
+
+ /* Range fully covered by [start, end]: drop it. */
+ if (first->start >= start && first->end <= end) {
+ testb_bb_remove(first, root);
+ kfree(first);
+ first = testb_bb_iter_first(root, start, end);
+ continue;
+ }
+
+ /* Overlaps on the left: keep the head, trim the tail. */
+ if (first->start < start) {
+ testb_bb_adjust_range(root, first, first->start,
+ start - 1);
+ first = testb_bb_iter_first(root, start, end);
+ continue;
+ }
+
+ /* Only remaining case: overlaps on the right; trim the head. */
+ WARN_ON(first->end <= end);
+ testb_bb_adjust_range(root, first, end + 1, first->end);
+ return 0;
+ }
+ return 0;
+}
+
+/*
+ * Return true if any sector in [start, end] is currently marked bad.
+ * Caller must hold dev->lock (enforced by the assertion).
+ */
+static bool testb_bb_in_range(struct testb_device *dev, sector_t start,
+ sector_t end)
+{
+ assert_spin_locked(&dev->lock);
+ return testb_bb_iter_first(&dev->badblock_tree, start, end) != NULL;
+}
+
+/*
+ * Free every bad-block range of the device by repeatedly pulling the
+ * first range overlapping the full sector space [0, ~0].
+ */
+static void testb_bb_clear_all(struct testb_device *dev)
+{
+ struct testb_bb_range *iter;
+
+ while ((iter = testb_bb_iter_first(&dev->badblock_tree, 0,
+ ~(sector_t)0))) {
+ testb_bb_remove(iter, &dev->badblock_tree);
+ kfree(iter);
+ }
+}
+
+static ssize_t testb_device_badblock_show(struct config_item *item, char