Re: [PATCH 6/9] btrfs: refactor the code that read from disk

2018-06-01 Thread Goffredo Baroncelli
On 05/30/2018 01:07 PM, Daniel Kiper wrote:
> On Wed, May 16, 2018 at 08:48:16PM +0200, Goffredo Baroncelli wrote:
>> This is a preparatory patch, to help the adding of the RAID 5/6 recovery
> 
> Please move "This is a preparatory patch" sentence below...
> 
>> code. In case of availability of all disks, this code is good for all the
>> RAID profiles. However in case of failure, the error handling is quite
>> different. Refactoring this code increases the general readability.
> 
> s/readability/readability too/?
> 
> You can put "This is a preparatory patch" in separate line here.
> Same for the other patches.

What about

btrfs: Refactor the code that reads from disk

Move the code in charge of reading the data from disk into a separate
function. This helps to separate the error handling logic (which depends
on the RAID profile in use) from the read-from-disk logic.
Refactoring this code increases the general readability too.

This is a preparatory patch to help the addition of the RAID 5/6 recovery
code.

> 
>> Signed-off-by: Goffredo Baroncelli 
>> ---
>>  grub-core/fs/btrfs.c | 85 +---
>>  1 file changed, 49 insertions(+), 36 deletions(-)
>>
>> diff --git a/grub-core/fs/btrfs.c b/grub-core/fs/btrfs.c
>> index 51f300829..63651928b 100644
>> --- a/grub-core/fs/btrfs.c
>> +++ b/grub-core/fs/btrfs.c
>> @@ -625,6 +625,47 @@ find_device (struct grub_btrfs_data *data, 
>> grub_uint64_t id)
>>return ctx.dev_found;
>>  }
>>
>> +static grub_err_t
>> +btrfs_read_from_chunk (struct grub_btrfs_data *data,
>> +   struct grub_btrfs_chunk_item *chunk,
>> +   grub_uint64_t stripen, grub_uint64_t stripe_offset,
>> +   int redundancy, grub_uint64_t csize,
>> +   void *buf)
>> +{
>> +
>> +struct grub_btrfs_chunk_stripe *stripe;
>> +grub_disk_addr_t paddr;
>> +grub_device_t dev;
>> +grub_err_t err;
>> +
>> +stripe = (struct grub_btrfs_chunk_stripe *) (chunk + 1);
>> +/* Right now the redundancy handling is easy.
>> +   With RAID5-like it will be more difficult.  */
>> +stripe += stripen + redundancy;
>> +
>> +paddr = grub_le_to_cpu64 (stripe->offset) + stripe_offset;
>> +
>> +grub_dprintf ("btrfs", "stripe %" PRIxGRUB_UINT64_T
>> +  " maps to 0x%" PRIxGRUB_UINT64_T "\n",
>> +  stripen, stripe->offset);
>> +grub_dprintf ("btrfs", "reading paddr 0x%" PRIxGRUB_UINT64_T "\n", 
>> paddr);
>> +
>> +dev = find_device (data, stripe->device_id);
>> +if (!dev)
>> +  {
>> +grub_dprintf ("btrfs",
>> +  "couldn't find a necessary member device "
>> +  "of multi-device filesystem\n");
>> +grub_errno = GRUB_ERR_NONE;
>> +return GRUB_ERR_READ_ERROR;
>> +  }
>> +
>> +err = grub_disk_read (dev->disk, paddr >> GRUB_DISK_SECTOR_BITS,
>> +  paddr & (GRUB_DISK_SECTOR_SIZE - 1),
>> +  csize, buf);
>> +return err;
>> +}
>> +
>>  static grub_err_t
>>  grub_btrfs_read_logical (struct grub_btrfs_data *data, grub_disk_addr_t 
>> addr,
>>   void *buf, grub_size_t size, int recursion_depth)
>> @@ -638,7 +679,6 @@ grub_btrfs_read_logical (struct grub_btrfs_data *data, 
>> grub_disk_addr_t addr,
>>grub_err_t err = 0;
>>struct grub_btrfs_key key_out;
>>int challoc = 0;
>> -  grub_device_t dev;
>>struct grub_btrfs_key key_in;
>>grub_size_t chsize;
>>grub_disk_addr_t chaddr;
>> @@ -879,42 +919,15 @@ grub_btrfs_read_logical (struct grub_btrfs_data *data, 
>> grub_disk_addr_t addr,
>>
>>  for (i = 0; i < redundancy; i++)
>>{
>> -struct grub_btrfs_chunk_stripe *stripe;
>> -grub_disk_addr_t paddr;
>> -
>> -stripe = (struct grub_btrfs_chunk_stripe *) (chunk + 1);
>> -/* Right now the redundancy handling is easy.
>> -   With RAID5-like it will be more difficult.  */
>> -stripe += stripen + i;
>> -
>> -paddr = grub_le_to_cpu64 (stripe->offset) + stripe_offset;
>> -
>> -grub_dprintf ("btrfs", "stripe %" PRIxGRUB_UINT64_T
>> -  " maps to 0x%" PRIxGRUB_UINT64_T "\n",
>> -  stripen, stripe->offset);
>> -grub_dprintf ("btrfs", "reading paddr 0x%" PRIxGRUB_UINT64_T
>> -  "\n", paddr);
>> -
>> -dev = find_device (data, stripe->device_id);
>> -if (!dev)
>> -  {
>> -grub_dprintf ("btrfs",
>> -  "couldn't find a necessary member device "
>> -  "of multi-device filesystem\n");
>> -err = grub_errno;
>> -grub_errno = GRUB_ERR_NONE;
>> -continue;
>> -  }
>> -
>> -err = grub_disk_read (dev->disk, paddr >> GRUB_DISK_SECTOR_BITS,
>> -
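
Just to make the intent concrete for reviewers: with the helper factored out,
the redundancy loop in grub_btrfs_read_logical should reduce to roughly the
sketch below. This is only a sketch using the names from the patch; the exact
loop termination and error handling are not visible in the quoted hunk.

    for (i = 0; i < redundancy; i++)
      {
        /* Try the i-th copy/mirror; btrfs_read_from_chunk() is the helper
           introduced above and selects the stripe as stripen + i.  */
        err = btrfs_read_from_chunk (data, chunk, stripen, stripe_offset,
                                     i, csize, buf);
        if (err == GRUB_ERR_NONE)
          break;                      /* this copy was read successfully */
        grub_errno = GRUB_ERR_NONE;   /* otherwise try the next copy */
      }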

Re: [PATCH 8/9] btrfs: make more generic the code for RAID 6 rebuilding

2018-06-01 Thread Goffredo Baroncelli
On 05/30/2018 02:05 PM, Daniel Kiper wrote:
> On Wed, May 16, 2018 at 08:48:18PM +0200, Goffredo Baroncelli wrote:
>> The original code which handles the recovery of a RAID 6 disks array
>> assumes that all reads are multiple of 1 << GRUB_DISK_SECTOR_BITS and it
>> assumes that all the I/O is done via the struct grub_diskfilter_segment.
>> This is not true for the btrfs code. In order to reuse the native
>> grub_raid6_recover() code, it is modified to not call
>> grub_diskfilter_read_node() directly, but to call an handler passed
>> as argument.
> 
> s/as argument/as an argument/
ok
> 
>> Signed-off-by: Goffredo Baroncelli 
>> ---
>>  grub-core/disk/raid6_recover.c | 53 ++
>>  include/grub/diskfilter.h  |  9 ++
>>  2 files changed, 44 insertions(+), 18 deletions(-)
>>
>> diff --git a/grub-core/disk/raid6_recover.c b/grub-core/disk/raid6_recover.c
>> index aa674f6ca..10ee495e2 100644
>> --- a/grub-core/disk/raid6_recover.c
>> +++ b/grub-core/disk/raid6_recover.c
>> @@ -74,14 +74,25 @@ mod_255 (unsigned x)
>>  }
>>
>>  static grub_err_t
>> -grub_raid6_recover (struct grub_diskfilter_segment *array, int disknr, int 
>> p,
>> -char *buf, grub_disk_addr_t sector, grub_size_t size)
>> +raid6_recover_read_node (void *data, int disk_nr,
> 
> s/disk_nr/disknr/

OK

>> +grub_uint64_t sector,
>> +void *buf, grub_size_t size)
>> +{
>> +struct grub_diskfilter_segment *array = data;
> 
> Please add one empty line here.
> 
>> +return grub_diskfilter_read_node (&array->nodes[disk_nr],
>> +  (grub_disk_addr_t)sector,
>> +  size >> GRUB_DISK_SECTOR_BITS, buf);
>> +}
>> +
>> +grub_err_t
>> +grub_raid6_recover_gen (void *data, grub_uint64_t nstripes, int disknr, int 
>> p,
>> +char *buf, grub_uint64_t sector, grub_size_t size,
>> +raid_recover_read_t read_func, int layout)
> 
> Could you change the order of read_func and layout arguments?
> I mean make read_func the last one and put the layout before it.
OK
> 
>>  {
>>int i, q, pos;
>>int bad1 = -1, bad2 = -1;
>>char *pbuf = 0, *qbuf = 0;
>>
>> -  size <<= GRUB_DISK_SECTOR_BITS;
>>pbuf = grub_zalloc (size);
>>if (!pbuf)
>>  goto quit;
>> @@ -91,17 +102,17 @@ grub_raid6_recover (struct grub_diskfilter_segment 
>> *array, int disknr, int p,
>>  goto quit;
>>
>>q = p + 1;
>> -  if (q == (int) array->node_count)
>> +  if (q == (int) nstripes)
>>  q = 0;
>>
>>pos = q + 1;
>> -  if (pos == (int) array->node_count)
>> +  if (pos == (int) nstripes)
>>  pos = 0;
>>
>> -  for (i = 0; i < (int) array->node_count - 2; i++)
>> +  for (i = 0; i < (int) nstripes - 2; i++)
>>  {
>>int c;
>> -  if (array->layout & GRUB_RAID_LAYOUT_MUL_FROM_POS)
>> +  if (layout & GRUB_RAID_LAYOUT_MUL_FROM_POS)
>>  c = pos;
>>else
>>  c = i;
>> @@ -109,8 +120,7 @@ grub_raid6_recover (struct grub_diskfilter_segment 
>> *array, int disknr, int p,
>>  bad1 = c;
>>else
>>  {
>> -  if (! grub_diskfilter_read_node (&array->nodes[pos], sector,
>> -   size >> GRUB_DISK_SECTOR_BITS, buf))
>> +  if (!read_func(data, pos, sector, buf, size))
>>  {
>>grub_crypto_xor (pbuf, pbuf, buf, size);
>>grub_raid_block_mulx (c, buf, size);
>> @@ -128,7 +138,7 @@ grub_raid6_recover (struct grub_diskfilter_segment 
>> *array, int disknr, int p,
>>  }
>>
>>pos++;
>> -  if (pos == (int) array->node_count)
>> +  if (pos == (int) nstripes)
>>  pos = 0;
>>  }
>>
>> @@ -139,16 +149,14 @@ grub_raid6_recover (struct grub_diskfilter_segment 
>> *array, int disknr, int p,
>>if (bad2 < 0)
>>  {
>>/* One bad device */
>> -  if ((! grub_diskfilter_read_node (&array->nodes[p], sector,
>> -size >> GRUB_DISK_SECTOR_BITS, buf)))
>> +  if (!read_func(data, p, sector, buf, size))
>>  {
>>grub_crypto_xor (buf, buf, pbuf, size);
>>goto quit;
>>  }
>>
>>grub_errno = GRUB_ERR_NONE;
>> -  if (grub_diskfilter_read_node (&array->nodes[q], sector,
>> - size >> GRUB_DISK_SECTOR_BITS, buf))
>> +  if (read_func(data, q, sector, buf, size))
>>  goto quit;
>>
>>grub_crypto_xor (buf, buf, qbuf, size);
>> @@ -160,14 +168,12 @@ grub_raid6_recover (struct grub_diskfilter_segment 
>> *array, int disknr, int p,
>>/* Two bad devices */
>>unsigned c;
>>
>> -  if (grub_diskfilter_read_node (&array->nodes[p], sector,
>> - size >> GRUB_DISK_SECTOR_BITS, buf))
>> +  if (read_func(data, p, sector, buf, size))
>>  goto quit;
>>
>>grub_crypto_xor (pbuf, pbuf, buf, size);
>>
>> -
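
For readers who only have the truncated hunk above: the handler type added to
include/grub/diskfilter.h and the thin wrapper that keeps the old entry point
working are not visible here, so below is a sketch of what they presumably
look like, reconstructed from the calls quoted above. It keeps read_func
before layout, as in this version of the patch (Daniel's requested reorder
would swap the last two parameters), and it is not a verbatim copy.

    /* Read SIZE bytes of stripe DISKNR at SECTOR into BUF; DATA is an
       opaque pointer interpreted by the handler.  */
    typedef grub_err_t (*raid_recover_read_t) (void *data, int disknr,
                                               grub_uint64_t sector,
                                               void *buf, grub_size_t size);

    grub_err_t
    grub_raid6_recover_gen (void *data, grub_uint64_t nstripes, int disknr,
                            int p, char *buf, grub_uint64_t sector,
                            grub_size_t size, raid_recover_read_t read_func,
                            int layout);

    /* The old diskfilter entry point keeps its prototype and forwards to
       the generic version.  Since the generic code no longer shifts the
       size (the "size <<= GRUB_DISK_SECTOR_BITS" line was removed), the
       sectors-to-bytes conversion presumably moves here.  */
    static grub_err_t
    grub_raid6_recover (struct grub_diskfilter_segment *array, int disknr,
                        int p, char *buf, grub_disk_addr_t sector,
                        grub_size_t size)
    {
      return grub_raid6_recover_gen (array, array->node_count, disknr, p,
                                     buf, sector,
                                     size << GRUB_DISK_SECTOR_BITS,
                                     raid6_recover_read_node, array->layout);
    }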

Re: [PATCH 9/9] btrfs: add RAID 6 recovery for a btrfs filesystem.

2018-06-01 Thread Goffredo Baroncelli
On 05/30/2018 02:15 PM, Daniel Kiper wrote:
> On Wed, May 16, 2018 at 08:48:19PM +0200, Goffredo Baroncelli wrote:
>> Add the RAID 6 recovery, in order to use a RAID 6 filesystem even if some
>> disks (up to two) are missing. This code use the old md RAID 6 code already
> 
> s/the old md/the md/

ok

>> present in grub.
>>
>> Signed-off-by: Goffredo Baroncelli 
>> ---
>>  grub-core/fs/btrfs.c | 45 +++-
>>  1 file changed, 40 insertions(+), 5 deletions(-)
>>
>> diff --git a/grub-core/fs/btrfs.c b/grub-core/fs/btrfs.c
>> index 5fcaad86f..3d71b954e 100644
>> --- a/grub-core/fs/btrfs.c
>> +++ b/grub-core/fs/btrfs.c
>> @@ -30,6 +30,7 @@
>>  #include 
>>  #include 
>>  #include 
>> +#include 
>>
>>  GRUB_MOD_LICENSE ("GPLv3+");
>>
>> @@ -695,11 +696,35 @@ rebuild_raid5 (struct raid56_buffer *buffers, 
>> grub_uint64_t nstripes,
>> csize);
>>  }
>>
>> +static grub_err_t
>> +raid6_recover_read_buffer (void *data, int disk_nr,
>> +   grub_uint64_t addr __attribute__ ((unused)),
>> +   void *dest, grub_size_t size)
>> +{
>> +struct raid56_buffer *buffers = data;
>> +
>> +grub_memcpy(dest, buffers[disk_nr].buf, size);
> 
> Could we avoid this grub_memcpy() call somehow?

I would like to; however, this would mean that the RAID 5 and the RAID 6
rebuilding code would be handled in completely different ways. This would
have a big impact on the patch set and on the related tests performed
until now.
Are you sure that this effort is really needed?
> 
>> +grub_errno = buffers[disk_nr].data_is_valid ? GRUB_ERR_NONE :
>> + GRUB_ERR_READ_ERROR;
>> +return grub_errno;
>> +}
>> +
>> +static void
>> +rebuild_raid6 (struct raid56_buffer *buffers, grub_uint64_t nstripes,
>> +   grub_uint64_t csize, grub_uint64_t parities_pos, void *dest,
>> +   grub_uint64_t stripen)
>> +
>> +{
>> +  grub_raid6_recover_gen (buffers, nstripes, stripen, parities_pos,
>> +  dest, 0, csize, raid6_recover_read_buffer, 0);
>> +}
>> +
>>  static grub_err_t
>>  raid56_read_retry (struct grub_btrfs_data *data,
>> struct grub_btrfs_chunk_item *chunk,
>> grub_uint64_t stripe_offset, grub_uint64_t stripen,
>> -   grub_uint64_t csize, void *buf)
>> +   grub_uint64_t csize, void *buf, grub_uint64_t parities_pos)
>>  {
>>
>>struct raid56_buffer *buffers = NULL;
>> @@ -780,6 +805,15 @@ raid56_read_retry (struct grub_btrfs_data *data,
>>ret = GRUB_ERR_READ_ERROR;
>>goto cleanup;
>>  }
>> +  else if (failed_devices > 2 && (chunk_type & GRUB_BTRFS_CHUNK_TYPE_RAID6))
>> +{
>> +  grub_dprintf ("btrfs",
>> +"not enough disks for raid6: total %" PRIuGRUB_UINT64_T
>> +", missing %" PRIuGRUB_UINT64_T "\n",
>> +nstripes, failed_devices);
>> +  ret = GRUB_ERR_READ_ERROR;
>> +  goto cleanup;
>> +}
>>else
>>  {
>>grub_dprintf ("btrfs",
>> @@ -796,7 +830,7 @@ raid56_read_retry (struct grub_btrfs_data *data,
>>  }
>>else
>>  {
>> -  grub_dprintf ("btrfs", "called rebuild_raid6(), NOT IMPLEMENTED\n");
>> +  rebuild_raid6 (buffers, nstripes, csize, parities_pos, buf, stripen);
>>  }
>>
>>  cleanup:
>> @@ -891,8 +925,9 @@ grub_btrfs_read_logical (struct grub_btrfs_data *data, 
>> grub_disk_addr_t addr,
>>  int is_raid56;
>>  grub_uint64_t parities_pos = 0;
>>
>> -is_raid56 = !!(grub_le_to_cpu64 (chunk->type) &
>> -   GRUB_BTRFS_CHUNK_TYPE_RAID5);
>> +is_raid56 = !!(grub_le_to_cpu64 (chunk->type) &
>> +   (GRUB_BTRFS_CHUNK_TYPE_RAID5|
> 
> Space before "|" please?
OK
> 
>> +GRUB_BTRFS_CHUNK_TYPE_RAID6));
>>
>>  if (grub_le_to_cpu64 (chunk->size) <= off)
>>{
>> @@ -1089,7 +1124,7 @@ grub_btrfs_read_logical (struct grub_btrfs_data *data, 
>> grub_disk_addr_t addr,
>>   csize, buf);
>>  if (err != GRUB_ERR_NONE)
>>err = raid56_read_retry (data, chunk, stripe_offset,
>> -   stripen, csize, buf);
>> +   stripen, csize, buf, parities_pos);
> 
> I am not sure what is parities_pos and where it is initialized.
> If it is a part of earlier patch but it is not used then I think
> that it should be a part of this patch.
You are right
> 
> Daniel
> 

I think that I have implemented all your requests in the next patch set. The
only exception is removing the memcpy in the RAID 6 recovery path.
Now I am starting the tests. When I have completed the test phase, I will
resend the patches.

BR
G.Baroncelli
-- 
gpg @keyserver.linux.it: Goffredo Baroncelli 
Key fingerprint BBF5 1610 0B64 DAC6 5F7D  17B2 0EDA 9B37 8B82 E0B5


Re: [PATCH 1/9] btrfs: add support for reading a filesystem with a RAID 5 or RAID 6 profile.

2018-06-01 Thread Goffredo Baroncelli
Hi Daniel,

my comments below
On 05/30/2018 12:07 PM, Daniel Kiper wrote:
> On Wed, May 16, 2018 at 08:48:11PM +0200, Goffredo Baroncelli wrote:
>> Signed-off-by: Goffredo Baroncelli 
[...]

>> +   *  off -> logical address to read (from the beginning of the
>> +   * chunk space)
> 
> What do you mean by "chunk"? Is it e.g. A1 + B1 region? Please make it
> clear what do you mean by "chunk".

Chunk is a very pervasive concept in BTRFS. A reader who looks at the BTRFS
RAID code must know this concept very well. I am not sure that the GRUB code
is the right place for a full description of the BTRFS chunks.

Basically, the BTRFS logical address space is mapped to the physical one via
the chunks (aka block groups). Each chunk maps a range of the logical address
space to a range on the disk(s). If more disks are involved, different
profiles might be used (linear, RAID 0... RAID 5/6). E.g.: a chunk maps a
logical range (say 0..1GB) to a physical range (say 1GB..2GB of disk1 and
3GB..4GB of disk2, in RAID 1 mode).
The chunks are the lower layer; all the BTRFS metadata are expressed in terms
of the logical addresses (the upper layer).
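
To make the stripe indexing discussed below concrete, here is a small
standalone sketch of the two-step computation for a RAID 5/6-like chunk,
in plain C with hypothetical names. The first step matches the quoted
grub_divmod64() call; the parity-rotation step is only my reading of the
"modulo number of disks" comment, not a verbatim copy of the patch.

    #include <stdint.h>

    /* nstripes = disks in the chunk (data + parity), nparities = 1 for
       RAID 5 and 2 for RAID 6.  stripe_nr is the data-stripe counter,
       i.e. the logical offset inside the chunk divided by the stripe
       length.  */
    static void
    map_stripe (uint64_t stripe_nr, uint64_t nstripes, uint64_t nparities,
                uint64_t *high, uint64_t *stripen)
    {
      /* Step 1: split the data-stripe counter into the row number (high)
         and the data-stripe index inside the row (stripen), ignoring the
         parities.  */
      *high    = stripe_nr / (nstripes - nparities);
      *stripen = stripe_nr % (nstripes - nparities);

      /* Step 2 (assumed): shift stripen by the row number so that it
         becomes the on-disk position inside the row, with the parity
         slots rotating from row to row; the math is done modulo the
         number of disks.  */
      *stripen = (*high + *stripen) % nstripes;
    }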

[...]
>> +  /*
>> +   * In the line below stripen is evaluated without considering
> 
> s/In the line below //
> 
>> +   * the parities (0 for A1, A2, A3... 1 for B1, B2...);
> 
> s/;/./
> 
>> +   */
>> +  high = grub_divmod64 (stripe_nr, nstripes - nparities, &stripen);
>> +
>> +  /*
>> +   * In the line below stripen, now consider also the parities (0
> 
> s/In the line below stripen, now/Now/

I think that "stripen" must be re-added

> 
>> +   * for A1, 1 for A2, 2 for A3); the math is done "modulo"
> 
> s/; the/. The/
> s/"modulo"/modulo/
> 
>> +   * number of disks
> 
> Full stop at the end of sentence please.
> 
> Daniel
> 


-- 
gpg @keyserver.linux.it: Goffredo Baroncelli 
Key fingerprint BBF5 1610 0B64 DAC6 5F7D  17B2 0EDA 9B37 8B82 E0B5



Re: [PATCH 7/9] btrfs: add support for recovery for a RAID 5 btrfs profiles.

2018-06-01 Thread Goffredo Baroncelli
On 05/30/2018 01:30 PM, Daniel Kiper wrote:
> On Wed, May 16, 2018 at 08:48:17PM +0200, Goffredo Baroncelli wrote:
>> Signed-off-by: Goffredo Baroncelli 
>> ---
>>  grub-core/fs/btrfs.c | 174 ++-
>>  1 file changed, 170 insertions(+), 4 deletions(-)
>>
>> diff --git a/grub-core/fs/btrfs.c b/grub-core/fs/btrfs.c
>> index 63651928b..5fcaad86f 100644
>> --- a/grub-core/fs/btrfs.c
>> +++ b/grub-core/fs/btrfs.c
>> @@ -29,6 +29,7 @@
>>  #include 
>>  #include 
>>  #include 
>> +#include 
>>
>>  GRUB_MOD_LICENSE ("GPLv3+");
>>
>> @@ -666,6 +667,150 @@ btrfs_read_from_chunk (struct grub_btrfs_data *data,
>>  return err;
>>  }
>>
>> +struct raid56_buffer {
>> +  void *buf;
>> +  int  data_is_valid;
>> +};
>> +
>> +static void
>> +rebuild_raid5 (struct raid56_buffer *buffers, grub_uint64_t nstripes,
>> +   grub_uint64_t csize)
>> +{
>> +  grub_uint64_t target = 0, i;
>> +
>> +  while (buffers[target].data_is_valid && target < nstripes)
>> +++target;
>> +
>> +  if (target == nstripes)
>> +{
>> +  grub_dprintf ("btrfs", "called rebuild_raid5(), but all disks are 
>> OK\n");
>> +  return;
>> +}
>> +
>> +  grub_dprintf ("btrfs", "rebuilding raid5 stripe #%" PRIuGRUB_UINT64_T 
>> "\n",
>> +target);
>> +  for (i = 0; i < nstripes; i++)
>> +if (i != target)
>> +  grub_crypto_xor (buffers[target].buf, buffers[target].buf, 
>> buffers[i].buf,
>> +   csize);
>> +}
>> +
>> +static grub_err_t
>> +raid56_read_retry (struct grub_btrfs_data *data,
>> +   struct grub_btrfs_chunk_item *chunk,
>> +   grub_uint64_t stripe_offset, grub_uint64_t stripen,
>> +   grub_uint64_t csize, void *buf)
>> +{
>> +
>> +  struct raid56_buffer *buffers = NULL;
>> +  grub_uint64_t nstripes = grub_le_to_cpu16 (chunk->nstripes);
>> +  grub_uint64_t chunk_type = grub_le_to_cpu64 (chunk->type);
>> +  grub_err_t ret = GRUB_ERR_NONE;
>> +  grub_uint64_t i, failed_devices;
>> +
>> +  buffers = grub_zalloc (sizeof(*buffers) * nstripes);
>> +  if (!buffers)
>> +{
>> +  ret = GRUB_ERR_OUT_OF_MEMORY;
>> +  goto cleanup;
>> +}
>> +
>> +  for (i = 0; i < nstripes; i++)
>> +{
>> +  buffers[i].buf = grub_zalloc (csize);
>> +  if (!buffers[i].buf)
>> +{
>> +  ret = GRUB_ERR_OUT_OF_MEMORY;
>> +  goto cleanup;
>> +}
>> +}
>> +
>> +  for (i = 0; i < nstripes; i++)
>> +{
>> +  struct grub_btrfs_chunk_stripe *stripe;
>> +  grub_disk_addr_t paddr;
>> +  grub_device_t dev;
>> +  grub_err_t err2;
>> +
>> +  stripe = (struct grub_btrfs_chunk_stripe *) (chunk + 1);
>> +  stripe += i;
>> +
>> +  paddr = grub_le_to_cpu64 (stripe->offset) + stripe_offset;
>> +  grub_dprintf ("btrfs", "reading paddr %" PRIxGRUB_UINT64_T
>> +" from stripe ID %" PRIxGRUB_UINT64_T "\n", paddr,
>> +stripe->device_id);
>> +
>> +  dev = find_device (data, stripe->device_id);
>> +  if (!dev)
>> +{
>> +  buffers[i].data_is_valid = 0;
>> +  grub_dprintf ("btrfs", "stripe %" PRIuGRUB_UINT64_T " FAILED (dev ID 
>> %"
>> +PRIxGRUB_UINT64_T ")\n", i, stripe->device_id);
>> +  continue;
> 
> What will happen if more than one stripe is broken?
See below
> 
>> +}
>> +
>> +  err2 = grub_disk_read (dev->disk, paddr >> GRUB_DISK_SECTOR_BITS,
>> + paddr & (GRUB_DISK_SECTOR_SIZE - 1),
>> + csize, buffers[i].buf);
>> +  if (err2 == GRUB_ERR_NONE)
>> +{
>> +  buffers[i].data_is_valid = 1;
>> +  grub_dprintf ("btrfs", "stripe %" PRIuGRUB_UINT64_T " Ok (dev ID %"
>> +PRIxGRUB_UINT64_T ")\n", i, stripe->device_id);
>> +}
>> +  else
>> +{
>> +  buffers[i].data_is_valid = 0;
>> +  grub_dprintf ("btrfs", "stripe %" PRIuGRUB_UINT64_T
>> +" FAILED (dev ID %" PRIxGRUB_UINT64_T ")\n", i,
>> +stripe->device_id);
> 
> Ditto?
See below
> 
>> +}
>> +}
>> +
>> +  failed_devices = 0;
>> +  for (i = 0; i < nstripes; i++)
>> +if (!buffers[i].data_is_valid)
>> +  ++failed_devices;
>> +  if (failed_devices > 1 && (chunk_type & GRUB_BTRFS_CHUNK_TYPE_RAID5))
>> +{
>> +  grub_dprintf ("btrfs",
>> +"not enough disks for raid5: total %" PRIuGRUB_UINT64_T
> 
> s/raid5/RAID5/ Please fix this and the rest of messages in the other patches.
OK
> 
>> +", missing %" PRIuGRUB_UINT64_T "\n",
>> +nstripes, failed_devices);
>> +  ret = GRUB_ERR_READ_ERROR;
>> +  goto cleanup;
> 
> Ahhh... Here it is! Perfect!
Right !
> 
>> +}
>> +  else
>> +{
>> +  grub_dprintf ("btrfs",
>> +"enough disks for raid5/6 rebuilding: total %"
> 
> s#raid5/6#RAID5# This is the patch for RAID 5. Right?
> Please do not mix RAID 5 changes with RAID 6 changes.
ok
> 
>> +PRIuGRUB_UINT64_T ", mi