Author: arekm                        Date: Wed Dec  1 12:56:05 2010 GMT
Module: packages                      Tag: HEAD
---- Log message:
- fix some (very unlikely to happen) scsi problems

---- Files affected:
packages/kernel:
   kernel.spec (1.860 -> 1.861), kernel-scsi-corruption.patch (NONE -> 1.1) (NEW)

---- Diffs:

================================================================
Index: packages/kernel/kernel.spec
diff -u packages/kernel/kernel.spec:1.860 packages/kernel/kernel.spec:1.861
--- packages/kernel/kernel.spec:1.860   Thu Nov 25 20:20:50 2010
+++ packages/kernel/kernel.spec Wed Dec  1 13:55:59 2010
@@ -196,6 +196,9 @@
 
 ### End netfilter
 
+# http://marc.info/?l=dm-devel&m=129073232606808&q=raw
+Patch48:       kernel-scsi-corruption.patch
+
 # http://zph.bratcheda.org/linux-2.6.26.3-zph.patch
 Patch49:       kernel-zph.patch
 
@@ -721,6 +724,9 @@
 ##
 # end of netfilter
 
+# scsi
+%patch48 -p1
+
 # zph
 %patch49 -p1
 
@@ -1535,6 +1541,9 @@
 All persons listed below can be reached at <cvs_login>@pld-linux.org
 
 $Log$
+Revision 1.861  2010/12/01 12:55:59  arekm
+- fix some (very unlikely to happen) scsi problems
+
 Revision 1.860  2010/11/25 19:20:50  arekm
 - rel 2
 

================================================================
Index: packages/kernel/kernel-scsi-corruption.patch
diff -u /dev/null packages/kernel/kernel-scsi-corruption.patch:1.1
--- /dev/null   Wed Dec  1 13:56:05 2010
+++ packages/kernel/kernel-scsi-corruption.patch        Wed Dec  1 13:55:59 2010
@@ -0,0 +1,246 @@
+>>>>> "Jens" == Jens Axboe <jax...@fusionio.com> writes:
+
+Jens> Great, the two different values and needing to sync them was
+Jens> horrible.  What kind of testing did you do? Have to be a little
+Jens> extra careful at this point.
+
+Yeah, we should probably let it soak a bit in -next just to make sure.
+
+There really aren't many devices from this millennium that don't support
+clustering. Which I guess is why we haven't seen any problems.
+
+I ended up disabling clustering in one of the FC drivers to test with a
+real workload. Threw in a BUG_ON(nsegs > queue_max_segments(q)) for good
+measure.
+
+I also tested mixing and matching clustered and non-clustered bottom
+devices while stacking with DM.
+
+New version below, fixing the things you and Matthew pointed out...
+
+
+
+block: Deprecate QUEUE_FLAG_CLUSTER and use queue_limits instead
+
+When stacking devices, a request_queue is not always available. This
+forced us to have a no_cluster flag in the queue_limits that could be
+used as a carrier until the request_queue had been set up for a
+metadevice.
+
+There were several problems with that approach. First of all, it was up
+to the stacking device to remember to set the queue flag after stacking
+had completed. Also, the queue flag and the queue limits had to be kept in
+sync at all times. We got that wrong, which could lead to us issuing
+commands that went beyond the max scatterlist limit set by the driver.
+
+The proper fix is to avoid having two flags for tracking the same thing.
+We deprecate QUEUE_FLAG_CLUSTER and use the queue limit directly in the
+block layer merging functions. The queue_limit 'no_cluster' is turned
+into 'cluster' to avoid double negatives and to ease stacking.
+Clustering defaults to being enabled as before. The queue flag logic is
+removed from the stacking function, and explicitly setting the cluster
+flag is no longer necessary in DM and MD.
+
+Reported-by: Ed Lin <ed....@promise.com>
+Signed-off-by: Martin K. Petersen <martin.peter...@oracle.com>
+
+diff --git a/block/blk-merge.c b/block/blk-merge.c
+index 77b7c26..74bc4a7 100644
+--- a/block/blk-merge.c
++++ b/block/blk-merge.c
+@@ -21,7 +21,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
+               return 0;
+ 
+       fbio = bio;
+-      cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
++      cluster = blk_queue_cluster(q);
+       seg_size = 0;
+       nr_phys_segs = 0;
+       for_each_bio(bio) {
+@@ -87,7 +87,7 @@ EXPORT_SYMBOL(blk_recount_segments);
+ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
+                                  struct bio *nxt)
+ {
+-      if (!test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
++      if (!blk_queue_cluster(q))
+               return 0;
+ 
+       if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
+@@ -123,7 +123,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
+       int nsegs, cluster;
+ 
+       nsegs = 0;
+-      cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
++      cluster = blk_queue_cluster(q);
+ 
+       /*
+        * for each bio in rq
+diff --git a/block/blk-settings.c b/block/blk-settings.c
+index 701859f..e55f5fc 100644
+--- a/block/blk-settings.c
++++ b/block/blk-settings.c
+@@ -126,7 +126,7 @@ void blk_set_default_limits(struct queue_limits *lim)
+       lim->alignment_offset = 0;
+       lim->io_opt = 0;
+       lim->misaligned = 0;
+-      lim->no_cluster = 0;
++      lim->cluster = 1;
+ }
+ EXPORT_SYMBOL(blk_set_default_limits);
+ 
+@@ -464,15 +464,6 @@ EXPORT_SYMBOL(blk_queue_io_opt);
+ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
+ {
+       blk_stack_limits(&t->limits, &b->limits, 0);
+-
+-      if (!t->queue_lock)
+-              WARN_ON_ONCE(1);
+-      else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
+-              unsigned long flags;
+-              spin_lock_irqsave(t->queue_lock, flags);
+-              queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
+-              spin_unlock_irqrestore(t->queue_lock, flags);
+-      }
+ }
+ EXPORT_SYMBOL(blk_queue_stack_limits);
+ 
+@@ -545,7 +536,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
+       t->io_min = max(t->io_min, b->io_min);
+       t->io_opt = lcm(t->io_opt, b->io_opt);
+ 
+-      t->no_cluster |= b->no_cluster;
++      t->cluster &= b->cluster;
+       t->discard_zeroes_data &= b->discard_zeroes_data;
+ 
+       /* Physical block size a multiple of the logical block size? */
+@@ -641,7 +632,6 @@ void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
+                      sector_t offset)
+ {
+       struct request_queue *t = disk->queue;
+-      struct request_queue *b = bdev_get_queue(bdev);
+ 
+       if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) {
+               char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];
+@@ -652,17 +642,6 @@ void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
+               printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
+                      top, bottom);
+       }
+-
+-      if (!t->queue_lock)
+-              WARN_ON_ONCE(1);
+-      else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
+-              unsigned long flags;
+-
+-              spin_lock_irqsave(t->queue_lock, flags);
+-              if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
+-                      queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
+-              spin_unlock_irqrestore(t->queue_lock, flags);
+-      }
+ }
+ EXPORT_SYMBOL(disk_stack_limits);
+ 
+diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
+index 013457f..41fb691 100644
+--- a/block/blk-sysfs.c
++++ b/block/blk-sysfs.c
+@@ -119,7 +119,7 @@ static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *
+ 
+ static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
+ {
+-      if (test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
++      if (blk_queue_cluster(q))
+               return queue_var_show(queue_max_segment_size(q), (page));
+ 
+       return queue_var_show(PAGE_CACHE_SIZE, (page));
+diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
+index 90267f8..e2da191 100644
+--- a/drivers/md/dm-table.c
++++ b/drivers/md/dm-table.c
+@@ -1131,11 +1131,6 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
+        */
+       q->limits = *limits;
+ 
+-      if (limits->no_cluster)
+-              queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
+-      else
+-              queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, q);
+-
+       if (!dm_table_supports_discards(t))
+               queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
+       else
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 84c46a1..52694d2 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -4296,9 +4296,6 @@ static int md_alloc(dev_t dev, char *name)
+               goto abort;
+       mddev->queue->queuedata = mddev;
+ 
+-      /* Can be unlocked because the queue is new: no concurrency */
+-      queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, mddev->queue);
+-
+       blk_queue_make_request(mddev->queue, md_make_request);
+ 
+       disk = alloc_disk(1 << shift);
+diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
+index b55b0ec..3852e51 100644
+--- a/drivers/scsi/scsi_lib.c
++++ b/drivers/scsi/scsi_lib.c
+@@ -1643,9 +1643,8 @@ struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
+ 
+       blk_queue_max_segment_size(q, dma_get_max_seg_size(dev));
+ 
+-      /* New queue, no concurrency on queue_flags */
+       if (!shost->use_clustering)
+-              queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
++              q->limits.cluster = 0;
+ 
+       /*
+        * set a reasonable default alignment on word boundaries: the
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index aae86fd..95aeeeb 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -250,7 +250,7 @@ struct queue_limits {
+ 
+       unsigned char           misaligned;
+       unsigned char           discard_misaligned;
+-      unsigned char           no_cluster;
++      unsigned char           cluster;
+       signed char             discard_zeroes_data;
+ };
+ 
+@@ -380,7 +380,6 @@ struct request_queue
+ #endif
+ };
+ 
+-#define QUEUE_FLAG_CLUSTER    0       /* cluster several segments into 1 */
+ #define QUEUE_FLAG_QUEUED     1       /* uses generic tag queueing */
+ #define QUEUE_FLAG_STOPPED    2       /* queue is stopped */
+ #define       QUEUE_FLAG_SYNCFULL     3       /* read queue has been filled */
+@@ -403,7 +402,6 @@ struct request_queue
+ #define QUEUE_FLAG_SECDISCARD  19     /* supports SECDISCARD */
+ 
+ #define QUEUE_FLAG_DEFAULT    ((1 << QUEUE_FLAG_IO_STAT) |            \
+-                               (1 << QUEUE_FLAG_CLUSTER) |            \
+                                (1 << QUEUE_FLAG_STACKABLE)    |       \
+                                (1 << QUEUE_FLAG_SAME_COMP)    |       \
+                                (1 << QUEUE_FLAG_ADD_RANDOM))
+@@ -510,6 +508,11 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
+ 
+ #define rq_data_dir(rq)               ((rq)->cmd_flags & 1)
+ 
++static inline unsigned int blk_queue_cluster(struct request_queue *q)
++{
++      return q->limits.cluster;
++}
++
+ /*
+  * We regard a request as sync, if either a read or a sync write
+  */
+
+
+--
+dm-devel mailing list
+dm-de...@redhat.com
+https://www.redhat.com/mailman/listinfo/dm-devel
\ No newline at end of file
================================================================
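
---- Illustration:

The commit message above describes the failure mode: with clustering state
duplicated between QUEUE_FLAG_CLUSTER and queue_limits, the two copies could
drift apart after stacking, and a request could end up carrying more
scatterlist segments than the driver advertised. Below is a minimal
userspace model of that drift -- not kernel code; MAX_SEGMENTS, the page
count, and count_segments() are all made up for illustration, and the
assert stands in for the BUG_ON(nsegs > queue_max_segments(q)) mentioned
in the mail:

#include <assert.h>
#include <stdio.h>

#define MAX_SEGMENTS 2	/* driver's advertised scatterlist limit */

/* Four physically adjacent pages: with clustering they merge into a
 * single scatterlist segment; without it, each page is its own segment. */
static int count_segments(int pages, int cluster)
{
	return cluster ? 1 : pages;
}

int main(void)
{
	int queue_flag_cluster = 1;	/* stale copy, never cleared on stacking */
	int limits_cluster = 0;		/* what the bottom driver actually set */

	/* The merge path trusts the stale flag: 1 segment, within the limit. */
	int accepted = count_segments(4, queue_flag_cluster);

	/* Mapping against the real clustering setting yields one segment
	 * per page, more than the driver can take. */
	int issued = count_segments(4, limits_cluster);

	printf("accepted=%d issued=%d max=%d\n", accepted, issued, MAX_SEGMENTS);
	assert(issued <= MAX_SEGMENTS);	/* fires: 4 > 2 */
	return 0;
}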
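
---- Illustration:

The fix keeps a single source of truth: 'cluster' lives only in
queue_limits, is read through blk_queue_cluster(), and stacking becomes a
plain AND (t->cluster &= b->cluster in the diff). A minimal userspace
sketch of that stacking rule, assuming a queue_limits reduced to just the
new 'cluster' byte:

#include <stdio.h>

struct queue_limits {
	unsigned char cluster;	/* 1 = adjacent segments may be merged */
};

/* Post-patch stacking rule: the stacked (top) device may cluster only
 * if every bottom device may. */
static void stack_limits(struct queue_limits *t, const struct queue_limits *b)
{
	t->cluster &= b->cluster;
}

int main(void)
{
	struct queue_limits top = { .cluster = 1 };	/* default: enabled */
	struct queue_limits fc  = { .cluster = 1 };	/* clustering HBA */
	struct queue_limits old = { .cluster = 0 };	/* non-clustering HBA */

	stack_limits(&top, &fc);
	stack_limits(&top, &old);

	/* One non-clustering bottom device disables clustering for the whole
	 * stack, and there is no second queue flag left to forget to clear. */
	printf("stacked cluster = %u\n", top.cluster);	/* prints 0 */
	return 0;
}

With clustering defaulting to enabled and every stacking operation ANDing
in the bottom device's value, DM and MD no longer need any explicit flag
handling, which is exactly what the dm-table.c and md.c hunks delete.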

---- CVS-web:
http://cvs.pld-linux.org/cgi-bin/cvsweb.cgi/packages/kernel/kernel.spec?r1=1.860&r2=1.861&f=u

_______________________________________________
pld-cvs-commit mailing list
pld-cvs-commit@lists.pld-linux.org
http://lists.pld-linux.org/mailman/listinfo/pld-cvs-commit
