Now commit 84676c1f21e8 ("genirq/affinity: assign vectors to all possible CPUs")
has been merged into v4.16-rc, and it easily allocates all offline CPUs
for some irq vectors; this can't be avoided even though the allocation
is improved.
For example, on an 8-core VM where CPUs 4-7 are not-present/offline, with 4
queues of virtio-scsi, the assigned irq affinity can take the following shape:
irq 36, cpu list 0-7
irq 37, cpu list 0-7
irq 38, cpu list 0-7
irq 39, cpu list 0-1
irq 40, cpu list 4,6
irq 41, cpu list 2-3
irq 42, cpu list 5,7
Then IO hang is triggered in case of non-SCSI_MQ.
Given storage I/O always follows a client/server model, this issue does not
exist with SCSI_MQ (blk-mq), because no I/O can be submitted to a hw queue
unless that hw queue has online CPUs.
Fix this issue by forcing use of blk-mq.
BTW, I have been using virtio-scsi (scsi_mq) for several years, and it has
been quite stable, so this shouldn't introduce extra risk.
Cc: Hannes Reinecke
Cc: Arun Easi
Cc: Omar Sandoval
Cc: "Martin K. Petersen"
Cc: James Bottomley
Cc: Christoph Hellwig
Cc: Don Brace
Cc: Kashyap Desai
Cc: Peter Rivera
Cc: Paolo Bonzini
Cc: Laurence Oberman
Cc: Mike Snitzer
Signed-off-by: Ming Lei
---
drivers/scsi/virtio_scsi.c | 59 +++---
1 file changed, 3 insertions(+), 56 deletions(-)
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 7c28e8d4955a..54e3a0f6844c 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -91,9 +91,6 @@ struct virtio_scsi_vq {
struct virtio_scsi_target_state {
seqcount_t tgt_seq;
- /* Count of outstanding requests. */
- atomic_t reqs;
-
/* Currently active virtqueue for requests sent to this target. */
struct virtio_scsi_vq *req_vq;
};
@@ -152,8 +149,6 @@ static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf)
struct virtio_scsi_cmd *cmd = buf;
struct scsi_cmnd *sc = cmd->sc;
struct virtio_scsi_cmd_resp *resp = &cmd->resp.cmd;
- struct virtio_scsi_target_state *tgt =
- scsi_target(sc->device)->hostdata;
dev_dbg(&sc->device->sdev_gendev,
"cmd %p response %u status %#02x sense_len %u\n",
@@ -210,8 +205,6 @@ static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf)
}
sc->scsi_done(sc);
-
- atomic_dec(&tgt->reqs);
}
static void virtscsi_vq_done(struct virtio_scsi *vscsi,
@@ -580,10 +573,7 @@ static int virtscsi_queuecommand_single(struct Scsi_Host *sh,
					struct scsi_cmnd *sc)
{
struct virtio_scsi *vscsi = shost_priv(sh);
- struct virtio_scsi_target_state *tgt =
- scsi_target(sc->device)->hostdata;
- atomic_inc(&tgt->reqs);
return virtscsi_queuecommand(vscsi, &vscsi->req_vqs[0], sc);
}
@@ -596,55 +586,11 @@ static struct virtio_scsi_vq *virtscsi_pick_vq_mq(struct virtio_scsi *vscsi,
	return &vscsi->req_vqs[hwq];
}
-static struct virtio_scsi_vq *virtscsi_pick_vq(struct virtio_scsi *vscsi,
-					       struct virtio_scsi_target_state *tgt)
-{
-	struct virtio_scsi_vq *vq;
-	unsigned long flags;
-	u32 queue_num;
-
-	local_irq_save(flags);
-	if (atomic_inc_return(&tgt->reqs) > 1) {
-		unsigned long seq;
-
-		do {
-			seq = read_seqcount_begin(&tgt->tgt_seq);
-			vq = tgt->req_vq;
-		} while (read_seqcount_retry(&tgt->tgt_seq, seq));
-	} else {
-		/* no writes can be concurrent because of atomic_t */
-		write_seqcount_begin(&tgt->tgt_seq);
-
-		/* keep previous req_vq if a reader just arrived */
-		if (unlikely(atomic_read(&tgt->reqs) > 1)) {
-			vq = tgt->req_vq;
-			goto unlock;
-		}
-
-		queue_num = smp_processor_id();
-		while (unlikely(queue_num >= vscsi->num_queues))
-			queue_num -= vscsi->num_queues;
-		tgt->req_vq = vq = &vscsi->req_vqs[queue_num];
- unlock:
-		write_seqcount_end(&tgt->tgt_seq);
-	}
-	local_irq_restore(flags);
-
-	return vq;
-}
-
-
static int virtscsi_queuecommand_multi(struct Scsi_Host *sh,
struct scsi_cmnd *sc)
{
struct virtio_scsi *vscsi = shost_priv(sh);
- struct virtio_scsi_target_state *tgt =
- scsi_target(sc->device)->hostdata;
- struct virtio_scsi_vq *req_vq;
-
- if (shost_use_blk_mq(sh))
-