Re: [Qemu-block] [PATCH] throttle: fix a qemu crash problem when calling blk_delete

2017-10-21 Thread sochin.jiang
Thanks for replying.

Indeed, the problem comes from the caller. I guess some code should be
reconsidered in blk_remove_bs and blk_delete, especially with throttling.

Secondly, when handling drive_del in hmp_drive_del,
throttle_timers_detach_aio_context() is called a first time in
blk_remove_bs and again through blk_unref, see below:

hmp_drive_del->
    blk_remove_bs->
        *throttle_timers_detach_aio_context*->
        ...
    blk_unref->
        blk_delete->
            blk_io_limits_disable->
                throttle_group_unregister_tgm->
                    throttle_timers_destroy->
                        *throttle_timers_detach_aio_context*->...
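
The hunk further below makes this detach idempotent, so that the second
call turns into a no-op. A minimal sketch of the resulting function
(assuming the ThrottleTimers layout from util/throttle.c, where timers[]
holds two QEMUTimer pointers; illustrative only, not the literal patch):

    void throttle_timers_detach_aio_context(ThrottleTimers *tt)
    {
        int i;

        for (i = 0; i < 2; i++) {
            /* skip timers that an earlier call already detached */
            if (tt->timers[i]) {
                timer_del(tt->timers[i]);
                timer_free(tt->timers[i]);
                tt->timers[i] = NULL;   /* mark as detached */
            }
        }
    }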


sochin



On 2017/10/20 19:43, Alberto Garcia wrote:
> On Sun 24 Nov 2013 04:55:52 AM CET, sochin.jiang wrote:
>^^^
> I guess the date in your computer is wrong :-)
>
>> commit 7ca7f0 moves the throttling-related part of the BDS life cycle
>> management to BlockBackend and adds a call to
>> throttle_timers_detach_aio_context in blk_remove_bs.  commit 1606e
>> removes a block device from its throttle group in blk_delete by calling
>> blk_io_limits_disable, which fixes an easily reproducible qemu crash. But
>> deleting a BB without a BDS inserted can just as easily crash qemu, by
>> calling bdrv_drained_begin in blk_io_limits_disable. Say, simply a
>> drive_add and then a drive_del command.
> Thanks, I can reproduce this easily by running QEMU and doing
>
>    drive_add 0 if=none,throttling.iops-total=5000
>
> followed by
>
>    drive_del none0
>
>>  void bdrv_drained_begin(BlockDriverState *bs)
>>  {
>> +    if (!bs) {
>> +        return;
>> +    }
>> +
>>      if (qemu_in_coroutine()) {
>>          bdrv_co_yield_to_drain(bs, true);
>>          return;
>> @@ -284,6 +288,10 @@ void bdrv_drained_begin(BlockDriverState *bs)
>>  
>>  void bdrv_drained_end(BlockDriverState *bs)
>>  {
>> +    if (!bs) {
>> +        return;
>> +    }
>> +
> I'd say that if someone calls bdrv_drained_begin() with a NULL pointer
> then the problem is in the caller...
>
>>  static void throttle_timer_destroy(QEMUTimer **timer)
>>  {
>> -    assert(*timer != NULL);
>> -
>>      timer_del(*timer);
>>      timer_free(*timer);
>>      *timer = NULL;
>> @@ -258,7 +256,9 @@ void throttle_timers_detach_aio_context(ThrottleTimers *tt)
>>      int i;
>>  
>>      for (i = 0; i < 2; i++) {
>> -        throttle_timer_destroy(&tt->timers[i]);
>> +        if (tt->timers[i]) {
>> +            throttle_timer_destroy(&tt->timers[i]);
>> +        }
>>      }
>>  }
> Why is this part necessary? In what situation do you end up calling
> throttle_timers_detach_aio_context() twice?
>
> Berto
>
>



[Qemu-block] [PATCH] qemu-block: add support for HMB with feature commands.

2017-10-21 Thread Minwoo Im
Add support for HMB (Host Memory Buffer) with the feature commands (Get
Features, Set Features).
The Linux nvme driver in the 4.14 tree supports HMB.
This patch makes the NVMe controller return a 32 MiB preferred HMB size to
the host via the Identify command.
Get Features and Set Features are implemented for HMB.
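
For reference: the NVMe spec expresses HMPRE and HMMIN in 4 KiB units, so
the hmpre value set below works out to 0x2000 * 4 KiB = 8192 * 4096 B = 32 MiB.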

Signed-off-by: Minwoo Im 
---
 hw/block/nvme.c | 35 +++
 hw/block/nvme.h | 21 -
 2 files changed, 55 insertions(+), 1 deletion(-)

diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index 6071dc1..d351781 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -605,6 +605,23 @@ static uint16_t nvme_identify(NvmeCtrl *n, NvmeCmd *cmd)
     }
 }
 
+static uint32_t nvme_get_feature_hmb(NvmeCtrl *n, NvmeCmd *cmd)
+{
+    uint32_t result = n->hmb_flag.flag;
+    uint64_t prp1 = le64_to_cpu(cmd->prp1);
+    uint64_t prp2 = le64_to_cpu(cmd->prp2);
+    NvmeHmbAttr attr = {0, };
+
+    attr.hsize = cpu_to_le32(n->hmb_attr.hsize);
+    attr.hmdlal = cpu_to_le32(n->hmb_attr.hmdlal);
+    attr.hmdlau = cpu_to_le32(n->hmb_attr.hmdlau);
+    attr.hmdlec = cpu_to_le32(n->hmb_attr.hmdlec);
+
+    nvme_dma_read_prp(n, (uint8_t *)&attr, sizeof(attr), prp1, prp2);
+
+    return result;
+}
+
 static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
 {
     uint32_t dw10 = le32_to_cpu(cmd->cdw10);
@@ -617,6 +634,9 @@ static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
     case NVME_NUMBER_OF_QUEUES:
         result = cpu_to_le32((n->num_queues - 1) | ((n->num_queues - 1) << 16));
         break;
+    case NVME_HOST_MEMORY_BUFFER:
+        result = nvme_get_feature_hmb(n, cmd);
+        break;
     default:
         return NVME_INVALID_FIELD | NVME_DNR;
     }
@@ -625,6 +645,16 @@ static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
 
     return NVME_SUCCESS;
 }
+static void nvme_set_feature_hmb(NvmeCtrl *n, NvmeCmd *cmd)
+{
+    n->hmb_flag.flag = le32_to_cpu(cmd->cdw11);
+
+    n->hmb_attr.hsize = le32_to_cpu(cmd->cdw12);
+    n->hmb_attr.hmdlal = le32_to_cpu(cmd->cdw13);
+    n->hmb_attr.hmdlau = le32_to_cpu(cmd->cdw14);
+    n->hmb_attr.hmdlec = le32_to_cpu(cmd->cdw15);
+}
+
 static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
 {
     uint32_t dw10 = le32_to_cpu(cmd->cdw10);
@@ -638,6 +668,9 @@ static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
         req->cqe.result =
             cpu_to_le32((n->num_queues - 1) | ((n->num_queues - 1) << 16));
         break;
+    case NVME_HOST_MEMORY_BUFFER:
+        nvme_set_feature_hmb(n, cmd);
+        break;
     default:
         return NVME_INVALID_FIELD | NVME_DNR;
     }
@@ -985,6 +1018,8 @@ static int nvme_init(PCIDevice *pci_dev)
     id->oacs = cpu_to_le16(0);
     id->frmw = 7 << 1;
     id->lpa = 1 << 0;
+    id->hmpre = 0x2000;
+    id->hmmin = 0x0;
     id->sqes = (0x6 << 4) | 0x6;
     id->cqes = (0x4 << 4) | 0x4;
     id->nn = cpu_to_le32(n->num_namespaces);
diff --git a/hw/block/nvme.h b/hw/block/nvme.h
index 6aab338..fab748b 100644
--- a/hw/block/nvme.h
+++ b/hw/block/nvme.h
@@ -552,7 +552,10 @@ typedef struct NvmeIdCtrl {
     uint8_t     lpa;
     uint8_t     elpe;
     uint8_t     npss;
-    uint8_t     rsvd511[248];
+    uint8_t     rsvd271[8];
+    uint32_t    hmpre;
+    uint32_t    hmmin;
+    uint8_t     rsvd511[232];
     uint8_t     sqes;
     uint8_t     cqes;
     uint16_t    rsvd515;
@@ -623,9 +626,22 @@ enum NvmeFeatureIds {
     NVME_INTERRUPT_VECTOR_CONF      = 0x9,
     NVME_WRITE_ATOMICITY            = 0xa,
     NVME_ASYNCHRONOUS_EVENT_CONF    = 0xb,
+    NVME_HOST_MEMORY_BUFFER         = 0xd,
     NVME_SOFTWARE_PROGRESS_MARKER   = 0x80
 };
 
 
+typedef struct NvmeHmbFlag {
+    uint32_t    flag;
+} NvmeHmbFlag;
+
+typedef struct NvmeHmbAttr {
+    uint32_t    hsize;
+    uint32_t    hmdlal;
+    uint32_t    hmdlau;
+    uint32_t    hmdlec;
+    uint8_t     rsvd4095[4080];
+} NvmeHmbAttr;
+
 typedef struct NvmeRangeType {
     uint8_t     type;
     uint8_t     attributes;
@@ -776,6 +792,9 @@ typedef struct NvmeCtrl {
     uint32_t    cmbloc;
     uint8_t     *cmbuf;
 
+    NvmeHmbFlag hmb_flag;
+    NvmeHmbAttr hmb_attr;
+
     char        *serial;
     NvmeNamespace   *namespaces;
    NvmeSQueue      **sq;
-- 
2.7.4
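
For context, a sketch of how the Set Features (FID 0x0d, Host Memory Buffer)
command dwords map onto the fields parsed in nvme_set_feature_hmb(), with
field names as defined in the NVMe 1.2+ spec (an illustrative summary, not
part of the patch):

    /*
     * Set Features, FID 0x0d (Host Memory Buffer), per NVMe 1.2+:
     *
     *   cdw11  bit 0: EHM, Enable Host Memory       -> n->hmb_flag.flag
     *   cdw12  HSIZE,  buffer size (CC.MPS units)   -> n->hmb_attr.hsize
     *   cdw13  HMDLLA, descriptor list addr, lower  -> n->hmb_attr.hmdlal
     *   cdw14  HMDLUA, descriptor list addr, upper  -> n->hmb_attr.hmdlau
     *   cdw15  HMDLEC, descriptor list entry count  -> n->hmb_attr.hmdlec
     */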