On Tue, Dec 08, 2020 at 09:33:39AM +0100, Klaus Jensen wrote:
> +static uint16_t nvme_copy(NvmeCtrl *n, NvmeRequest *req)
> +{

<snip>

> +    for (i = 0; i < nr; i++) {
> +        uint32_t _nlb = le16_to_cpu(range[i].nlb) + 1;
> +        if (_nlb > le16_to_cpu(ns->id_ns.mssrl)) {
> +            return NVME_CMD_SIZE_LIMIT | NVME_DNR;
> +        }
> +
> +        nlb += _nlb;
> +    }
> +
> +    if (nlb > le32_to_cpu(ns->id_ns.mcl)) {
> +        return NVME_CMD_SIZE_LIMIT | NVME_DNR;
> +    }
> +
> +    bounce = bouncep = g_malloc(nvme_l2b(ns, nlb));
> +
> +    for (i = 0; i < nr; i++) {
> +        uint64_t slba = le64_to_cpu(range[i].slba);
> +        uint32_t nlb = le16_to_cpu(range[i].nlb) + 1;
> +
> +        status = nvme_check_bounds(ns, slba, nlb);
> +        if (status) {
> +            trace_pci_nvme_err_invalid_lba_range(slba, nlb, ns->id_ns.nsze);
> +            goto free_bounce;
> +        }
> +
> +        if (NVME_ERR_REC_DULBE(ns->features.err_rec)) {
> +            status = nvme_check_dulbe(ns, slba, nlb);
> +            if (status) {
> +                goto free_bounce;
> +            }
> +        }
> +    }

Only comment I have is that these two for-loops look like they can be
collapsed into one, which also simplifies how you account for the bounce
buffer when erroring out: if all the per-range checks run before the
allocation, the error paths no longer need to free it.
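Something along these lines (untested sketch, just reusing the identifiers
from the quoted hunk), with validation done entirely up front and the
g_malloc moved after it:

    for (i = 0; i < nr; i++) {
        uint64_t slba = le64_to_cpu(range[i].slba);
        uint32_t _nlb = le16_to_cpu(range[i].nlb) + 1;

        if (_nlb > le16_to_cpu(ns->id_ns.mssrl)) {
            return NVME_CMD_SIZE_LIMIT | NVME_DNR;
        }

        status = nvme_check_bounds(ns, slba, _nlb);
        if (status) {
            trace_pci_nvme_err_invalid_lba_range(slba, _nlb, ns->id_ns.nsze);
            return status;
        }

        if (NVME_ERR_REC_DULBE(ns->features.err_rec)) {
            status = nvme_check_dulbe(ns, slba, _nlb);
            if (status) {
                return status;
            }
        }

        nlb += _nlb;
    }

    if (nlb > le32_to_cpu(ns->id_ns.mcl)) {
        return NVME_CMD_SIZE_LIMIT | NVME_DNR;
    }

    /* all ranges validated; nothing to clean up on the paths above */
    bounce = bouncep = g_malloc(nvme_l2b(ns, nlb));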
