Use the HS hook to completely generate the HS BL descriptor, similarly
to what is done in the LS hook, instead of (arbitrarily) using the
acr_v1 format as an intermediate.

This allows us to make the bootloader descriptor structures private to
each implementation, resulting in a cleaner and more consistent design.

Signed-off-by: Alexandre Courbot <[email protected]>
---
 drm/nouveau/nvkm/subdev/secboot/acr_v1.c       | 145 +++++++++++++------------
 drm/nouveau/nvkm/subdev/secboot/acr_v1.h       | 142 ++++++++++++------------
 drm/nouveau/nvkm/subdev/secboot/acr_v1_gm20b.c |  57 ++++------
 3 files changed, 165 insertions(+), 179 deletions(-)

diff --git a/drm/nouveau/nvkm/subdev/secboot/acr_v1.c 
b/drm/nouveau/nvkm/subdev/secboot/acr_v1.c
index 68bb3113fd23..486327e5441a 100644
--- a/drm/nouveau/nvkm/subdev/secboot/acr_v1.c
+++ b/drm/nouveau/nvkm/subdev/secboot/acr_v1.c
@@ -82,6 +82,42 @@ struct ls_ucode_mgr {
        struct list_head img_list;
 };
 
+/**
+ * struct acr_v1_bl_desc - DMEM bootloader descriptor
+ * @signature:         16B signature for secure code. 0s if no secure code
+ * @ctx_dma:           DMA context to be used by BL while loading code/data
+ * @code_dma_base:     256B-aligned Physical FB Address where code is located
+ *                     (falcon's $xcbase register)
+ * @non_sec_code_off:  offset from code_dma_base where the non-secure code is
+ *                      located. The offset must be multiple of 256 to help 
perf
+ * @non_sec_code_size: the size of the nonSecure code part.
+ * @sec_code_off:      offset from code_dma_base where the secure code is
+ *                      located. The offset must be multiple of 256 to help 
perf
+ * @sec_code_size:     size of the secure code part.
+ *                      The size should be multiple of 256 to help 
perf
+ * @code_entry_point:  code entry point which will be invoked by BL after
+ *                      code is loaded.
+ * @data_dma_base:     256B aligned Physical FB Address where data is located.
+ *                     (falcon's $xdbase register)
+ * @data_size:         size of data block. Should be multiple of 256B
+ *
+ * Structure used by the bootloader to load the rest of the code. This has
+ * to be filled by host and copied into DMEM at offset provided in the
+ * hsflcn_bl_desc.bl_desc_dmem_load_off.
+ */
+struct acr_v1_bl_desc {
+       u32 reserved[4];
+       u32 signature[4];
+       u32 ctx_dma;
+       struct flcn_u64 code_dma_base;
+       u32 non_sec_code_off;
+       u32 non_sec_code_size;
+       u32 sec_code_off;
+       u32 sec_code_size;
+       u32 code_entry_point;
+       struct flcn_u64 data_dma_base;
+       u32 data_size;
+};
 
 /*
  *
@@ -115,21 +151,6 @@ struct hsf_fw_header {
 };
 
 /**
- * struct hsf_load_header - HS firmware load header
- */
-struct hsf_load_header {
-       u32 non_sec_code_off;
-       u32 non_sec_code_size;
-       u32 data_dma_base;
-       u32 data_size;
-       u32 num_apps;
-       struct {
-               u32 sec_code_off;
-               u32 sec_code_size;
-       } app[0];
-};
-
-/**
  * struct hsflcn_acr_desc - data section of the HS firmware
  *
  * This header is to be copied at the beginning of DMEM by the HS bootloader.
@@ -714,32 +735,21 @@ acr_v1_hsf_patch_signature(struct nvkm_acr_v1 *acr, void 
*acr_image)
  * acr_v1_populate_hsf_bl_desc() - populate BL descriptor for HS image
  */
 static void
-acr_v1_populate_hsf_bl_desc(void *acr_image, struct acr_v1_bl_desc *bl_desc)
+acr_v1_generate_hsf_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc,
+                           u64 offset)
 {
-       struct fw_bin_header *hsbin_hdr = acr_image;
-       struct hsf_fw_header *fw_hdr = acr_image + hsbin_hdr->header_offset;
-       struct hsf_load_header *load_hdr = acr_image + fw_hdr->hdr_offset;
+       struct acr_v1_bl_desc *bl_desc = _bl_desc;
 
-       /*
-        * Descriptor for the bootloader that will load the ACR image into
-        * IMEM/DMEM memory.
-        */
-       fw_hdr = acr_image + hsbin_hdr->header_offset;
-       load_hdr = acr_image + fw_hdr->hdr_offset;
        memset(bl_desc, 0, sizeof(*bl_desc));
        bl_desc->ctx_dma = FALCON_DMAIDX_VIRT;
-       bl_desc->non_sec_code_off = load_hdr->non_sec_code_off;
-       bl_desc->non_sec_code_size = load_hdr->non_sec_code_size;
-       bl_desc->sec_code_off = load_hdr->app[0].sec_code_off;
-       bl_desc->sec_code_size = load_hdr->app[0].sec_code_size;
+       bl_desc->non_sec_code_off = hdr->non_sec_code_off;
+       bl_desc->non_sec_code_size = hdr->non_sec_code_size;
+       bl_desc->sec_code_off = hdr->app[0].sec_code_off;
+       bl_desc->sec_code_size = hdr->app[0].sec_code_size;
        bl_desc->code_entry_point = 0;
-       /*
-        * We need to set code_dma_base to the virtual address of the acr_blob,
-        * and add this address to data_dma_base before writing it into DMEM
-        */
-       bl_desc->code_dma_base.lo = 0;
-       bl_desc->data_dma_base.lo = load_hdr->data_dma_base;
-       bl_desc->data_size = load_hdr->data_size;
+       bl_desc->code_dma_base = u64_to_flcn64(offset);
+       bl_desc->data_dma_base = u64_to_flcn64(offset + hdr->data_dma_base);
+       bl_desc->data_size = hdr->data_size;
 }
 
 static void
@@ -779,38 +789,44 @@ acr_v1_fixup_hs_desc(struct nvkm_acr_v1 *acr, struct 
nvkm_secboot *sb,
 static int
 acr_v1_prepare_hs_blob(struct nvkm_acr_v1 *acr, struct nvkm_secboot *sb,
                       const char *fw, struct nvkm_gpuobj **blob,
-                      struct acr_v1_bl_desc *bl_desc, bool patch)
+                      struct hsf_load_header *load_header, bool patch)
 {
        const struct nvkm_subdev *subdev = acr->base.subdev;
        void *acr_image;
        struct fw_bin_header *hsbin_hdr;
+       struct hsf_fw_header *fw_hdr;
+       struct hsf_load_header *load_hdr;
        void *acr_data;
        int ret;
 
        acr_image = acr_v1_load_firmware(subdev, fw, 0);
        if (IS_ERR(acr_image))
                return PTR_ERR(acr_image);
+
        hsbin_hdr = acr_image;
+       fw_hdr = acr_image + hsbin_hdr->header_offset;
+       load_hdr = acr_image + fw_hdr->hdr_offset;
+       acr_data = acr_image + hsbin_hdr->data_offset;
 
        /* Patch signature */
        acr_v1_hsf_patch_signature(acr, acr_image);
 
-       acr_data = acr_image + hsbin_hdr->data_offset;
-
        /* Patch descriptor with WPR information? */
        if (patch) {
-               struct hsf_fw_header *fw_hdr;
-               struct hsf_load_header *load_hdr;
                struct hsflcn_acr_desc *desc;
 
-               fw_hdr = acr_image + hsbin_hdr->header_offset;
-               load_hdr = acr_image + fw_hdr->hdr_offset;
                desc = acr_data + load_hdr->data_dma_base;
                acr_v1_fixup_hs_desc(acr, sb, desc);
        }
 
-       /* Generate HS BL descriptor */
-       acr_v1_populate_hsf_bl_desc(acr_image, bl_desc);
+       if (load_hdr->num_apps > ACR_V1_MAX_APPS) {
+               nvkm_error(subdev, "more apps (%d) than supported (%d)!",
+                          load_hdr->num_apps, ACR_V1_MAX_APPS);
+               ret = -EINVAL;
+               goto cleanup;
+       }
+       memcpy(load_header, load_hdr, sizeof(*load_header) +
+                              (sizeof(load_hdr->app[0]) * load_hdr->num_apps));
 
        /* Create ACR blob and copy HS data to it */
        ret = nvkm_gpuobj_new(subdev->device, ALIGN(hsbin_hdr->data_size, 256),
@@ -879,7 +895,7 @@ acr_v1_load_blobs(struct nvkm_acr_v1 *acr, struct 
nvkm_secboot *sb)
 
        /* Load the HS firmware that will load the LS firmwares */
        ret = acr_v1_prepare_hs_blob(acr, sb, "acr/ucode_load", &acr->load_blob,
-                                    &acr->load_bl_desc, true);
+                                    &acr->load_bl_header, true);
        if (ret)
                return ret;
 
@@ -887,7 +903,7 @@ acr_v1_load_blobs(struct nvkm_acr_v1 *acr, struct 
nvkm_secboot *sb)
        if (sb->wpr_size == 0) {
                ret = acr_v1_prepare_hs_blob(acr, sb, "acr/ucode_unload",
                                             &acr->unload_blob,
-                                            &acr->unload_bl_desc, false);
+                                            &acr->unload_bl_header, false);
                if (ret)
                        return ret;
        }
@@ -918,30 +934,21 @@ acr_v1_load(struct nvkm_acr *_acr, struct nvkm_secboot 
*sb,
        void *hsbl_data = blob_data + hsbl_desc->data_off;
        u32 code_size = ALIGN(hsbl_desc->code_size, 256);
        const u32 base = sb->base;
-       const u32 bl_desc_size = acr->func->bl_desc_size;
-       struct acr_v1_bl_desc desc;
+       const u32 bl_desc_size = acr->hs_func->bl_desc_size;
+       const struct hsf_load_header *load_hdr;
        u8 bl_desc[bl_desc_size];
-       u64 vma_addr;
        u32 code_start;
 
        /* Find the bootloader descriptor for our blob and copy it */
        if (blob == acr->load_blob) {
-               desc = acr->load_bl_desc;
+               load_hdr = &acr->load_bl_header;
        } else if (blob == acr->unload_blob) {
-               desc = acr->unload_bl_desc;
+               load_hdr = &acr->unload_bl_header;
        } else {
                nvkm_error(_acr->subdev, "invalid secure boot blob!\n");
                return -EINVAL;
        }
 
-       /* Patch the descriptor's load addresses */
-       vma_addr = flcn64_to_u64(desc.code_dma_base) + offset;
-       desc.code_dma_base.lo = lower_32_bits(vma_addr);
-       desc.code_dma_base.hi = upper_32_bits(vma_addr);
-       vma_addr = flcn64_to_u64(desc.data_dma_base) + offset;
-       desc.data_dma_base.lo = lower_32_bits(vma_addr);
-       desc.data_dma_base.hi = upper_32_bits(vma_addr);
-
        /*
         * Copy HS bootloader data
         */
@@ -954,8 +961,8 @@ acr_v1_load(struct nvkm_acr *_acr, struct nvkm_secboot *sb,
        nvkm_falcon_load_imem(device, sb->base, hsbl_code, code_start,
                              code_size, hsbl_desc->start_tag);
 
-       /* Fixup the BL header */
-       acr->func->generate_bl_desc(&desc, &bl_desc);
+       /* Generate the BL header */
+       acr->hs_func->generate_bl_desc(load_hdr, bl_desc, offset);
 
        /*
         * Copy HS BL header where the HS descriptor expects it to be
@@ -966,12 +973,6 @@ acr_v1_load(struct nvkm_acr *_acr, struct nvkm_secboot *sb,
        return 0;
 }
 
-static void
-acr_v1_generate_bl_desc(const struct acr_v1_bl_desc *desc, void *ret)
-{
-       memcpy(ret, desc, sizeof(*desc));
-}
-
 /*
  * acr_v1_reset() - execute secure boot from the prepared state
  *
@@ -1079,10 +1080,10 @@ acr_v1_dtor(struct nvkm_acr *_acr)
        kfree(acr);
 }
 
-static const struct nvkm_acr_v1_func
-gm200_acr_v1_func = {
+static const struct nvkm_acr_v1_hs_func
+gm200_acr_v1_hs_func = {
+       .generate_bl_desc = acr_v1_generate_hsf_bl_desc,
        .bl_desc_size = sizeof(struct acr_v1_bl_desc),
-       .generate_bl_desc = acr_v1_generate_bl_desc,
 };
 
 static const nvkm_acr_v1_ls_func
@@ -1121,7 +1122,7 @@ nvkm_acr_v1_new(void)
                return ERR_PTR(-ENOMEM);
 
        acr->base.func = &acr_v1_func;
-       acr->func = &gm200_acr_v1_func;
+       acr->hs_func = &gm200_acr_v1_hs_func;
        acr->ls_func = &acr_v1_ls_func;
 
        return &acr->base;
diff --git a/drm/nouveau/nvkm/subdev/secboot/acr_v1.h 
b/drm/nouveau/nvkm/subdev/secboot/acr_v1.h
index a3111c1ed9fb..a84867413df8 100644
--- a/drm/nouveau/nvkm/subdev/secboot/acr_v1.h
+++ b/drm/nouveau/nvkm/subdev/secboot/acr_v1.h
@@ -48,43 +48,6 @@ static inline struct flcn_u64 u64_to_flcn64(const u64 u)
        return ret;
 }
 
-/**
- * struct acr_v1_bl_desc - DMEM bootloader descriptor
- * @signature:         16B signature for secure code. 0s if no secure code
- * @ctx_dma:           DMA context to be used by BL while loading code/data
- * @code_dma_base:     256B-aligned Physical FB Address where code is located
- *                     (falcon's $xcbase register)
- * @non_sec_code_off:  offset from code_dma_base where the non-secure code is
- *                      located. The offset must be multiple of 256 to help 
perf
- * @non_sec_code_size: the size of the nonSecure code part.
- * @sec_code_off:      offset from code_dma_base where the secure code is
- *                      located. The offset must be multiple of 256 to help 
perf
- * @sec_code_size:     offset from code_dma_base where the secure code is
- *                      located. The offset must be multiple of 256 to help 
perf
- * @code_entry_point:  code entry point which will be invoked by BL after
- *                      code is loaded.
- * @data_dma_base:     256B aligned Physical FB Address where data is located.
- *                     (falcon's $xdbase register)
- * @data_size:         size of data block. Should be multiple of 256B
- *
- * Structure used by the bootloader to load the rest of the code. This has
- * to be filled by host and copied into DMEM at offset provided in the
- * hsflcn_bl_desc.bl_desc_dmem_load_off.
- */
-struct acr_v1_bl_desc {
-       u32 reserved[4];
-       u32 signature[4];
-       u32 ctx_dma;
-       struct flcn_u64 code_dma_base;
-       u32 non_sec_code_off;
-       u32 non_sec_code_size;
-       u32 sec_code_off;
-       u32 sec_code_size;
-       u32 code_entry_point;
-       struct flcn_u64 data_dma_base;
-       u32 data_size;
-};
-
 /*
  *
  * LS blob structures
@@ -110,6 +73,33 @@ struct lsf_ucode_desc {
 };
 
 /**
+ * struct lsf_wpr_header - LS blob WPR Header
+ * @falcon_id:         LS falcon ID
+ * @lsb_offset:                offset of the lsb_lsf_header in the WPR region
+ * @bootstrap_owner:   secure falcon responsible for bootstrapping the LS falcon
+ * @lazy_bootstrap:    skip bootstrapping by ACR
+ * @status:            bootstrapping status
+ *
+ * An array of these is written at the beginning of the WPR region, one for
+ * each managed falcon. The array is terminated by an instance which falcon_id
+ * is LSF_FALCON_ID_INVALID.
+ */
+struct lsf_wpr_header {
+       u32  falcon_id;
+       u32  lsb_offset;
+       u32  bootstrap_owner;
+       u32  lazy_bootstrap;
+       u32  status;
+#define LSF_IMAGE_STATUS_NONE                          0
+#define LSF_IMAGE_STATUS_COPY                          1
+#define LSF_IMAGE_STATUS_VALIDATION_CODE_FAILED                2
+#define LSF_IMAGE_STATUS_VALIDATION_DATA_FAILED                3
+#define LSF_IMAGE_STATUS_VALIDATION_DONE               4
+#define LSF_IMAGE_STATUS_VALIDATION_SKIPPED            5
+#define LSF_IMAGE_STATUS_BOOTSTRAP_READY               6
+};
+
+/**
  * struct lsf_lsb_header - LS firmware header
  * @signature:         signature to verify the firmware against
  * @ucode_off:         offset of the ucode blob in the WPR region. The ucode
@@ -151,34 +141,6 @@ struct lsf_lsb_header {
 };
 
 /**
- * struct lsf_wpr_header - LS blob WPR Header
- * @falcon_id:         LS falcon ID
- * @lsb_offset:                offset of the lsb_lsf_header in the WPR region
- * @bootstrap_owner:   secure falcon reponsible for bootstrapping the LS falcon
- * @lazy_bootstrap:    skip bootstrapping by ACR
- * @status:            bootstrapping status
- *
- * An array of these is written at the beginning of the WPR region, one for
- * each managed falcon. The array is terminated by an instance which falcon_id
- * is LSF_FALCON_ID_INVALID.
- */
-struct lsf_wpr_header {
-       u32  falcon_id;
-       u32  lsb_offset;
-       u32  bootstrap_owner;
-       u32  lazy_bootstrap;
-       u32  status;
-#define LSF_IMAGE_STATUS_NONE                          0
-#define LSF_IMAGE_STATUS_COPY                          1
-#define LSF_IMAGE_STATUS_VALIDATION_CODE_FAILED                2
-#define LSF_IMAGE_STATUS_VALIDATION_DATA_FAILED                3
-#define LSF_IMAGE_STATUS_VALIDATION_DONE               4
-#define LSF_IMAGE_STATUS_VALIDATION_SKIPPED            5
-#define LSF_IMAGE_STATUS_BOOTSTRAP_READY               6
-};
-
-
-/**
  * struct ls_ucode_img_desc - descriptor of firmware image
  * @descriptor_size:           size of this descriptor
  * @image_size:                        size of the whole image
@@ -254,16 +216,43 @@ struct ls_ucode_img {
        struct lsf_lsb_header lsb_header;
 };
 
+
+
+/*
+ *
+ * HS blob structures
+ *
+ */
+
+
+struct hsf_load_header_app {
+       u32 sec_code_off;
+       u32 sec_code_size;
+};
+
+/**
+ * struct hsf_load_header - HS firmware load header
+ */
+struct hsf_load_header {
+       u32 non_sec_code_off;
+       u32 non_sec_code_size;
+       u32 data_dma_base;
+       u32 data_size;
+       u32 num_apps;
+       struct hsf_load_header_app app[0];
+};
+
+
 /**
- * struct nvkm_acr_v1_func - manages nuances between ACR v1 instances
+ * struct nvkm_acr_v1_hs_func - manages nuances between ACR v1 HS instances
  *
- * @bl_desc_size: size of the bootloader descriptor
  * @generate_bl_desc: function called on a block of bl_desc_size to generate 
the
  *                proper bootloader descriptor
+ * @bl_desc_size: size of the bootloader descriptor
  */
-struct nvkm_acr_v1_func {
+struct nvkm_acr_v1_hs_func {
+       void (*generate_bl_desc)(const struct hsf_load_header *, void *, u64);
        u32 bl_desc_size;
-       void (*generate_bl_desc)(const struct acr_v1_bl_desc *, void *);
 };
 
 /**
@@ -289,6 +278,9 @@ nvkm_acr_v1_ls_func[NVKM_FALCON_END];
 int acr_v1_ls_load_fecs(const struct nvkm_subdev *, struct ls_ucode_img *);
 int acr_v1_ls_load_gpccs(const struct nvkm_subdev *, struct ls_ucode_img *);
 
+
+#define ACR_V1_MAX_APPS 8
+
 /**
  * struct nvkm_acr_v1 - version 1 of the ACR
  *
@@ -303,7 +295,7 @@ int acr_v1_ls_load_gpccs(const struct nvkm_subdev *, struct 
ls_ucode_img *);
  */
 struct nvkm_acr_v1 {
        struct nvkm_acr base;
-       const struct nvkm_acr_v1_func *func;
+       const struct nvkm_acr_v1_hs_func *hs_func;
        const nvkm_acr_v1_ls_func *ls_func;
 
        /*
@@ -311,11 +303,17 @@ struct nvkm_acr_v1 {
         * on Tegra the HS FW copies the LS blob into the fixed WPR instead
         */
        struct nvkm_gpuobj *load_blob;
-       struct acr_v1_bl_desc load_bl_desc;
+       struct {
+               struct hsf_load_header load_bl_header;
+               struct hsf_load_header_app __load_apps[ACR_V1_MAX_APPS];
+       };
 
        /* HS FW - unlock WPR region (dGPU only) */
        struct nvkm_gpuobj *unload_blob;
-       struct acr_v1_bl_desc unload_bl_desc;
+       struct {
+               struct hsf_load_header unload_bl_header;
+               struct hsf_load_header_app __unload_apps[ACR_V1_MAX_APPS];
+       };
 
        /* HS bootloader */
        void *hsbl_blob;
diff --git a/drm/nouveau/nvkm/subdev/secboot/acr_v1_gm20b.c 
b/drm/nouveau/nvkm/subdev/secboot/acr_v1_gm20b.c
index 1062d4f6a1ff..e59dc8e5437b 100644
--- a/drm/nouveau/nvkm/subdev/secboot/acr_v1_gm20b.c
+++ b/drm/nouveau/nvkm/subdev/secboot/acr_v1_gm20b.c
@@ -42,37 +42,6 @@ struct acr_v1_gm20b_bl_desc {
        u32 data_size;
 };
 
-/**
- * gm20b_acr_v1_generate_bl_desc - write BL descriptor in format used by GM20B
- *
- * There is only a slight format difference (DMA addresses being 32-bits and
- * 256B-aligned) to address.
- */
-static void
-gm20b_acr_v1_generate_bl_desc(const struct acr_v1_bl_desc *desc, void *ret)
-{
-       struct acr_v1_gm20b_bl_desc *gdesc = ret;
-       u64 addr;
-
-       memcpy(gdesc->reserved, desc->reserved, sizeof(gdesc->reserved));
-       memcpy(gdesc->signature, desc->signature, sizeof(gdesc->signature));
-       gdesc->ctx_dma = desc->ctx_dma;
-       addr = desc->code_dma_base.hi;
-       addr <<= 32;
-       addr |= desc->code_dma_base.lo;
-       gdesc->code_dma_base = lower_32_bits(addr >> 8);
-       gdesc->non_sec_code_off = desc->non_sec_code_off;
-       gdesc->non_sec_code_size = desc->non_sec_code_size;
-       gdesc->sec_code_off = desc->sec_code_off;
-       gdesc->sec_code_size = desc->sec_code_size;
-       gdesc->code_entry_point = desc->code_entry_point;
-       addr = desc->data_dma_base.hi;
-       addr <<= 32;
-       addr |= desc->data_dma_base.lo;
-       gdesc->data_dma_base = lower_32_bits(addr >> 8);
-       gdesc->data_size = desc->data_size;
-}
-
 static void
 acr_v1_gm20b_ls_generic_bl_desc(const struct ls_ucode_img *img, u64 wpr_addr,
                                void *_desc)
@@ -92,10 +61,28 @@ acr_v1_gm20b_ls_generic_bl_desc(const struct ls_ucode_img 
*img, u64 wpr_addr,
        desc->code_entry_point = pdesc->app_imem_entry;
 }
 
-static const struct nvkm_acr_v1_func
-gm20b_acr_v1_func = {
+static void
+gm20b_acr_v1_generate_hsf_bl_desc(const struct hsf_load_header *load_hdr,
+                                 void *_bl_desc, u64 offset)
+{
+       struct acr_v1_gm20b_bl_desc *bl_desc = _bl_desc;
+
+       memset(bl_desc, 0, sizeof(*bl_desc));
+       bl_desc->ctx_dma = FALCON_DMAIDX_VIRT;
+       bl_desc->non_sec_code_off = load_hdr->non_sec_code_off;
+       bl_desc->non_sec_code_size = load_hdr->non_sec_code_size;
+       bl_desc->sec_code_off = load_hdr->app[0].sec_code_off;
+       bl_desc->sec_code_size = load_hdr->app[0].sec_code_size;
+       bl_desc->code_entry_point = 0;
+       bl_desc->code_dma_base = offset >> 8;
+       bl_desc->data_dma_base = (offset + load_hdr->data_dma_base) >> 8;
+       bl_desc->data_size = load_hdr->data_size;
+}
+
+static const struct nvkm_acr_v1_hs_func
+gm20b_acr_v1_hs_func = {
+       .generate_bl_desc = gm20b_acr_v1_generate_hsf_bl_desc,
        .bl_desc_size = sizeof(struct acr_v1_gm20b_bl_desc),
-       .generate_bl_desc = gm20b_acr_v1_generate_bl_desc,
 };
 
 static const struct nvkm_acr_func
@@ -128,7 +115,7 @@ nvkm_acr_v1_gm20b_new(void)
                return ERR_PTR(-ENOMEM);
 
        acr->base.func = &gm20b_acr_func;
-       acr->func = &gm20b_acr_v1_func;
+       acr->hs_func = &gm20b_acr_v1_hs_func;
        acr->ls_func = &gm20b_acr_v1_ls_func;
 
        return &acr->base;
-- 
2.10.0

_______________________________________________
Nouveau mailing list
[email protected]
https://lists.freedesktop.org/mailman/listinfo/nouveau

Reply via email to