On Tue, Mar 24, 2026 at 12:45:29PM +0200, Jarkko Sakkinen wrote:
> On Tue, Mar 24, 2026 at 12:48:02PM +0530, Arun Menon wrote:
> > Add support for sending and receiving TPM command data in chunks when
> > the payload exceeds the physical size of the hardware MMIO window.
> >
> > This introduces the following changes:
> >
> > - crb_map_io(): Checks the device interface capability to determine if
> > chunking is supported, setting the chunking_supported flag. It also
> > stores the hardware's maximum response buffer size in priv->rsp_size.
> > - crb_send(): Iteratively writes command chunks to the fixed priv->cmd
> > MMIO window. It signals the TPM backend to process intermediate chunks
> > using CRB_START_NEXT_CHUNK, and signals the final chunk to begin
> > execution using CRB_START_INVOKE.
> > - crb_recv(): Parses the expected response size from the initial TPM
> > header. It then iteratively reads chunks from the fixed priv->rsp
> > MMIO window into the destination buffer, advancing the buffer offset
> > until the complete response is retrieved.
> >
> > Signed-off-by: Arun Menon <[email protected]>
>
> This is also just a description of what this adds. I'd lessen the detail
> and write a description that covers the motivation and logic of the
> change. It's a good test of author knowledge: if you really get the
> topic, you can explain its gist. In addition, it can then be checked
> against the implementation (vs. descriptions that are pseudocode in
> English).
Sure, I will rewrite the commit message around the motivation and logic
instead of explaining the code step by step.
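
To capture the gist here: the CRB command and response buffers are
fixed-size MMIO windows, so today any payload larger than the window is
rejected with -E2BIG. With chunking, the driver reuses the same window:
it writes one window's worth of the command, asks the backend to drain
it with CRB_START_NEXT_CHUNK, and issues CRB_START_INVOKE only together
with the final piece. Roughly this simplified model (not the actual
driver code; start_and_wait()/start() are stand-ins for the
crb_trigger_tpm() + crb_wait_for_reg_32() pair):

  static int send_chunked(u8 __iomem *win, size_t win_size,
                          const u8 *buf, size_t len)
  {
          size_t offset = 0;

          while (offset < len) {
                  size_t chunk = min_t(size_t, len - offset, win_size);

                  /* Refill the fixed window with the next piece. */
                  memcpy_toio(win, buf + offset, chunk);
                  offset += chunk;

                  if (offset < len)
                          /* Intermediate piece: wait until the backend drains it. */
                          start_and_wait(CRB_START_NEXT_CHUNK);
                  else
                          /* Final piece: kick off command execution. */
                          start(CRB_START_INVOKE);
          }
          return 0;
  }

The backend sees the same register handshake for every intermediate
piece, and offset only ever advances by at most the window size.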
>
> Since this is an RFC and QEMU does not have the feature in a release
> yet, it is good to polish stuff like this.
>
> > ---
> > drivers/char/tpm/tpm_crb.c | 150 +++++++++++++++++++++++++++----------
> > 1 file changed, 109 insertions(+), 41 deletions(-)
> >
> > diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
> > index 10128d078245c..fb63cc3737253 100644
> > --- a/drivers/char/tpm/tpm_crb.c
> > +++ b/drivers/char/tpm/tpm_crb.c
> > @@ -104,11 +104,13 @@ struct crb_priv {
> > u8 __iomem *cmd;
> > u8 __iomem *rsp;
> > u32 cmd_size;
> > + u32 rsp_size;
> > u32 smc_func_id;
> > u32 __iomem *pluton_start_addr;
> > u32 __iomem *pluton_reply_addr;
> > u8 ffa_flags;
> > u8 ffa_attributes;
> > + bool chunking_supported;
> > };
> >
> > struct tpm2_crb_smc {
> > @@ -368,38 +370,6 @@ static u8 crb_status(struct tpm_chip *chip)
> > return sts;
> > }
> >
> > -static int crb_recv(struct tpm_chip *chip, u8 *buf, size_t count)
> > -{
> > - struct crb_priv *priv = dev_get_drvdata(&chip->dev);
> > - unsigned int expected;
> > -
> > - /* A sanity check that the upper layer wants to get at least the header
> > - * as that is the minimum size for any TPM response.
> > - */
> > - if (count < TPM_HEADER_SIZE)
> > - return -EIO;
> > -
> > - /* If this bit is set, according to the spec, the TPM is in
> > - * unrecoverable condition.
> > - */
> > - if (ioread32(&priv->regs_t->ctrl_sts) & CRB_CTRL_STS_ERROR)
> > - return -EIO;
> > -
> > - /* Read the first 8 bytes in order to get the length of the response.
> > - * We read exactly a quad word in order to make sure that the remaining
> > - * reads will be aligned.
> > - */
> > - memcpy_fromio(buf, priv->rsp, 8);
> > -
> > - expected = be32_to_cpup((__be32 *)&buf[2]);
> > - if (expected > count || expected < TPM_HEADER_SIZE)
> > - return -EIO;
> > -
> > - memcpy_fromio(&buf[8], &priv->rsp[8], expected - 8);
> > -
> > - return expected;
> > -}
> > -
> > static int crb_do_acpi_start(struct tpm_chip *chip)
> > {
> > union acpi_object *obj;
> > @@ -474,6 +444,8 @@ static int crb_trigger_tpm(struct tpm_chip *chip, u32 start_cmd)
> > static int crb_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz, size_t len)
> > {
> > struct crb_priv *priv = dev_get_drvdata(&chip->dev);
> > + size_t offset = 0;
> > + size_t chunk_size;
> > int rc = 0;
> >
> > /* Zero the cancel register so that the next command will not get
> > @@ -481,7 +453,7 @@ static int crb_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz, size_t len)
> > */
> > iowrite32(0, &priv->regs_t->ctrl_cancel);
> >
> > - if (len > priv->cmd_size) {
> > + if (len > priv->cmd_size && !priv->chunking_supported) {
> > dev_err(&chip->dev, "invalid command count value %zd %d\n",
> > len, priv->cmd_size);
> > return -E2BIG;
> > @@ -491,16 +463,101 @@ static int crb_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz, size_t len)
> > if (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON)
> > __crb_cmd_ready(&chip->dev, priv, chip->locality);
> >
> > - memcpy_toio(priv->cmd, buf, len);
> > + while (offset < len) {
> > + chunk_size = min_t(size_t, len - offset, priv->cmd_size);
> > +
> > + memcpy_toio(priv->cmd, buf + offset, chunk_size);
> > + offset += chunk_size;
> > +
> > + /* Make sure that cmd is populated before issuing start. */
> > + wmb();
> > + if (offset < len) {
> > + rc = crb_trigger_tpm(chip, CRB_START_NEXT_CHUNK);
> > + if (rc)
> > + return rc;
> > + if (!crb_wait_for_reg_32(&priv->regs_t->ctrl_start,
> > + CRB_START_NEXT_CHUNK, 0, TPM2_TIMEOUT_C)) {
> > + dev_err(&chip->dev,
> > + "Timeout waiting for backend to consume
> > chunk\n");
> > + return -ETIME;
> > + }
> > + } else {
> > + rc = crb_trigger_tpm(chip, CRB_START_INVOKE);
> > + if (rc)
> > + return rc;
> > + }
> > + }
> > + return crb_try_pluton_doorbell(priv, false);
> > +}
> >
> > - /* Make sure that cmd is populated before issuing start. */
> > - wmb();
> > +static int crb_recv(struct tpm_chip *chip, u8 *buf, size_t count)
> > +{
> > + struct crb_priv *priv = dev_get_drvdata(&chip->dev);
> > + unsigned int expected;
> > + size_t offset = 0;
> > + size_t chunk_size;
> > + size_t first_read;
> > + int rc;
> >
> > - rc = crb_trigger_tpm(chip, CRB_START_INVOKE);
> > - if (rc)
> > - return rc;
> > + /* A sanity check that the upper layer wants to get at least the header
> > + * as that is the minimum size for any TPM response.
> > + */
> > + if (count < TPM_HEADER_SIZE)
> > + return -EIO;
> >
> > - return crb_try_pluton_doorbell(priv, false);
> > + /* If this bit is set, according to the spec, the TPM is in
> > + * unrecoverable condition.
> > + */
> > + if (ioread32(&priv->regs_t->ctrl_sts) & CRB_CTRL_STS_ERROR)
> > + return -EIO;
> > +
> > + /* Read the first 8 bytes in order to get the length of the response.
> > + * We read exactly a quad word in order to make sure that the remaining
> > + * reads will be aligned.
> > + */
> > + memcpy_fromio(buf, priv->rsp, 8);
> > +
> > + expected = be32_to_cpup((__be32 *)&buf[2]);
> > + if (expected > count || expected < TPM_HEADER_SIZE)
> > + return -EIO;
> > +
> > + /*
> > + * Set chunk_size by comparing the size of the buffer that the upper
> > + * layer has allocated (count) to the hardware TPM limit (priv->rsp_size).
> > + * This is to prevent a buffer overflow while writing to buf.
> > + */
> > + chunk_size = min_t(size_t, count, priv->rsp_size);
> > +
> > + /*
> > + * Clamp the first read to the actual response size found in the header.
> > + */
> > + first_read = min_t(size_t, expected, chunk_size);
> > +
> > + memcpy_fromio(&buf[8], &priv->rsp[8], first_read - 8);
> > + offset = first_read;
> > +
> > + while (offset < expected) {
> > + if (!priv->chunking_supported) {
> > + dev_err(&chip->dev, "Response larger than MMIO and
> > chunking not supported\n");
> > + return -EIO;
> > + }
> > +
> > + rc = crb_trigger_tpm(chip, CRB_START_NEXT_CHUNK);
> > + if (rc)
> > + return rc;
> > +
> > + if (!crb_wait_for_reg_32(&priv->regs_t->ctrl_start,
> > + CRB_START_NEXT_CHUNK, 0, TPM2_TIMEOUT_C)) {
> > + dev_err(&chip->dev, "Timeout waiting for backend
> > response\n");
> > + return -ETIME;
> > + }
> > +
> > + chunk_size = min_t(size_t, expected - offset, priv->rsp_size);
> > + memcpy_fromio(buf + offset, priv->rsp, chunk_size);
> > + offset += chunk_size;
> > + }
> > +
> > + return expected;
> > }
> >
> > static void crb_cancel(struct tpm_chip *chip)
> > @@ -727,6 +784,15 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
> > goto out;
> > }
> >
> > + if (priv->regs_h) {
> > + u32 intf_id = ioread32((u32 __iomem *)&priv->regs_h->intf_id);
> > +
> > + if (intf_id & CRB_INTF_CAP_CRB_CHUNK) {
> > + priv->chunking_supported = true;
> > + dev_info(dev, "CRB Chunking is supported by backend\n");
> > + }
> > + }
> > +
> > memcpy_fromio(&__rsp_pa, &priv->regs_t->ctrl_rsp_pa, 8);
> > rsp_pa = le64_to_cpu(__rsp_pa);
> > rsp_size = ioread32(&priv->regs_t->ctrl_rsp_size);
> > @@ -764,8 +830,10 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
> > priv->rsp = priv->cmd;
> >
> > out:
> > - if (!ret)
> > + if (!ret) {
> > priv->cmd_size = cmd_size;
> > + priv->rsp_size = rsp_size;
> > + }
> >
> > __crb_go_idle(dev, priv, 0);
> >
> > --
> > 2.53.0
> >
>
> BR, Jarkko
>
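One more note for the v2 description: the receive path mirrors the send
path. The response header read from the fixed window gives the total
size, and anything that does not fit in the window is pulled in with
further CRB_START_NEXT_CHUNK round trips, each one refilling the window
before the next copy. In the same simplified model as above (rsp_win
and rsp_size stand for priv->rsp and priv->rsp_size):

  /* buf[0..offset) already holds the header plus the first window. */
  while (offset < expected) {
          size_t chunk = min_t(size_t, expected - offset, rsp_size);

          /* Ask the backend to refill the window with the next piece. */
          start_and_wait(CRB_START_NEXT_CHUNK);

          memcpy_fromio(buf + offset, rsp_win, chunk);
          offset += chunk;
  }
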
Regards,
Arun Menon