In mana_hwc_rx_event_handler(), resp->response.hwc_msg_id is read from
DMA-coherent memory and bounds-checked, then mana_hwc_handle_resp()
re-reads the same field from the same DMA buffer for test_bit() and
pointer arithmetic.

DMA-coherent memory is mapped uncacheable on x86 and is shared,
unencrypted, in Confidential VMs (SEV-SNP/TDX), so each load goes
directly to host-visible memory. A malicious host can modify the value
between the check and the use, bypassing the bounds validation.

Fix this by reading hwc_msg_id exactly once using READ_ONCE() into a
stack-local variable in mana_hwc_rx_event_handler(), and passing the
validated value as a parameter to mana_hwc_handle_resp().

Fixes: ca9c54d2d6a5 ("net: mana: Add a driver for Microsoft Azure Network Adapter (MANA)")
Signed-off-by: Erni Sri Satya Vennela <[email protected]>
---
 .../net/ethernet/microsoft/mana/hw_channel.c  | 23 +++++++++++--------
 1 file changed, 13 insertions(+), 10 deletions(-)

diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c 
b/drivers/net/ethernet/microsoft/mana/hw_channel.c
index dbbde0fa57e7..fd8b324d7fb6 100644
--- a/drivers/net/ethernet/microsoft/mana/hw_channel.c
+++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c
@@ -77,21 +77,19 @@ static int mana_hwc_post_rx_wqe(const struct hwc_wq 
*hwc_rxq,
 }
 
 static void mana_hwc_handle_resp(struct hw_channel_context *hwc, u32 resp_len,
-                                struct hwc_work_request *rx_req)
+                                struct hwc_work_request *rx_req, u16 msg_id)
 {
        const struct gdma_resp_hdr *resp_msg = rx_req->buf_va;
        struct hwc_caller_ctx *ctx;
        int err;
 
-       if (!test_bit(resp_msg->response.hwc_msg_id,
-                     hwc->inflight_msg_res.map)) {
-               dev_err(hwc->dev, "hwc_rx: invalid msg_id = %u\n",
-                       resp_msg->response.hwc_msg_id);
+       if (!test_bit(msg_id, hwc->inflight_msg_res.map)) {
+               dev_err(hwc->dev, "hwc_rx: invalid msg_id = %u\n", msg_id);
                mana_hwc_post_rx_wqe(hwc->rxq, rx_req);
                return;
        }
 
-       ctx = hwc->caller_ctx + resp_msg->response.hwc_msg_id;
+       ctx = hwc->caller_ctx + msg_id;
        err = mana_hwc_verify_resp_msg(ctx, resp_msg, resp_len);
        if (err)
                goto out;
@@ -251,6 +249,7 @@ static void mana_hwc_rx_event_handler(void *ctx, u32 
gdma_rxq_id,
        struct gdma_sge *sge;
        u64 rq_base_addr;
        u64 rx_req_idx;
+       u16 msg_id;
        u8 *wqe;
 
        if (WARN_ON_ONCE(hwc_rxq->gdma_wq->id != gdma_rxq_id))
@@ -269,13 +268,17 @@ static void mana_hwc_rx_event_handler(void *ctx, u32 
gdma_rxq_id,
        rx_req = &hwc_rxq->msg_buf->reqs[rx_req_idx];
        resp = (struct gdma_resp_hdr *)rx_req->buf_va;
 
-       if (resp->response.hwc_msg_id >= hwc->num_inflight_msg) {
-               dev_err(hwc->dev, "HWC RX: wrong msg_id=%u\n",
-                       resp->response.hwc_msg_id);
+       /* Read msg_id once from DMA buffer to prevent TOCTOU:
+        * DMA memory is shared/unencrypted in CVMs - host can
+        * modify it between reads.
+        */
+       msg_id = READ_ONCE(resp->response.hwc_msg_id);
+       if (msg_id >= hwc->num_inflight_msg) {
+               dev_err(hwc->dev, "HWC RX: wrong msg_id=%u\n", msg_id);
                return;
        }
 
-       mana_hwc_handle_resp(hwc, rx_oob->tx_oob_data_size, rx_req);
+       mana_hwc_handle_resp(hwc, rx_oob->tx_oob_data_size, rx_req, msg_id);
 
        /* Can no longer use 'resp', because the buffer is posted to the HW
         * in mana_hwc_handle_resp() above.
-- 
2.34.1


Reply via email to