Share the SMCCC plumbing used by SEND_DIRECT and RUN via a common ffa_finish_direct_req_run() helper, so that the canonical success and error responses are handled in one place.
The dispatcher now routes FFA_RUN through ffa_handle_run(), and direct requests bail out early if a guest targets itself or a non-secure endpoint. This simplifies the direct path and prepares the mediator for the wider v1.2 register ABI. Signed-off-by: Bertrand Marquis <[email protected]> --- xen/arch/arm/tee/ffa.c | 3 + xen/arch/arm/tee/ffa_msg.c | 111 ++++++++++++++++++++++++--------- xen/arch/arm/tee/ffa_private.h | 1 + 3 files changed, 84 insertions(+), 31 deletions(-) diff --git a/xen/arch/arm/tee/ffa.c b/xen/arch/arm/tee/ffa.c index 7392bb6c3db9..92cb6ad7ec97 100644 --- a/xen/arch/arm/tee/ffa.c +++ b/xen/arch/arm/tee/ffa.c @@ -347,6 +347,9 @@ static bool ffa_handle_call(struct cpu_user_regs *regs) case FFA_MSG_SEND_DIRECT_REQ_64: ffa_handle_msg_send_direct_req(regs, fid); return true; + case FFA_RUN: + ffa_handle_run(regs, fid); + return true; case FFA_MSG_SEND2: e = ffa_handle_msg_send2(regs); break; diff --git a/xen/arch/arm/tee/ffa_msg.c b/xen/arch/arm/tee/ffa_msg.c index dec429cbf160..8bb4bd93f724 100644 --- a/xen/arch/arm/tee/ffa_msg.c +++ b/xen/arch/arm/tee/ffa_msg.c @@ -21,42 +21,74 @@ struct ffa_part_msg_rxtx { uint32_t msg_size; }; -void ffa_handle_msg_send_direct_req(struct cpu_user_regs *regs, uint32_t fid) +static void ffa_finish_direct_req_run(struct cpu_user_regs *regs, + struct arm_smccc_1_2_regs *req) { - struct arm_smccc_1_2_regs arg = { .a0 = fid, }; struct arm_smccc_1_2_regs resp = { }; - struct domain *d = current->domain; - uint32_t src_dst; uint64_t mask; - if ( smccc_is_conv_64(fid) ) + arm_smccc_1_2_smc(req, &resp); + + switch ( resp.a0 ) + { + case FFA_ERROR: + case FFA_SUCCESS_32: + case FFA_SUCCESS_64: + case FFA_MSG_SEND_DIRECT_RESP_32: + case FFA_MSG_SEND_DIRECT_RESP_64: + case FFA_MSG_YIELD: + case FFA_INTERRUPT: + break; + default: + /* Bad fid, report back to the caller. 
*/ + ffa_set_regs_error(regs, FFA_RET_ABORTED); + return; + } + + if ( smccc_is_conv_64(resp.a0) ) mask = GENMASK_ULL(63, 0); else mask = GENMASK_ULL(31, 0); + ffa_set_regs(regs, resp.a0, resp.a1 & mask, resp.a2 & mask, resp.a3 & mask, + resp.a4 & mask, resp.a5 & mask, resp.a6 & mask, + resp.a7 & mask); +} + +void ffa_handle_msg_send_direct_req(struct cpu_user_regs *regs, uint32_t fid) +{ + struct arm_smccc_1_2_regs arg = { .a0 = fid, }; + struct domain *d = current->domain; + uint32_t src_dst; + uint64_t mask; + int32_t ret; + if ( !ffa_fw_supports_fid(fid) ) { - resp.a0 = FFA_ERROR; - resp.a2 = FFA_RET_NOT_SUPPORTED; + ret = FFA_RET_NOT_SUPPORTED; goto out; } src_dst = get_user_reg(regs, 1); - if ( (src_dst >> 16) != ffa_get_vm_id(d) ) + if ( (src_dst >> 16) != ffa_get_vm_id(d) || + (src_dst & GENMASK(15,0)) == ffa_get_vm_id(d) ) { - resp.a0 = FFA_ERROR; - resp.a2 = FFA_RET_INVALID_PARAMETERS; + ret = FFA_RET_INVALID_PARAMETERS; goto out; } /* we do not support direct messages to VMs */ if ( !FFA_ID_IS_SECURE(src_dst & GENMASK(15,0)) ) { - resp.a0 = FFA_ERROR; - resp.a2 = FFA_RET_NOT_SUPPORTED; + ret = FFA_RET_NOT_SUPPORTED; goto out; } + if ( smccc_is_conv_64(fid) ) + mask = GENMASK_ULL(63, 0); + else + mask = GENMASK_ULL(31, 0); + arg.a1 = src_dst; arg.a2 = get_user_reg(regs, 2) & mask; arg.a3 = get_user_reg(regs, 3) & mask; @@ -65,27 +97,11 @@ void ffa_handle_msg_send_direct_req(struct cpu_user_regs *regs, uint32_t fid) arg.a6 = get_user_reg(regs, 6) & mask; arg.a7 = get_user_reg(regs, 7) & mask; - arm_smccc_1_2_smc(&arg, &resp); - switch ( resp.a0 ) - { - case FFA_ERROR: - case FFA_SUCCESS_32: - case FFA_SUCCESS_64: - case FFA_MSG_SEND_DIRECT_RESP_32: - case FFA_MSG_SEND_DIRECT_RESP_64: - break; - default: - /* Bad fid, report back to the caller. 
*/ - memset(&resp, 0, sizeof(resp)); - resp.a0 = FFA_ERROR; - resp.a1 = src_dst; - resp.a2 = FFA_RET_ABORTED; - } + ffa_finish_direct_req_run(regs, &arg); + return; out: - ffa_set_regs(regs, resp.a0, resp.a1 & mask, resp.a2 & mask, resp.a3 & mask, - resp.a4 & mask, resp.a5 & mask, resp.a6 & mask, - resp.a7 & mask); + ffa_set_regs_error(regs, ret); } static int32_t ffa_msg_send2_vm(uint16_t dst_id, const void *src_buf, @@ -215,3 +231,36 @@ out: spin_unlock(&src_ctx->tx_lock); return ret; } + +void ffa_handle_run(struct cpu_user_regs *regs, uint32_t fid) +{ + struct arm_smccc_1_2_regs arg = { .a0 = fid, }; + uint32_t dst = get_user_reg(regs, 1); + int32_t ret; + + if ( !ffa_fw_supports_fid(fid) ) + { + ret = FFA_RET_NOT_SUPPORTED; + goto out; + } + + /* + * We do not support FFA_RUN to VMs. + * Destination endpoint ID is in bits [31:16], bits[15:0] contain the + * vCPU ID. + */ + if ( !FFA_ID_IS_SECURE(dst >> 16) ) + { + ret = FFA_RET_NOT_SUPPORTED; + goto out; + } + + arg.a1 = dst; + + ffa_finish_direct_req_run(regs, &arg); + + return; + +out: + ffa_set_regs_error(regs, ret); +} diff --git a/xen/arch/arm/tee/ffa_private.h b/xen/arch/arm/tee/ffa_private.h index c1dac09c75ca..a9a03c7c5d71 100644 --- a/xen/arch/arm/tee/ffa_private.h +++ b/xen/arch/arm/tee/ffa_private.h @@ -461,6 +461,7 @@ static inline void ffa_raise_rx_buffer_full(struct domain *d) void ffa_handle_msg_send_direct_req(struct cpu_user_regs *regs, uint32_t fid); int32_t ffa_handle_msg_send2(struct cpu_user_regs *regs); +void ffa_handle_run(struct cpu_user_regs *regs, uint32_t fid); #ifdef CONFIG_FFA_VM_TO_VM static inline uint16_t get_ffa_vm_count(void) -- 2.51.2
