xen: address violations of MISRA C Rule 13.1

MISRA C Rule 13.1 states: "Initializer lists shall not contain
persistent side effects."

The violations occur because both the `GVA_INFO` and `TRACE_TIME` macro
expansions place expressions with persistent side effects, introduced
via inline assembly, inside initializer lists.
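
For illustration only (`current_stub()` and `struct gva_info` below are
made-up stand-ins, not Xen code), a minimal sketch of the pattern the
rule flags and of the hoisting fix applied in this patch:

    #include <stddef.h>

    struct vcpu;
    struct gva_info { struct vcpu *v; };

    static struct vcpu *current_stub(void)
    {
        struct vcpu *v = NULL;

        /*
         * Stand-in for the asm-backed expansion of 'current'; the asm
         * statement is what the rule treats as a persistent side effect.
         */
        asm volatile ( "" : "+r" (v) );

        return v;
    }

    void example(void)
    {
        /* Non-compliant: the initializer list contains the asm-backed call. */
        struct gva_info bad = { current_stub() };

        /* Compliant: compute the value in a separate statement first. */
        struct vcpu *tmp = current_stub();
        struct gva_info good = { tmp };

        (void)bad;
        (void)good;
    }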

In the case of `GVA_INFO`, the issue stems from the initializer list
containing a direct use of `current`, which expands to
`this_cpu(curr_vcpu)` and involves a persistent side effect via an
`asm` statement. To resolve this, the side-effect-producing expression
is computed in a separate statement before the macro is used:

    struct vcpu *current_vcpu = current;

The computed value is then passed to the `GVA_INFO(current_vcpu)` macro,
so that the resulting initializer is free of persistent side effects.
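
For reference, `GVA_INFO()` wraps its argument in a compound-literal
initializer, roughly of the form (simplified; see the definition in
xen/arch/arm/guestcopy.c):

    #define GVA_INFO(vcpu) ((copy_info_t) { .gva = { vcpu } })

so any side effect in the argument expression would otherwise end up
directly in that initializer list.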

Similarly, the `TRACE_TIME` invocations violate this rule when passed
expressions such as `current->domain->domain_id` and `current->vcpu_id`,
which likewise rely on `current` and hence on inline assembly. To fix
this, the value of `current` is assigned to a local variable:

    struct vcpu *v = current;

This local variable is then used to access `domain_id` and `vcpu_id`,
ensuring that the arguments passed to the `TRACE_TIME` macro are simple
expressions free of persistent side effects.
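
In rough outline, `TRACE_TIME()` gathers its arguments into a local
array initializer, along the lines of (simplified sketch, not the
literal xen/include/xen/trace.h definition):

    uint32_t _d[] = { /* the TRACE_TIME() arguments */ };

so an argument whose evaluation has a persistent side effect, such as
the asm-backed expansion of `current`, would otherwise land in that
initializer list.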

Signed-off-by: Dmytro Prokopchuk <dmytro_prokopch...@epam.com>
---
Test CI pipeline:
https://gitlab.com/xen-project/people/dimaprkp4k/xen/-/pipelines/1959339335
---
 xen/arch/arm/guestcopy.c | 12 ++++++++----
 xen/common/sched/core.c  | 11 ++++++-----
 2 files changed, 14 insertions(+), 9 deletions(-)

diff --git a/xen/arch/arm/guestcopy.c b/xen/arch/arm/guestcopy.c
index 497e785ec4..f483908510 100644
--- a/xen/arch/arm/guestcopy.c
+++ b/xen/arch/arm/guestcopy.c
@@ -109,27 +109,31 @@ static unsigned long copy_guest(void *buf, uint64_t addr, unsigned int len,
 
 unsigned long raw_copy_to_guest(void *to, const void *from, unsigned int len)
 {
+    struct vcpu *current_vcpu = current;
     return copy_guest((void *)from, (vaddr_t)to, len,
-                      GVA_INFO(current), COPY_to_guest | COPY_linear);
+                      GVA_INFO(current_vcpu), COPY_to_guest | COPY_linear);
 }
 
 unsigned long raw_copy_to_guest_flush_dcache(void *to, const void *from,
                                              unsigned int len)
 {
-    return copy_guest((void *)from, (vaddr_t)to, len, GVA_INFO(current),
+    struct vcpu *current_vcpu = current;
+    return copy_guest((void *)from, (vaddr_t)to, len, GVA_INFO(current_vcpu),
                       COPY_to_guest | COPY_flush_dcache | COPY_linear);
 }
 
 unsigned long raw_clear_guest(void *to, unsigned int len)
 {
-    return copy_guest(NULL, (vaddr_t)to, len, GVA_INFO(current),
+    struct vcpu *current_vcpu = current;
+    return copy_guest(NULL, (vaddr_t)to, len, GVA_INFO(current_vcpu),
                       COPY_to_guest | COPY_linear);
 }
 
 unsigned long raw_copy_from_guest(void *to, const void __user *from,
                                   unsigned int len)
 {
-    return copy_guest(to, (vaddr_t)from, len, GVA_INFO(current),
+    struct vcpu *current_vcpu = current;
+    return copy_guest(to, (vaddr_t)from, len, GVA_INFO(current_vcpu),
                       COPY_from_guest | COPY_linear);
 }
 
diff --git a/xen/common/sched/core.c b/xen/common/sched/core.c
index 4c77ea4b8d..a2c53dca14 100644
--- a/xen/common/sched/core.c
+++ b/xen/common/sched/core.c
@@ -1514,7 +1514,7 @@ static long do_poll(const struct sched_poll *sched_poll)
 /* Voluntarily yield the processor for this allocation. */
 long vcpu_yield(void)
 {
-    struct vcpu * v=current;
+    struct vcpu *v = current;
     spinlock_t *lock;
 
     rcu_read_lock(&sched_res_rculock);
@@ -1527,7 +1527,7 @@ long vcpu_yield(void)
 
     SCHED_STAT_CRANK(vcpu_yield);
 
-    TRACE_TIME(TRC_SCHED_YIELD, current->domain->domain_id, current->vcpu_id);
+    TRACE_TIME(TRC_SCHED_YIELD, v->domain->domain_id, v->vcpu_id);
     raise_softirq(SCHEDULE_SOFTIRQ);
     return 0;
 }
@@ -1899,6 +1899,7 @@ typedef long ret_t;
 ret_t do_sched_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
 {
     ret_t ret = 0;
+    struct vcpu *v = current;
 
     switch ( cmd )
     {
@@ -1922,8 +1923,8 @@ ret_t do_sched_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
         if ( copy_from_guest(&sched_shutdown, arg, 1) )
             break;
 
-        TRACE_TIME(TRC_SCHED_SHUTDOWN, current->domain->domain_id,
-                   current->vcpu_id, sched_shutdown.reason);
+        TRACE_TIME(TRC_SCHED_SHUTDOWN, v->domain->domain_id,
+                   v->vcpu_id, sched_shutdown.reason);
         ret = domain_shutdown(current->domain, (u8)sched_shutdown.reason);
 
         break;
@@ -1938,7 +1939,7 @@ ret_t do_sched_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
         if ( copy_from_guest(&sched_shutdown, arg, 1) )
             break;
 
-        TRACE_TIME(TRC_SCHED_SHUTDOWN_CODE, d->domain_id, current->vcpu_id,
+        TRACE_TIME(TRC_SCHED_SHUTDOWN_CODE, d->domain_id, v->vcpu_id,
                    sched_shutdown.reason);
 
         spin_lock(&d->shutdown_lock);
-- 
2.43.0
