In preparation for dynamic xstate buffer expansion, update the xstate
restore function parameters to take a struct fpu pointer, so the helpers
can handle both the static in-line xstate buffer and a dynamically
allocated xstate buffer.

No functional change.
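
For illustration only, a minimal sketch of where this parameter change is
headed: with a struct fpu argument, a restore helper can pick the backing
buffer itself. The state_ptr member and the helper below are hypothetical
and not part of this patch.

	/*
	 * Hypothetical sketch: select between the static in-line xstate
	 * buffer and a dynamically allocated one. "state_ptr" stands in
	 * for whatever the dynamic buffer member ends up being called.
	 */
	static inline struct xregs_state *xsave_buffer_of(struct fpu *fpu)
	{
		if (fpu->state_ptr)		/* dynamic buffer, if allocated */
			return &fpu->state_ptr->xsave;

		return &fpu->state.xsave;	/* static in-line buffer */
	}

A caller such as copy_kernel_to_xregs_err() could then restore from
whichever buffer such a helper returns, while call sites only ever pass
the fpu pointer.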

Signed-off-by: Chang S. Bae <chang.seok....@intel.com>
Reviewed-by: Len Brown <len.br...@intel.com>
Cc: x...@kernel.org
Cc: linux-kernel@vger.kernel.org
Cc: k...@vger.kernel.org
---
Changes from v2:
* Updated the changelog, with the task->fpu reference removed. (Boris Petkov)
---
 arch/x86/include/asm/fpu/internal.h | 9 ++++++---
 arch/x86/kernel/fpu/core.c          | 4 ++--
 arch/x86/kernel/fpu/signal.c        | 3 +--
 arch/x86/kvm/x86.c                  | 2 +-
 4 files changed, 10 insertions(+), 8 deletions(-)

diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 0153c4d4ca77..37ea5e37f21c 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -397,8 +397,9 @@ static inline int copy_user_to_xregs(struct xregs_state __user *buf, u64 mask)
  * Restore xstate from kernel space xsave area, return an error code instead of
  * an exception.
  */
-static inline int copy_kernel_to_xregs_err(struct xregs_state *xstate, u64 mask)
+static inline int copy_kernel_to_xregs_err(struct fpu *fpu, u64 mask)
 {
+       struct xregs_state *xstate = &fpu->state.xsave;
        u32 lmask = mask;
        u32 hmask = mask >> 32;
        int err;
@@ -425,8 +426,10 @@ static inline void __copy_kernel_to_fpregs(union fpregs_state *fpstate, u64 mask
        }
 }
 
-static inline void copy_kernel_to_fpregs(union fpregs_state *fpstate)
+static inline void copy_kernel_to_fpregs(struct fpu *fpu)
 {
+       union fpregs_state *fpstate = &fpu->state;
+
        /*
         * AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception is
         * pending. Clear the x87 state here by setting it to fixed values.
@@ -511,7 +514,7 @@ static inline void __fpregs_load_activate(void)
                return;
 
        if (!fpregs_state_valid(fpu, cpu)) {
-               copy_kernel_to_fpregs(&fpu->state);
+               copy_kernel_to_fpregs(fpu);
                fpregs_activate(fpu);
                fpu->last_cpu = cpu;
        }
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index f23e5ffbb307..20925cae2a84 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -172,7 +172,7 @@ void fpu__save(struct fpu *fpu)
 
        if (!test_thread_flag(TIF_NEED_FPU_LOAD)) {
                if (!copy_fpregs_to_fpstate(fpu)) {
-                       copy_kernel_to_fpregs(&fpu->state);
+                       copy_kernel_to_fpregs(fpu);
                }
        }
 
@@ -248,7 +248,7 @@ int fpu__copy(struct task_struct *dst, struct task_struct *src)
                memcpy(&dst_fpu->state, &src_fpu->state, fpu_kernel_xstate_size);
 
        else if (!copy_fpregs_to_fpstate(dst_fpu))
-               copy_kernel_to_fpregs(&dst_fpu->state);
+               copy_kernel_to_fpregs(dst_fpu);
 
        fpregs_unlock();
 
diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
index 0d6deb75c507..414a13427934 100644
--- a/arch/x86/kernel/fpu/signal.c
+++ b/arch/x86/kernel/fpu/signal.c
@@ -426,8 +426,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
                 * Restore previously saved supervisor xstates along with
                 * copied-in user xstates.
                 */
-               ret = copy_kernel_to_xregs_err(&fpu->state.xsave,
-                                              user_xfeatures | xfeatures_mask_supervisor());
+               ret = copy_kernel_to_xregs_err(fpu, user_xfeatures | xfeatures_mask_supervisor());
 
        } else if (use_fxsr()) {
                ret = __copy_from_user(&fpu->state.fxsave, buf_fx, state_size);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 09368201d9cc..a087bbf252b6 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -9249,7 +9249,7 @@ static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
 
        kvm_save_current_fpu(vcpu->arch.guest_fpu);
 
-       copy_kernel_to_fpregs(&vcpu->arch.user_fpu->state);
+       copy_kernel_to_fpregs(vcpu->arch.user_fpu);
 
        fpregs_mark_activate();
        fpregs_unlock();
-- 
2.17.1
