On 8/27/20 11:38 AM, Catalin Marinas wrote:
> On Fri, Aug 14, 2020 at 07:27:06PM +0200, Andrey Konovalov wrote:
>> diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
>> index cde127508e38..a17fefb0571b 100644
>> --- a/arch/arm64/kernel/entry.S
>> +++ b/arch/arm64/kernel/entry.S
>> @@ -172,6 +172,29 @@ alternative_else_nop_endif
>>  #endif
>>      .endm
>>  
>> +    /* Note: tmp should always be a callee-saved register */
> 
> Why callee-saved? Do you preserve it anywhere here?
>

Aargh, this is an old comment, I forgot to remove it after the last refactor.
Thank you for pointing this out.

>> +    .macro mte_restore_gcr, el, tsk, tmp, tmp2
>> +#ifdef CONFIG_ARM64_MTE
>> +alternative_if_not ARM64_MTE
>> +    b       1f
>> +alternative_else_nop_endif
>> +    .if     \el == 0
>> +    ldr     \tmp, [\tsk, #THREAD_GCR_EL1_USER]
>> +    .else
>> +    ldr_l   \tmp, gcr_kernel_excl
>> +    .endif
>> +    /*
>> +     * Calculate and set the exclude mask preserving
>> +     * the RRND (bit[16]) setting.
>> +     */
>> +    mrs_s   \tmp2, SYS_GCR_EL1
>> +    bfi     \tmp2, \tmp, #0, #16
>> +    msr_s   SYS_GCR_EL1, \tmp2
>> +    isb
>> +1:
>> +#endif
>> +    .endm
>> +
>>      .macro  kernel_entry, el, regsize = 64
>>      .if     \regsize == 32
>>      mov     w0, w0                          // zero upper 32 bits of x0
>> @@ -209,6 +232,8 @@ alternative_else_nop_endif
>>  
>>      ptrauth_keys_install_kernel tsk, x20, x22, x23
>>  
>> +    mte_restore_gcr 1, tsk, x22, x23
>> +
>>      scs_load tsk, x20
>>      .else
>>      add     x21, sp, #S_FRAME_SIZE
>> @@ -386,6 +411,8 @@ alternative_else_nop_endif
>>      /* No kernel C function calls after this as user keys are set. */
>>      ptrauth_keys_install_user tsk, x0, x1, x2
>>  
>> +    mte_restore_gcr 0, tsk, x0, x1
>> +
>>      apply_ssbd 0, x0, x1
>>      .endif
>>  
>> @@ -957,6 +984,7 @@ SYM_FUNC_START(cpu_switch_to)
>>      mov     sp, x9
>>      msr     sp_el0, x1
>>      ptrauth_keys_install_kernel x1, x8, x9, x10
>> +    mte_restore_gcr 1, x1, x8, x9
>>      scs_save x0, x8
>>      scs_load x1, x8
>>      ret
> 
> Since we set GCR_EL1 on exception entry and return, why is this needed?
> We don't have a per-kernel thread GCR_EL1, it's global to all threads,
> so I think cpu_switch_to() should not be touched.
> 

I agree, we can remove it. We only require the kernel entry and the kernel exit
ones.

>> diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
>> index 7717ea9bc2a7..cfac7d02f032 100644
>> --- a/arch/arm64/kernel/mte.c
>> +++ b/arch/arm64/kernel/mte.c
>> @@ -18,10 +18,14 @@
>>  
>>  #include <asm/barrier.h>
>>  #include <asm/cpufeature.h>
>> +#include <asm/kasan.h>
>> +#include <asm/kprobes.h>
>>  #include <asm/mte.h>
>>  #include <asm/ptrace.h>
>>  #include <asm/sysreg.h>
>>  
>> +u64 gcr_kernel_excl __read_mostly;
> 
> Could we make this __ro_after_init?
>

Yes, that makes sense; it should be updated only once, via mte_init_tags().

Something to consider here, though, is that this might not be the right approach
if in the future we want to add stack tagging. In that case we need to know the
kernel exclude mask before any C code is executed, and initializing the mask via
mte_init_tags() would be too late.

I was thinking of adding a compile-time define instead of having gcr_kernel_excl
at all. However, this would not work if the kernel exclude mask is meant to change
during execution.

Thoughts?

>> +
>>  static void mte_sync_page_tags(struct page *page, pte_t *ptep, bool 
>> check_swap)
>>  {
>>      pte_t old_pte = READ_ONCE(*ptep);
>> @@ -115,6 +119,13 @@ void * __must_check mte_set_mem_tag_range(void *addr, 
>> size_t size, u8 tag)
>>      return ptr;
>>  }
>>  
>> +void mte_init_tags(u64 max_tag)
>> +{
>> +    u64 incl = ((1ULL << ((max_tag & MTE_TAG_MAX) + 1)) - 1);
> 
> I'd rather use GENMASK here, it is more readable.
> 

Agree, we can change it.

-- 
Regards,
Vincenzo

Reply via email to