There is now no place where kasan_arch_is_ready() is needed, so remove
all of its invocations, along with the helper's fallback definition in
kasan.h.

Signed-off-by: Baoquan He <b...@redhat.com>
---
 mm/kasan/common.c  |  9 +++------
 mm/kasan/generic.c |  9 ---------
 mm/kasan/kasan.h   |  6 ------
 mm/kasan/shadow.c  | 18 ------------------
 4 files changed, 3 insertions(+), 39 deletions(-)

diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 69a848f2a8aa..e48c1fd60edf 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -271,7 +271,7 @@ static inline void poison_slab_object(struct kmem_cache *cache, void *object,
 bool __kasan_slab_pre_free(struct kmem_cache *cache, void *object,
                                unsigned long ip)
 {
-       if (!kasan_arch_is_ready() || is_kfence_address(object))
+       if (is_kfence_address(object))
                return false;
        return check_slab_allocation(cache, object, ip);
 }
@@ -279,7 +279,7 @@ bool __kasan_slab_pre_free(struct kmem_cache *cache, void *object,
 bool __kasan_slab_free(struct kmem_cache *cache, void *object, bool init,
                       bool still_accessible)
 {
-       if (!kasan_arch_is_ready() || is_kfence_address(object))
+       if (is_kfence_address(object))
                return false;
 
        /*
@@ -318,9 +318,6 @@ bool __kasan_slab_free(struct kmem_cache *cache, void *object, bool init,
 
 static inline bool check_page_allocation(void *ptr, unsigned long ip)
 {
-       if (!kasan_arch_is_ready())
-               return false;
-
        if (ptr != page_address(virt_to_head_page(ptr))) {
                kasan_report_invalid_free(ptr, ip, KASAN_REPORT_INVALID_FREE);
                return true;
@@ -547,7 +544,7 @@ bool __kasan_mempool_poison_object(void *ptr, unsigned long ip)
                return true;
        }
 
-       if (is_kfence_address(ptr) || !kasan_arch_is_ready())
+       if (is_kfence_address(ptr))
                return true;
 
        slab = folio_slab(folio);
diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
index 8daea5892754..d513e3e2e136 100644
--- a/mm/kasan/generic.c
+++ b/mm/kasan/generic.c
@@ -168,9 +168,6 @@ static __always_inline bool check_region_inline(const void *addr,
        if (!kasan_enabled())
                return true;
 
-       if (!kasan_arch_is_ready())
-               return true;
-
        if (unlikely(size == 0))
                return true;
 
@@ -196,9 +193,6 @@ bool kasan_byte_accessible(const void *addr)
 {
        s8 shadow_byte;
 
-       if (!kasan_arch_is_ready())
-               return true;
-
        shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(addr));
 
        return shadow_byte >= 0 && shadow_byte < KASAN_GRANULE_SIZE;
@@ -505,9 +499,6 @@ static void release_alloc_meta(struct kasan_alloc_meta *meta)
 
 static void release_free_meta(const void *object, struct kasan_free_meta *meta)
 {
-       if (!kasan_arch_is_ready())
-               return;
-
        /* Check if free meta is valid. */
        if (*(u8 *)kasan_mem_to_shadow(object) != KASAN_SLAB_FREE_META)
                return;
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 129178be5e64..e0ffc16495d7 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -544,12 +544,6 @@ static inline void kasan_poison_last_granule(const void *address, size_t size) {
 
 #endif /* CONFIG_KASAN_GENERIC */
 
-#ifndef kasan_arch_is_ready
-static inline bool kasan_arch_is_ready(void)   { return true; }
-#elif !defined(CONFIG_KASAN_GENERIC) || !defined(CONFIG_KASAN_OUTLINE)
-#error kasan_arch_is_ready only works in KASAN generic outline mode!
-#endif
-
 #if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)
 
 void kasan_kunit_test_suite_start(void);
diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c
index 637f2d02d2a3..d8b975282b22 100644
--- a/mm/kasan/shadow.c
+++ b/mm/kasan/shadow.c
@@ -128,9 +128,6 @@ void kasan_poison(const void *addr, size_t size, u8 value, bool init)
        if (!kasan_enabled())
                return;
 
-       if (!kasan_arch_is_ready())
-               return;
-
        /*
         * Perform shadow offset calculation based on untagged address, as
         * some of the callers (e.g. kasan_poison_new_object) pass tagged
@@ -156,9 +153,6 @@ void kasan_poison_last_granule(const void *addr, size_t size)
        if (!kasan_enabled())
                return;
 
-       if (!kasan_arch_is_ready())
-               return;
-
        if (size & KASAN_GRANULE_MASK) {
                u8 *shadow = (u8 *)kasan_mem_to_shadow(addr + size);
                *shadow = size & KASAN_GRANULE_MASK;
@@ -402,9 +396,6 @@ int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
        if (!kasan_enabled())
                return 0;
 
-       if (!kasan_arch_is_ready())
-               return 0;
-
        if (!is_vmalloc_or_module_addr((void *)addr))
                return 0;
 
@@ -575,9 +566,6 @@ void kasan_release_vmalloc(unsigned long start, unsigned long end,
        if (!kasan_enabled())
                return;
 
-       if (!kasan_arch_is_ready())
-               return;
-
        region_start = ALIGN(start, KASAN_MEMORY_PER_SHADOW_PAGE);
        region_end = ALIGN_DOWN(end, KASAN_MEMORY_PER_SHADOW_PAGE);
 
@@ -626,9 +614,6 @@ void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
         * with setting memory tags, so the KASAN_VMALLOC_INIT flag is ignored.
         */
 
-       if (!kasan_arch_is_ready())
-               return (void *)start;
-
        if (!is_vmalloc_or_module_addr(start))
                return (void *)start;
 
@@ -651,9 +636,6 @@ void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
  */
 void __kasan_poison_vmalloc(const void *start, unsigned long size)
 {
-       if (!kasan_arch_is_ready())
-               return;
-
        if (!is_vmalloc_or_module_addr(start))
                return;
 
-- 
2.41.0
