#syz test
diff --git a/Documentation/dev-tools/kmemleak.rst b/Documentation/dev-tools/kmemleak.rst
index 7d784e03f3f9..da2c849d4735 100644
--- a/Documentation/dev-tools/kmemleak.rst
+++ b/Documentation/dev-tools/kmemleak.rst
@@ -163,6 +163,7 @@ See the include/linux/kmemleak.h header for the functions prototype.
- ``kmemleak_not_leak`` - mark an object as not a leak
- ``kmemleak_transient_leak`` - mark an object as a transient leak
- ``kmemleak_ignore`` - do not scan or report an object as leak
+- ``kmemleak_unignore`` - undo a previous kmemleak_ignore()
- ``kmemleak_scan_area`` - add scan areas inside a memory block
- ``kmemleak_no_scan`` - do not scan a memory block
- ``kmemleak_erase`` - erase an old value in a pointer variable
diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h
index fbd424b2abb1..4eec0560be09 100644
--- a/include/linux/kmemleak.h
+++ b/include/linux/kmemleak.h
@@ -28,6 +28,7 @@ extern void kmemleak_update_trace(const void *ptr) __ref;
extern void kmemleak_not_leak(const void *ptr) __ref;
extern void kmemleak_transient_leak(const void *ptr) __ref;
extern void kmemleak_ignore(const void *ptr) __ref;
+extern void kmemleak_unignore(const void *ptr, int min_count) __ref;
extern void kmemleak_ignore_percpu(const void __percpu *ptr) __ref;
extern void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp) __ref;
extern void kmemleak_no_scan(const void *ptr) __ref;
@@ -104,6 +105,10 @@ static inline void kmemleak_ignore_percpu(const void __percpu *ptr)
static inline void kmemleak_ignore(const void *ptr)
{
}
+
+static inline void kmemleak_unignore(const void *ptr, int min_count)
+{
+}
static inline void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
{
}
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index d79acf5c5100..871e20ba3d7b 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -909,6 +909,8 @@ static void __paint_it(struct kmemleak_object *object, int color)
object->min_count = color;
if (color == KMEMLEAK_BLACK)
object->flags |= OBJECT_NO_SCAN;
+ else
+ object->flags &= ~OBJECT_NO_SCAN;
}
static void paint_it(struct kmemleak_object *object, int color)
@@ -1292,6 +1294,24 @@ void __ref kmemleak_ignore(const void *ptr)
}
EXPORT_SYMBOL(kmemleak_ignore);
+/**
+ * kmemleak_unignore - undo a previous kmemleak_ignore() on an object
+ * @ptr: pointer to beginning of the object
+ * @min_count: minimum number of references the object must have to be
+ * considered a non-leak (see kmemleak_alloc() for details)
+ *
+ * Calling this function undoes a prior kmemleak_ignore() by restoring the
+ * given min_count, making the object visible to kmemleak again.
+ */
+void __ref kmemleak_unignore(const void *ptr, int min_count)
+{
+ pr_debug("%s(0x%px)\n", __func__, ptr);
+
+ if (kmemleak_enabled && ptr && !IS_ERR(ptr))
+ paint_ptr((unsigned long)ptr, min_count, 0);
+}
+EXPORT_SYMBOL(kmemleak_unignore);
+
/**
* kmemleak_scan_area - limit the range to be scanned in an allocated object
* @ptr: pointer to beginning or inside the object. This also
diff --git a/mm/slab_common.c b/mm/slab_common.c
index d5a70a831a2a..73f4668d870d 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1954,8 +1954,14 @@ void kvfree_call_rcu(struct rcu_head *head, void *ptr)
if (!head)
might_sleep();
- if (!IS_ENABLED(CONFIG_PREEMPT_RT) && kfree_rcu_sheaf(ptr))
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT) && kfree_rcu_sheaf(ptr)) {
+ /*
+ * The object is now queued for deferred freeing via an RCU
+ * sheaf. Tell kmemleak to ignore it.
+ */
+ kmemleak_ignore(ptr);
return;
+ }
// Queue the object but don't yet schedule the batch.
if (debug_rcu_head_queue(ptr)) {
diff --git a/mm/slub.c b/mm/slub.c
index 20cb4f3b636d..3bfe113ae326 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3014,8 +3014,10 @@ static void pcs_flush_all(struct kmem_cache *s)
free_empty_sheaf(s, spare);
}
- if (rcu_free)
+ if (rcu_free) {
+ kmemleak_ignore(rcu_free);
call_rcu(&rcu_free->rcu_head, rcu_free_sheaf_nobarn);
+ }
sheaf_flush_main(s);
}
@@ -3035,6 +3037,7 @@ static void __pcs_flush_all_cpu(struct kmem_cache *s, unsigned int cpu)
}
if (pcs->rcu_free) {
+ kmemleak_ignore(pcs->rcu_free);
call_rcu(&pcs->rcu_free->rcu_head, rcu_free_sheaf_nobarn);
pcs->rcu_free = NULL;
}
@@ -4031,8 +4034,10 @@ static void flush_rcu_sheaf(struct work_struct *w)
local_unlock(&s->cpu_sheaves->lock);
- if (rcu_free)
+ if (rcu_free) {
+ kmemleak_ignore(rcu_free);
call_rcu(&rcu_free->rcu_head, rcu_free_sheaf_nobarn);
+ }
}
@@ -5832,6 +5837,7 @@ static void rcu_free_sheaf(struct rcu_head *head)
if (data_race(barn->nr_full) < MAX_FULL_SHEAVES) {
stat(s, BARN_PUT);
+ kmemleak_unignore(sheaf, 1);
barn_put_full_sheaf(barn, sheaf);
return;
}
@@ -5842,6 +5848,7 @@ static void rcu_free_sheaf(struct rcu_head *head)
empty:
if (barn && data_race(barn->nr_empty) < MAX_EMPTY_SHEAVES) {
+ kmemleak_unignore(sheaf, 1);
barn_put_empty_sheaf(barn, sheaf);
return;
}
@@ -5948,8 +5955,10 @@ bool __kfree_rcu_sheaf(struct kmem_cache *s, void *obj)
* we flush before local_unlock to make sure a racing
* flush_all_rcu_sheaves() doesn't miss this sheaf
*/
- if (rcu_sheaf)
+ if (rcu_sheaf) {
+ kmemleak_ignore(rcu_sheaf);
call_rcu(&rcu_sheaf->rcu_head, rcu_free_sheaf);
+ }
local_unlock(&s->cpu_sheaves->lock);
@@ -7538,6 +7547,7 @@ static void early_kmem_cache_node_alloc(int node)
slab->freelist = get_freepointer(kmem_cache_node, n);
slab->inuse = 1;
kmem_cache_node->node[node] = n;
+ kmemleak_alloc(n, kmem_cache_node->size, 1, GFP_NOWAIT);
init_kmem_cache_node(n, NULL);
inc_slabs_node(kmem_cache_node, node, slab->objects);
base-commit: c23719abc3308df7ed3ad35650ad211fb2d2003d
--
2.43.0
_______________________________________________
Linux-f2fs-devel mailing list
[email protected]
https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel