Re: [PATCH 2/3] mm: Make list_lru_node::memcg_lrus RCU protected
Hello Kirill, On Tue, Aug 22, 2017 at 03:29:26PM +0300, Kirill Tkhai wrote: > The array list_lru_node::memcg_lrus::list_lru_one[] only grows, > and it never shrinks. The growths happens in memcg_update_list_lru_node(), > and old array's members remain the same after it. > > So, the access to the array's members may become RCU protected, > and it's possible to avoid using list_lru_node::lock to dereference it. > This will be used to get list's nr_items in next patch lockless. > > Signed-off-by: Kirill Tkhai The patch looks very nice. A few really minor comments below. First, I don't think it's worth splitting this patch in three: patch #1 introduces a structure member that is only used in patch #2, while patch #2 adds RCU protection, but nobody benefits from it until patch #3 is applied. Since patches #1 and #3 are tiny, why don't you fold them in patch #2? > diff --git a/mm/list_lru.c b/mm/list_lru.c > @@ -42,24 +42,30 @@ static void list_lru_unregister(struct list_lru *lru) > #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB) > static inline bool list_lru_memcg_aware(struct list_lru *lru) > { > + struct list_lru_memcg *memcg_lrus; > /* >* This needs node 0 to be always present, even >* in the systems supporting sparse numa ids. > + * > + * Here we only check the pointer is not NULL, > + * so RCU lock is not need. >*/ > - return !!lru->node[0].memcg_lrus; > + memcg_lrus = rcu_dereference_check(lru->node[0].memcg_lrus, true); > + return !!memcg_lrus; IIRC you don't need rcu_dereference() here, because you don't actually dereference anything. The compiler shouldn't complain if you leave this as is. > } > > static inline struct list_lru_one * > list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx) > { > + struct list_lru_memcg *memcg_lrus; > /* > - * The lock protects the array of per cgroup lists from relocation > - * (see memcg_update_list_lru_node). 
> + * Either lock and RCU protects the array of per cgroup lists Typo: s/and/or/ > + * from relocation (see memcg_update_list_lru_node). >*/ > - lockdep_assert_held(>lock); > - if (nlru->memcg_lrus && idx >= 0) > - return nlru->memcg_lrus->lru[idx]; > - > + memcg_lrus = rcu_dereference_check(nlru->memcg_lrus, > +lockdep_is_held(>lock)); > + if (memcg_lrus && idx >= 0) > + return memcg_lrus->lru[idx]; > return >lru; > } > > @@ -76,9 +82,12 @@ static __always_inline struct mem_cgroup > *mem_cgroup_from_kmem(void *ptr) > static inline struct list_lru_one * > list_lru_from_kmem(struct list_lru_node *nlru, void *ptr) > { > + struct list_lru_memcg *memcg_lrus; > struct mem_cgroup *memcg; > > - if (!nlru->memcg_lrus) > + /* Here we only check the pointer is not NULL, so RCU lock isn't need */ > + memcg_lrus = rcu_dereference_check(nlru->memcg_lrus, true); > + if (!memcg_lrus) Again, rcu_dereference() is redundant. > return >lru; > > memcg = mem_cgroup_from_kmem(ptr); > @@ -323,25 +332,33 @@ static int __memcg_init_list_lru_node(struct > list_lru_memcg *memcg_lrus, > > static int memcg_init_list_lru_node(struct list_lru_node *nlru) > { > + struct list_lru_memcg *memcg_lrus; > int size = memcg_nr_cache_ids; > > - nlru->memcg_lrus = kmalloc(sizeof(struct list_lru_memcg) + > -size * sizeof(void *), GFP_KERNEL); > - if (!nlru->memcg_lrus) > + memcg_lrus = kmalloc(sizeof(*memcg_lrus) + > + size * sizeof(void *), GFP_KERNEL); > + if (!memcg_lrus) > return -ENOMEM; > > - if (__memcg_init_list_lru_node(nlru->memcg_lrus, 0, size)) { > - kfree(nlru->memcg_lrus); > + if (__memcg_init_list_lru_node(memcg_lrus, 0, size)) { > + kfree(memcg_lrus); > return -ENOMEM; > } > + rcu_assign_pointer(nlru->memcg_lrus, memcg_lrus); You don't need a memory barrier here, so RCU_INIT_POINTER() would fit better. 
> > return 0; > } > > static void memcg_destroy_list_lru_node(struct list_lru_node *nlru) > { > - __memcg_destroy_list_lru_node(nlru->memcg_lrus, 0, memcg_nr_cache_ids); > - kfree(nlru->memcg_lrus); > + struct list_lru_memcg *memcg_lrus; > + /* > + * This is called when shrinker has already been unregistered, > + * and nobody can use it. So, it's not need to use kfree_rcu(). Typo: s/it's not need/there's no need/ > + */ > + memcg_lrus = rcu_dereference_check(nlru->memcg_lrus, true); IIRC there's rcu_dereference_protected() for cases when you don't expect any changes to an __rcu variable. Let's use it instead of rcu_dereference_check() where appropriate. > + __memcg_destroy_list_lru_node(memcg_lrus, 0, memcg_nr_cache_ids); > + kfree(memcg_lrus); > } > > static int
Re: [PATCH 2/3] mm: Make list_lru_node::memcg_lrus RCU protected
Hello Kirill, On Tue, Aug 22, 2017 at 03:29:26PM +0300, Kirill Tkhai wrote: > The array list_lru_node::memcg_lrus::list_lru_one[] only grows, > and it never shrinks. The growths happens in memcg_update_list_lru_node(), > and old array's members remain the same after it. > > So, the access to the array's members may become RCU protected, > and it's possible to avoid using list_lru_node::lock to dereference it. > This will be used to get list's nr_items in next patch lockless. > > Signed-off-by: Kirill Tkhai The patch looks very nice. A few really minor comments below. First, I don't think it's worth splitting this patch in three: patch #1 introduces a structure member that is only used in patch #2, while patch #2 adds RCU protection, but nobody benefits from it until patch #3 is applied. Since patches #1 and #3 are tiny, why don't you fold them in patch #2? > diff --git a/mm/list_lru.c > @@ -42,24 +42,30 @@ static void list_lru_unregister(struct list_lru *lru) > #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB) > static inline bool list_lru_memcg_aware(struct list_lru *lru) > { > + struct list_lru_memcg *memcg_lrus; > /* >* This needs node 0 to be always present, even >* in the systems supporting sparse numa ids. > + * > + * Here we only check the pointer is not NULL, > + * so RCU lock is not need. >*/ > - return !!lru->node[0].memcg_lrus; > + memcg_lrus = rcu_dereference_check(lru->node[0].memcg_lrus, true); > + return !!memcg_lrus; IIRC you don't need rcu_dereference() here, because you don't actually dereference anything. The compiler shouldn't complain if you leave this as is. > } > > static inline struct list_lru_one * > list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx) > { > + struct list_lru_memcg *memcg_lrus; > /* > - * The lock protects the array of per cgroup lists from relocation > - * (see memcg_update_list_lru_node). 
> + * Either lock and RCU protects the array of per cgroup lists Typo: s/and/or/ > + * from relocation (see memcg_update_list_lru_node). >*/ > - lockdep_assert_held(>lock); > - if (nlru->memcg_lrus && idx >= 0) > - return nlru->memcg_lrus->lru[idx]; > - > + memcg_lrus = rcu_dereference_check(nlru->memcg_lrus, > +lockdep_is_held(>lock)); > + if (memcg_lrus && idx >= 0) > + return memcg_lrus->lru[idx]; > return >lru; > } > > @@ -76,9 +82,12 @@ static __always_inline struct mem_cgroup > *mem_cgroup_from_kmem(void *ptr) > static inline struct list_lru_one * > list_lru_from_kmem(struct list_lru_node *nlru, void *ptr) > { > + struct list_lru_memcg *memcg_lrus; > struct mem_cgroup *memcg; > > - if (!nlru->memcg_lrus) > + /* Here we only check the pointer is not NULL, so RCU lock isn't need */ > + memcg_lrus = rcu_dereference_check(nlru->memcg_lrus, true); > + if (!memcg_lrus) Again, rcu_dereference() is redundant. > return >lru; > > memcg = mem_cgroup_from_kmem(ptr); > @@ -323,25 +332,33 @@ static int __memcg_init_list_lru_node(struct > list_lru_memcg *memcg_lrus, > > static int memcg_init_list_lru_node(struct list_lru_node *nlru) > { > + struct list_lru_memcg *memcg_lrus; > int size = memcg_nr_cache_ids; > > - nlru->memcg_lrus = kmalloc(sizeof(struct list_lru_memcg) + > -size * sizeof(void *), GFP_KERNEL); > - if (!nlru->memcg_lrus) > + memcg_lrus = kmalloc(sizeof(*memcg_lrus) + > + size * sizeof(void *), GFP_KERNEL); > + if (!memcg_lrus) > return -ENOMEM; > > - if (__memcg_init_list_lru_node(nlru->memcg_lrus, 0, size)) { > - kfree(nlru->memcg_lrus); > + if (__memcg_init_list_lru_node(memcg_lrus, 0, size)) { > + kfree(memcg_lrus); > return -ENOMEM; > } > + rcu_assign_pointer(nlru->memcg_lrus, memcg_lrus); You don't need a memory barrier here, so RCU_INIT_POINTER() would fit better. 
> > return 0; > } > > static void memcg_destroy_list_lru_node(struct list_lru_node *nlru) > { > - __memcg_destroy_list_lru_node(nlru->memcg_lrus, 0, memcg_nr_cache_ids); > - kfree(nlru->memcg_lrus); > + struct list_lru_memcg *memcg_lrus; > + /* > + * This is called when shrinker has already been unregistered, > + * and nobody can use it. So, it's not need to use kfree_rcu(). Typo: s/it's not need/there's no need/ > + */ > + memcg_lrus = rcu_dereference_check(nlru->memcg_lrus, true); IIRC there's rcu_dereference_protected() for cases when you don't expect any changes to an __rcu variable. Let's use it instead of rcu_dereference_check() where appropriate. > + __memcg_destroy_list_lru_node(memcg_lrus, 0, memcg_nr_cache_ids); > + kfree(memcg_lrus); > } > > static int memcg_update_list_lru_node(struct