Re: [PATCH bpf-next v8 1/7] A purely mechanical change to split the renaming from the actual generalization.

2020-08-18 Thread KP Singh



On 8/18/20 1:56 AM, Martin KaFai Lau wrote:
> On Mon, Aug 03, 2020 at 06:46:49PM +0200, KP Singh wrote:
>> From: KP Singh 
>>
>> Flags/consts:
>>
>>   SK_STORAGE_CREATE_FLAG_MASK     BPF_LOCAL_STORAGE_CREATE_FLAG_MASK
>>   BPF_SK_STORAGE_CACHE_SIZE       BPF_LOCAL_STORAGE_CACHE_SIZE
>>   MAX_VALUE_SIZE                  BPF_LOCAL_STORAGE_MAX_VALUE_SIZE
>>
>> Structs:
>>
>>   bucket                          bpf_local_storage_map_bucket
>>   bpf_sk_storage_map              bpf_local_storage_map
>>   bpf_sk_storage_data             bpf_local_storage_data
>>   bpf_sk_storage_elem             bpf_local_storage_elem
>>   bpf_sk_storage                  bpf_local_storage
>>
>> The "sk" member in bpf_local_storage is also updated to "owner"
>> in preparation for changing the type to void * in a subsequent patch.
>>
>> Functions:
>>
>>   selem_linked_to_sk              selem_linked_to_storage
>>   selem_alloc                     bpf_selem_alloc
>>   __selem_unlink_sk               bpf_selem_unlink_storage
>>   __selem_link_sk                 bpf_selem_link_storage
>>   selem_unlink_sk                 __bpf_selem_unlink_storage
>>   sk_storage_update               bpf_local_storage_update
>>   __sk_storage_lookup             bpf_local_storage_lookup
>>   bpf_sk_storage_map_free         bpf_local_storage_map_free
>>   bpf_sk_storage_map_alloc        bpf_local_storage_map_alloc
>>   bpf_sk_storage_map_alloc_check  bpf_local_storage_map_alloc_check
>>   bpf_sk_storage_map_check_btf    bpf_local_storage_map_check_btf
>>
> 
> [ ... ]
> 
>> @@ -147,85 +148,86 @@ static struct bpf_sk_storage_elem *selem_alloc(struct bpf_sk_storage_map *smap,
>>   * The caller must ensure selem->smap is still valid to be
>>   * dereferenced for its smap->elem_size and smap->cache_idx.
>>   */
>> -static bool __selem_unlink_sk(struct bpf_sk_storage *sk_storage,
>> -  struct bpf_sk_storage_elem *selem,
>> -  bool uncharge_omem)
>> +static bool bpf_selem_unlink_storage(struct bpf_local_storage *local_storage,
>> + struct bpf_local_storage_elem *selem,
>> + bool uncharge_omem)
> Please add a "_nolock()" suffix, just to be clear that the unlink_map()
> counter part is locked.  It could be a follow up later.

Done.
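(For illustration, a minimal sketch of how the locked/unlocked split reads
with the suggested suffix. The names and bodies below are assumed from this
thread rather than quoted from the applied patch, and the omem uncharging /
owner cleanup shown in the patch above is elided.)

    /* Sketch only: the _nolock() variant assumes the caller already
     * holds local_storage->lock; uncharge_omem handling is elided.
     */
    static bool
    bpf_selem_unlink_storage_nolock(struct bpf_local_storage *local_storage,
                                    struct bpf_local_storage_elem *selem,
                                    bool uncharge_omem)
    {
            bool free_local_storage;

            /* Unlink selem and report whether local_storage itself
             * became empty and can be freed by the caller.
             */
            free_local_storage = hlist_is_singular_node(&selem->snode,
                                                        &local_storage->list);
            hlist_del_init_rcu(&selem->snode);
            return free_local_storage;
    }

    static void bpf_selem_unlink_storage(struct bpf_local_storage_elem *selem)
    {
            struct bpf_local_storage *local_storage;
            bool free_local_storage = false;

            local_storage = rcu_dereference(selem->local_storage);
            raw_spin_lock_bh(&local_storage->lock);
            if (likely(selem_linked_to_storage(selem)))
                    free_local_storage =
                            bpf_selem_unlink_storage_nolock(local_storage,
                                                            selem, true);
            raw_spin_unlock_bh(&local_storage->lock);

            /* kfree_rcu() only after the lock is dropped, as the
             * comment in the patch below explains.
             */
            if (free_local_storage)
                    kfree_rcu(local_storage, rcu);
    }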

> 
>>  {
>> -struct bpf_sk_storage_map *smap;
>> -bool free_sk_storage;
>> +struct bpf_local_storage_map *smap;
>> +bool free_local_storage;

[...]

>> +if (unlikely(!selem_linked_to_storage(selem)))
>>  /* selem has already been unlinked from sk */
>>  return;
>>  
>> -sk_storage = rcu_dereference(selem->sk_storage);
>> -raw_spin_lock_bh(&sk_storage->lock);
>> -if (likely(selem_linked_to_sk(selem)))
>> -free_sk_storage = __selem_unlink_sk(sk_storage, selem, true);
>> -raw_spin_unlock_bh(&sk_storage->lock);
>> +local_storage = rcu_dereference(selem->local_storage);
>> +raw_spin_lock_bh(&local_storage->lock);
>> +if (likely(selem_linked_to_storage(selem)))
>> +free_local_storage =
>> +bpf_selem_unlink_storage(local_storage, selem, true);
>> +raw_spin_unlock_bh(&local_storage->lock);
>>  
>> -if (free_sk_storage)
>> -kfree_rcu(sk_storage, rcu);
>> +if (free_local_storage)
>> +kfree_rcu(local_storage, rcu);
>>  }
>>  
>> -static void __selem_link_sk(struct bpf_sk_storage *sk_storage,
>> -struct bpf_sk_storage_elem *selem)
>> +static void bpf_selem_link_storage(struct bpf_local_storage *local_storage,
>> +   struct bpf_local_storage_elem *selem)
> Same here. bpf_selem_link_storage"_nolock"().

Done.
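(Similarly, a rough sketch of the link side with the suggested suffix; the
body here is assumed from the surrounding patch, not quoted from it.)

    /* Sketch: caller is assumed to hold local_storage->lock. */
    static void
    bpf_selem_link_storage_nolock(struct bpf_local_storage *local_storage,
                                  struct bpf_local_storage_elem *selem)
    {
            RCU_INIT_POINTER(selem->local_storage, local_storage);
            hlist_add_head(&selem->snode, &local_storage->list);
    }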

> 
> Please tag the Subject line with "bpf:".

Done. Changed it to:

bpf: Renames in preparation for bpf_local_storage
    
A purely mechanical change to split the renaming from the actual
generalization.

[...]

> 
> Acked-by: Martin KaFai Lau 
> 


Re: [PATCH bpf-next v8 1/7] A purely mechanical change to split the renaming from the actual generalization.

2020-08-17 Thread Martin KaFai Lau
On Mon, Aug 03, 2020 at 06:46:49PM +0200, KP Singh wrote:
> From: KP Singh 
> 
> Flags/consts:
> 
>   SK_STORAGE_CREATE_FLAG_MASK BPF_LOCAL_STORAGE_CREATE_FLAG_MASK
>   BPF_SK_STORAGE_CACHE_SIZE   BPF_LOCAL_STORAGE_CACHE_SIZE
>   MAX_VALUE_SIZE  BPF_LOCAL_STORAGE_MAX_VALUE_SIZE
> 
> Structs:
> 
>   bucket  bpf_local_storage_map_bucket
>   bpf_sk_storage_map  bpf_local_storage_map
>   bpf_sk_storage_data bpf_local_storage_data
>   bpf_sk_storage_elem bpf_local_storage_elem
>   bpf_sk_storage  bpf_local_storage
> 
> The "sk" member in bpf_local_storage is also updated to "owner"
> in preparation for changing the type to void * in a subsequent patch.
> 
> Functions:
> 
>   selem_linked_to_sk              selem_linked_to_storage
>   selem_alloc                     bpf_selem_alloc
>   __selem_unlink_sk               bpf_selem_unlink_storage
>   __selem_link_sk                 bpf_selem_link_storage
>   selem_unlink_sk                 __bpf_selem_unlink_storage
>   sk_storage_update               bpf_local_storage_update
>   __sk_storage_lookup             bpf_local_storage_lookup
>   bpf_sk_storage_map_free         bpf_local_storage_map_free
>   bpf_sk_storage_map_alloc        bpf_local_storage_map_alloc
>   bpf_sk_storage_map_alloc_check  bpf_local_storage_map_alloc_check
>   bpf_sk_storage_map_check_btf    bpf_local_storage_map_check_btf
> 

[ ... ]

> @@ -147,85 +148,86 @@ static struct bpf_sk_storage_elem *selem_alloc(struct bpf_sk_storage_map *smap,
>   * The caller must ensure selem->smap is still valid to be
>   * dereferenced for its smap->elem_size and smap->cache_idx.
>   */
> -static bool __selem_unlink_sk(struct bpf_sk_storage *sk_storage,
> -   struct bpf_sk_storage_elem *selem,
> -   bool uncharge_omem)
> +static bool bpf_selem_unlink_storage(struct bpf_local_storage *local_storage,
> +  struct bpf_local_storage_elem *selem,
> +  bool uncharge_omem)
Please add a "_nolock()" suffix, just to be clear that the unlink_map()
counter part is locked.  It could be a follow up later.

>  {
> - struct bpf_sk_storage_map *smap;
> - bool free_sk_storage;
> + struct bpf_local_storage_map *smap;
> + bool free_local_storage;
>   struct sock *sk;
>  
>   smap = rcu_dereference(SDATA(selem)->smap);
> - sk = sk_storage->sk;
> + sk = local_storage->owner;
>  
>   /* All uncharging on sk->sk_omem_alloc must be done first.
> -  * sk may be freed once the last selem is unlinked from sk_storage.
> +  * sk may be freed once the last selem is unlinked from local_storage.
>*/
>   if (uncharge_omem)
>   atomic_sub(smap->elem_size, &sk->sk_omem_alloc);
>  
> - free_sk_storage = hlist_is_singular_node(&selem->snode,
> -  &sk_storage->list);
> - if (free_sk_storage) {
> - atomic_sub(sizeof(struct bpf_sk_storage), &sk->sk_omem_alloc);
> - sk_storage->sk = NULL;
> + free_local_storage = hlist_is_singular_node(&selem->snode,
> + &local_storage->list);
> + if (free_local_storage) {
> + atomic_sub(sizeof(struct bpf_local_storage),
> +    &sk->sk_omem_alloc);
> + local_storage->owner = NULL;
>   /* After this RCU_INIT, sk may be freed and cannot be used */
>   RCU_INIT_POINTER(sk->sk_bpf_storage, NULL);
>  
> - /* sk_storage is not freed now.  sk_storage->lock is
> -  * still held and raw_spin_unlock_bh(&sk_storage->lock)
> + /* local_storage is not freed now.  local_storage->lock is
> +  * still held and raw_spin_unlock_bh(&local_storage->lock)
>* will be done by the caller.
>*
>* Although the unlock will be done under
>* rcu_read_lock(),  it is more intutivie to
> -  * read if kfree_rcu(sk_storage, rcu) is done
> -  * after the raw_spin_unlock_bh(&sk_storage->lock).
> +  * read if kfree_rcu(local_storage, rcu) is done
> +  * after the raw_spin_unlock_bh(&local_storage->lock).
>*
> -  * Hence, a "bool free_sk_storage" is returned
> +  * Hence, a "bool free_local_storage" is returned
>* to the caller which then calls the kfree_rcu()
>* after unlock.
>*/
>   }
>   hlist_del_init_rcu(&selem->snode);
> - if (rcu_access_pointer(sk_storage->cache[smap->cache_idx]) ==
> + if (rcu_access_pointer(local_storage->cache[smap->cache_idx]) ==
>   SDATA(selem))
> - RCU_INIT_POINTER(sk_storage->cache[smap->cache_idx], NULL);
> + RCU_INIT_POINTER(local_storage->cache[smap->cache_idx], NULL);
>  
>   kfree_rcu(selem, rcu);
>  
> - return free_sk_storage;

[PATCH bpf-next v8 1/7] A purely mechanical change to split the renaming from the actual generalization.

2020-08-03 Thread KP Singh
From: KP Singh 

Flags/consts:

  SK_STORAGE_CREATE_FLAG_MASK     BPF_LOCAL_STORAGE_CREATE_FLAG_MASK
  BPF_SK_STORAGE_CACHE_SIZE       BPF_LOCAL_STORAGE_CACHE_SIZE
  MAX_VALUE_SIZE                  BPF_LOCAL_STORAGE_MAX_VALUE_SIZE

Structs:

  bucket                          bpf_local_storage_map_bucket
  bpf_sk_storage_map              bpf_local_storage_map
  bpf_sk_storage_data             bpf_local_storage_data
  bpf_sk_storage_elem             bpf_local_storage_elem
  bpf_sk_storage                  bpf_local_storage

The "sk" member in bpf_local_storage is also updated to "owner"
in preparation for changing the type to void * in a subsequent patch.

Functions:

  selem_linked_to_sk              selem_linked_to_storage
  selem_alloc                     bpf_selem_alloc
  __selem_unlink_sk               bpf_selem_unlink_storage
  __selem_link_sk                 bpf_selem_link_storage
  selem_unlink_sk                 __bpf_selem_unlink_storage
  sk_storage_update               bpf_local_storage_update
  __sk_storage_lookup             bpf_local_storage_lookup
  bpf_sk_storage_map_free         bpf_local_storage_map_free
  bpf_sk_storage_map_alloc        bpf_local_storage_map_alloc
  bpf_sk_storage_map_alloc_check  bpf_local_storage_map_alloc_check
  bpf_sk_storage_map_check_btf    bpf_local_storage_map_check_btf

Signed-off-by: KP Singh 
---
 include/net/sock.h|   4 +-
 net/core/bpf_sk_storage.c | 455 +++---
 2 files changed, 233 insertions(+), 226 deletions(-)

diff --git a/include/net/sock.h b/include/net/sock.h
index 2cc3ba667908..685aee71b91a 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -246,7 +246,7 @@ struct sock_common {
/* public: */
 };
 
-struct bpf_sk_storage;
+struct bpf_local_storage;
 
 /**
   *struct sock - network layer representation of sockets
@@ -517,7 +517,7 @@ struct sock {
void(*sk_destruct)(struct sock *sk);
struct sock_reuseport __rcu *sk_reuseport_cb;
 #ifdef CONFIG_BPF_SYSCALL
-   struct bpf_sk_storage __rcu *sk_bpf_storage;
+   struct bpf_local_storage __rcu  *sk_bpf_storage;
 #endif
struct rcu_head sk_rcu;
 };
diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c
index d3377c90a291..a5cc218834ee 100644
--- a/net/core/bpf_sk_storage.c
+++ b/net/core/bpf_sk_storage.c
@@ -12,33 +12,32 @@
 #include 
 #include 
 
-#define SK_STORAGE_CREATE_FLAG_MASK\
-   (BPF_F_NO_PREALLOC | BPF_F_CLONE)
+#define BPF_LOCAL_STORAGE_CREATE_FLAG_MASK (BPF_F_NO_PREALLOC | BPF_F_CLONE)
 
-struct bucket {
+struct bpf_local_storage_map_bucket {
struct hlist_head list;
raw_spinlock_t lock;
 };
 
-/* Thp map is not the primary owner of a bpf_sk_storage_elem.
- * Instead, the sk->sk_bpf_storage is.
+/* Thp map is not the primary owner of a bpf_local_storage_elem.
+ * Instead, the container object (eg. sk->sk_bpf_storage) is.
  *
- * The map (bpf_sk_storage_map) is for two purposes
- * 1. Define the size of the "sk local storage".  It is
+ * The map (bpf_local_storage_map) is for two purposes
+ * 1. Define the size of the "local storage".  It is
  *the map's value_size.
  *
  * 2. Maintain a list to keep track of all elems such
  *that they can be cleaned up during the map destruction.
  *
  * When a bpf local storage is being looked up for a
- * particular sk,  the "bpf_map" pointer is actually used
+ * particular object,  the "bpf_map" pointer is actually used
  * as the "key" to search in the list of elem in
- * sk->sk_bpf_storage.
+ * the respective bpf_local_storage owned by the object.
  *
- * Hence, consider sk->sk_bpf_storage is the mini-map
- * with the "bpf_map" pointer as the searching key.
+ * e.g. sk->sk_bpf_storage is the mini-map with the "bpf_map" pointer
+ * as the searching key.
  */
-struct bpf_sk_storage_map {
+struct bpf_local_storage_map {
struct bpf_map map;
/* Lookup elem does not require accessing the map.
 *
@@ -46,55 +45,57 @@ struct bpf_sk_storage_map {
 * link/unlink the elem from the map.  Having
 * multiple buckets to improve contention.
 */
-   struct bucket *buckets;
+   struct bpf_local_storage_map_bucket *buckets;
u32 bucket_log;
u16 elem_size;
u16 cache_idx;
 };
 
-struct bpf_sk_storage_data {
+struct bpf_local_storage_data {
/* smap is used as the searching key when looking up
-* from sk->sk_bpf_storage.
+* from the object's bpf_local_storage.
 *
 * Put it in the same cacheline as the data to minimize
 * the number of cachelines access during the cache hit case.
 */
-   struct bpf_sk_storage_map __rcu *smap;
+   struct bpf_local_storage_map __rcu *smap;
u8 data[] __aligned(8);
 };
 
-/* Linked to bpf_sk_storage and bpf_sk_storage_map */
-struct bpf_sk_storage_elem {