Introduce support for the BPF_F_ALL_CPUS flag in percpu_array maps to
allow updating the value on every CPU from a single user-supplied value,
for both the update_elem and update_batch APIs (see the sketch below).
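
A minimal userspace sketch of the intended usage. It assumes libbpf's
bpf_map_update_elem() wrapper, a percpu_array fd in map_fd, and the new
flag definition from the matching uapi headers:

	__u32 key = 0;
	/* one value_size-sized buffer, not one buffer per CPU */
	__u64 val = 42;
	int err = bpf_map_update_elem(map_fd, &key, &val, BPF_F_ALL_CPUS);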

Introduce support for the BPF_F_CPU flag in percpu_array maps to allow:

* updating the value for a specified CPU, for both the update_elem and
update_batch APIs.
* looking up the value for a specified CPU, for both the lookup_elem and
lookup_batch APIs.

The BPF_F_CPU flag is passed via:

* map_flags of the lookup_elem and update_elem APIs, with the target CPU
number embedded in the upper 32 bits of the flags.
* elem_flags of the lookup_batch and update_batch APIs, with the target
CPU number embedded in the same way (see the sketch below).
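
For illustration, a sketch of the flag encoding from userspace. It
assumes libbpf's bpf_map_lookup_elem_flags() wrapper and the BPF_F_CPU
definition from the matching uapi headers; the target CPU 3 is an
arbitrary example, and the batch APIs take the same encoded value via
opts.elem_flags:

	__u32 key = 0;
	__u64 val;
	/* the CPU number travels in the upper 32 bits of the flags */
	__u64 flags = BPF_F_CPU | ((__u64)3 << 32);
	int err = bpf_map_lookup_elem_flags(map_fd, &key, &val, flags);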

Signed-off-by: Leon Hwang <[email protected]>
---
 include/linux/bpf.h   |  9 +++++++--
 kernel/bpf/arraymap.c | 29 ++++++++++++++++++++++++-----
 kernel/bpf/syscall.c  |  2 +-
 3 files changed, 32 insertions(+), 8 deletions(-)

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 65737870f2ea6..c43cdd90bfb12 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -2748,7 +2748,7 @@ int map_set_for_each_callback_args(struct bpf_verifier_env *env,
                                   struct bpf_func_state *callee);
 
 int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
-int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
+int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value, u64 flags);
 int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
                           u64 flags);
 int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
@@ -3815,7 +3815,12 @@ bpf_prog_update_insn_ptrs(struct bpf_prog *prog, u32 *offsets, void *image)
 
 static inline bool bpf_map_supports_cpu_flags(enum bpf_map_type map_type)
 {
-       return false;
+       switch (map_type) {
+       case BPF_MAP_TYPE_PERCPU_ARRAY:
+               return true;
+       default:
+               return false;
+       }
 }
 
 static inline int bpf_map_check_op_flags(struct bpf_map *map, u64 flags, u64 allowed_flags)
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 1eeb31c5b317d..67e9e811de3a7 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -307,7 +307,7 @@ static void *percpu_array_map_lookup_percpu_elem(struct bpf_map *map, void *key,
        return per_cpu_ptr(array->pptrs[index & array->index_mask], cpu);
 }
 
-int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
+int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value, u64 map_flags)
 {
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 index = *(u32 *)key;
@@ -325,11 +325,18 @@ int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
        size = array->elem_size;
        rcu_read_lock();
        pptr = array->pptrs[index & array->index_mask];
+       if (map_flags & BPF_F_CPU) {
+               cpu = map_flags >> 32;
+               copy_map_value(map, value, per_cpu_ptr(pptr, cpu));
+               check_and_init_map_value(map, value);
+               goto unlock;
+       }
        for_each_possible_cpu(cpu) {
                copy_map_value_long(map, value + off, per_cpu_ptr(pptr, cpu));
                check_and_init_map_value(map, value + off);
                off += size;
        }
+unlock:
        rcu_read_unlock();
        return 0;
 }
@@ -398,10 +405,11 @@ int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 index = *(u32 *)key;
        void __percpu *pptr;
        int cpu, off = 0;
+       void *ptr, *val;
        u32 size;
 
-       if (unlikely(map_flags > BPF_EXIST))
+       if (unlikely((map_flags & BPF_F_LOCK) || (u32)map_flags > BPF_F_ALL_CPUS))
                /* unknown flags */
                return -EINVAL;
 
@@ -422,11 +430,22 @@ int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
        size = array->elem_size;
        rcu_read_lock();
        pptr = array->pptrs[index & array->index_mask];
+       if (map_flags & BPF_F_CPU) {
+               cpu = map_flags >> 32;
+               ptr = per_cpu_ptr(pptr, cpu);
+               copy_map_value(map, ptr, value);
+               bpf_obj_free_fields(array->map.record, ptr);
+               goto unlock;
+       }
        for_each_possible_cpu(cpu) {
-               copy_map_value_long(map, per_cpu_ptr(pptr, cpu), value + off);
-               bpf_obj_free_fields(array->map.record, per_cpu_ptr(pptr, cpu));
-               off += size;
+               ptr = per_cpu_ptr(pptr, cpu);
+               /* BPF_F_ALL_CPUS reuses one user value for every CPU */
+               val = (map_flags & BPF_F_ALL_CPUS) ? value : value + off;
+               copy_map_value(map, ptr, val);
+               bpf_obj_free_fields(array->map.record, ptr);
+               off += size;
        }
+unlock:
        rcu_read_unlock();
        return 0;
 }
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 6edaab858b326..edb7462a34f13 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -318,7 +318,7 @@ static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
            map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
                err = bpf_percpu_hash_copy(map, key, value);
        } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
-               err = bpf_percpu_array_copy(map, key, value);
+               err = bpf_percpu_array_copy(map, key, value, flags);
        } else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
                err = bpf_percpu_cgroup_storage_copy(map, key, value);
        } else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
-- 
2.51.2

