[PATCH bpf-next v5 1/9] bpf: Generalize data copying for percpu maps

Refactor the data copying logic of the following percpu map types:

* percpu_array
* percpu_hash
* lru_percpu_hash
* percpu_cgroup_storage

by introducing two helpers:

* 'bpf_percpu_copy_data()'
* 'bpf_percpu_update_data()'

This paves the way for introducing the BPF_F_CPU and BPF_F_ALL_CPUS
flags for these percpu maps with less code churn.
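
For illustration only (this sketch is not part of the patch, and the
example_percpu_copy() name is made up), a percpu map's copy-to-user
path is expected to use the new helper the same way the existing
callers below do, with the caller providing one round_up(value_size, 8)
slot per possible CPU:

static int example_percpu_copy(struct bpf_map *map, void __percpu *pptr,
			       void *value)
{
	/* User space supplies num_possible_cpus() slots of this size. */
	u32 size = round_up(map->value_size, 8);

	rcu_read_lock();
	/* Copy each CPU's slot into value at the next size-aligned offset. */
	bpf_percpu_copy_data(map, pptr, value, size);
	rcu_read_unlock();
	return 0;
}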

Signed-off-by: Leon Hwang <leon.hwang@xxxxxxxxx>
---
 include/linux/bpf.h        | 29 ++++++++++++++++++++++++++++-
 kernel/bpf/arraymap.c      | 14 ++------------
 kernel/bpf/hashtab.c       | 27 ++++-----------------------
 kernel/bpf/local_storage.c | 18 ++++++------------
 4 files changed, 40 insertions(+), 48 deletions(-)

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 8f6e87f0f3a89..ce523a49dc20c 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -547,6 +547,34 @@ static inline void copy_map_value_long(struct bpf_map *map, void *dst, void *src
 	bpf_obj_memcpy(map->record, dst, src, map->value_size, true);
 }
 
+#ifdef CONFIG_BPF_SYSCALL
+static inline void bpf_percpu_copy_data(struct bpf_map *map, void __percpu *pptr, void *value,
+					u32 size)
+{
+	int cpu, off = 0;
+
+	for_each_possible_cpu(cpu) {
+		copy_map_value_long(map, value + off, per_cpu_ptr(pptr, cpu));
+		check_and_init_map_value(map, value + off);
+		off += size;
+	}
+}
+
+void bpf_obj_free_fields(const struct btf_record *rec, void *obj);
+
+static inline void bpf_percpu_update_data(struct bpf_map *map, void __percpu *pptr, void *value,
+					  u32 size)
+{
+	int cpu, off = 0;
+
+	for_each_possible_cpu(cpu) {
+		copy_map_value_long(map, per_cpu_ptr(pptr, cpu), value + off);
+		bpf_obj_free_fields(map->record, per_cpu_ptr(pptr, cpu));
+		off += size;
+	}
+}
+#endif
+
 static inline void bpf_obj_swap_uptrs(const struct btf_record *rec, void *dst, void *src)
 {
 	unsigned long *src_uptr, *dst_uptr;
@@ -2417,7 +2445,6 @@ struct btf_record *btf_record_dup(const struct btf_record *rec);
 bool btf_record_equal(const struct btf_record *rec_a, const struct btf_record *rec_b);
 void bpf_obj_free_timer(const struct btf_record *rec, void *obj);
 void bpf_obj_free_workqueue(const struct btf_record *rec, void *obj);
-void bpf_obj_free_fields(const struct btf_record *rec, void *obj);
 void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu);
 
 struct bpf_map *bpf_map_get(u32 ufd);
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 3d080916faf97..ed9e47dc4137b 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -300,7 +300,6 @@ int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
 	struct bpf_array *array = container_of(map, struct bpf_array, map);
 	u32 index = *(u32 *)key;
 	void __percpu *pptr;
-	int cpu, off = 0;
 	u32 size;
 
 	if (unlikely(index >= array->map.max_entries))
@@ -313,11 +312,7 @@ int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
 	size = array->elem_size;
 	rcu_read_lock();
 	pptr = array->pptrs[index & array->index_mask];
-	for_each_possible_cpu(cpu) {
-		copy_map_value_long(map, value + off, per_cpu_ptr(pptr, cpu));
-		check_and_init_map_value(map, value + off);
-		off += size;
-	}
+	bpf_percpu_copy_data(map, pptr, value, size);
 	rcu_read_unlock();
 	return 0;
 }
@@ -387,7 +382,6 @@ int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
 	struct bpf_array *array = container_of(map, struct bpf_array, map);
 	u32 index = *(u32 *)key;
 	void __percpu *pptr;
-	int cpu, off = 0;
 	u32 size;
 
 	if (unlikely(map_flags > BPF_EXIST))
@@ -411,11 +405,7 @@ int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
 	size = array->elem_size;
 	rcu_read_lock();
 	pptr = array->pptrs[index & array->index_mask];
-	for_each_possible_cpu(cpu) {
-		copy_map_value_long(map, per_cpu_ptr(pptr, cpu), value + off);
-		bpf_obj_free_fields(array->map.record, per_cpu_ptr(pptr, cpu));
-		off += size;
-	}
+	bpf_percpu_update_data(map, pptr, value, size);
 	rcu_read_unlock();
 	return 0;
 }
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 71f9931ac64cd..0a2c1042d5fdb 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -944,12 +944,8 @@ static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr,
 		copy_map_value(&htab->map, this_cpu_ptr(pptr), value);
 	} else {
 		u32 size = round_up(htab->map.value_size, 8);
-		int off = 0, cpu;
 
-		for_each_possible_cpu(cpu) {
-			copy_map_value_long(&htab->map, per_cpu_ptr(pptr, cpu), value + off);
-			off += size;
-		}
+		bpf_percpu_update_data(&htab->map, pptr, value, size);
 	}
 }
 
@@ -1610,14 +1606,9 @@ static int __htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
 	if (is_percpu) {
 		u32 roundup_value_size = round_up(map->value_size, 8);
 		void __percpu *pptr;
-		int off = 0, cpu;
 
 		pptr = htab_elem_get_ptr(l, key_size);
-		for_each_possible_cpu(cpu) {
-			copy_map_value_long(&htab->map, value + off, per_cpu_ptr(pptr, cpu));
-			check_and_init_map_value(&htab->map, value + off);
-			off += roundup_value_size;
-		}
+		bpf_percpu_copy_data(&htab->map, pptr, value, roundup_value_size);
 	} else {
 		void *src = htab_elem_value(l, map->key_size);
 
@@ -1802,15 +1793,10 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map,
 		memcpy(dst_key, l->key, key_size);
 
 		if (is_percpu) {
-			int off = 0, cpu;
 			void __percpu *pptr;
 
 			pptr = htab_elem_get_ptr(l, map->key_size);
-			for_each_possible_cpu(cpu) {
-				copy_map_value_long(&htab->map, dst_val + off, per_cpu_ptr(pptr, cpu));
-				check_and_init_map_value(&htab->map, dst_val + off);
-				off += size;
-			}
+			bpf_percpu_copy_data(&htab->map, pptr, dst_val, size);
 		} else {
 			value = htab_elem_value(l, key_size);
 			if (is_fd_htab(htab)) {
@@ -2370,7 +2356,6 @@ int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
 	struct htab_elem *l;
 	void __percpu *pptr;
 	int ret = -ENOENT;
-	int cpu, off = 0;
 	u32 size;
 
 	/* per_cpu areas are zero-filled and bpf programs can only
@@ -2386,11 +2371,7 @@ int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
 	 * eviction heuristics when user space does a map walk.
 	 */
 	pptr = htab_elem_get_ptr(l, map->key_size);
-	for_each_possible_cpu(cpu) {
-		copy_map_value_long(map, value + off, per_cpu_ptr(pptr, cpu));
-		check_and_init_map_value(map, value + off);
-		off += size;
-	}
+	bpf_percpu_copy_data(map, pptr, value, size);
 	ret = 0;
 out:
 	rcu_read_unlock();
diff --git a/kernel/bpf/local_storage.c b/kernel/bpf/local_storage.c
index c93a756e035c0..a1debbd26a415 100644
--- a/kernel/bpf/local_storage.c
+++ b/kernel/bpf/local_storage.c
@@ -184,7 +184,7 @@ int bpf_percpu_cgroup_storage_copy(struct bpf_map *_map, void *key,
 {
 	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
 	struct bpf_cgroup_storage *storage;
-	int cpu, off = 0;
+	void __percpu *pptr;
 	u32 size;
 
 	rcu_read_lock();
@@ -199,11 +199,8 @@ int bpf_percpu_cgroup_storage_copy(struct bpf_map *_map, void *key,
 	 * will not leak any kernel data
 	 */
 	size = round_up(_map->value_size, 8);
-	for_each_possible_cpu(cpu) {
-		bpf_long_memcpy(value + off,
-				per_cpu_ptr(storage->percpu_buf, cpu), size);
-		off += size;
-	}
+	pptr = storage->percpu_buf;
+	bpf_percpu_copy_data(_map, pptr, value, size);
 	rcu_read_unlock();
 	return 0;
 }
@@ -213,7 +210,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *_map, void *key,
 {
 	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
 	struct bpf_cgroup_storage *storage;
-	int cpu, off = 0;
+	void __percpu *pptr;
 	u32 size;
 
 	if (map_flags != BPF_ANY && map_flags != BPF_EXIST)
@@ -233,11 +230,8 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *_map, void *key,
 	 * so no kernel data leaks possible
 	 */
 	size = round_up(_map->value_size, 8);
-	for_each_possible_cpu(cpu) {
-		bpf_long_memcpy(per_cpu_ptr(storage->percpu_buf, cpu),
-				value + off, size);
-		off += size;
-	}
+	pptr = storage->percpu_buf;
+	bpf_percpu_update_data(_map, pptr, value, size);
 	rcu_read_unlock();
 	return 0;
 }
-- 
2.50.1




