Diffstat (limited to 'include/linux/bpf.h')
 include/linux/bpf.h | 41 ++++++++++++++++++++++++-----------------
 1 file changed, 24 insertions(+), 17 deletions(-)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index aae85019abde..798aec816970 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -341,57 +341,64 @@ static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
 }
 
 /* copy everything but bpf_spin_lock, bpf_timer, and kptrs. There could be one of each. */
-static inline void __copy_map_value(struct bpf_map *map, void *dst, void *src, bool long_memcpy)
+static inline void bpf_obj_memcpy(struct btf_field_offs *foffs,
+				  void *dst, void *src, u32 size,
+				  bool long_memcpy)
 {
 	u32 curr_off = 0;
 	int i;
 
-	if (likely(!map->field_offs)) {
+	if (likely(!foffs)) {
 		if (long_memcpy)
-			bpf_long_memcpy(dst, src, round_up(map->value_size, 8));
+			bpf_long_memcpy(dst, src, round_up(size, 8));
 		else
-			memcpy(dst, src, map->value_size);
+			memcpy(dst, src, size);
 		return;
 	}
 
-	for (i = 0; i < map->field_offs->cnt; i++) {
-		u32 next_off = map->field_offs->field_off[i];
+	for (i = 0; i < foffs->cnt; i++) {
+		u32 next_off = foffs->field_off[i];
 		u32 sz = next_off - curr_off;
 
 		memcpy(dst + curr_off, src + curr_off, sz);
-		curr_off += map->field_offs->field_sz[i];
+		curr_off += foffs->field_sz[i];
 	}
-	memcpy(dst + curr_off, src + curr_off, map->value_size - curr_off);
+	memcpy(dst + curr_off, src + curr_off, size - curr_off);
 }
 
 static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
 {
-	__copy_map_value(map, dst, src, false);
+	bpf_obj_memcpy(map->field_offs, dst, src, map->value_size, false);
 }
 
 static inline void copy_map_value_long(struct bpf_map *map, void *dst, void *src)
 {
-	__copy_map_value(map, dst, src, true);
+	bpf_obj_memcpy(map->field_offs, dst, src, map->value_size, true);
 }
 
-static inline void zero_map_value(struct bpf_map *map, void *dst)
+static inline void bpf_obj_memzero(struct btf_field_offs *foffs, void *dst, u32 size)
 {
 	u32 curr_off = 0;
 	int i;
 
-	if (likely(!map->field_offs)) {
-		memset(dst, 0, map->value_size);
+	if (likely(!foffs)) {
+		memset(dst, 0, size);
 		return;
 	}
 
-	for (i = 0; i < map->field_offs->cnt; i++) {
-		u32 next_off = map->field_offs->field_off[i];
+	for (i = 0; i < foffs->cnt; i++) {
+		u32 next_off = foffs->field_off[i];
 		u32 sz = next_off - curr_off;
 
 		memset(dst + curr_off, 0, sz);
-		curr_off += map->field_offs->field_sz[i];
+		curr_off += foffs->field_sz[i];
 	}
-	memset(dst + curr_off, 0, map->value_size - curr_off);
+	memset(dst + curr_off, 0, size - curr_off);
+}
+
+static inline void zero_map_value(struct bpf_map *map, void *dst)
+{
+	bpf_obj_memzero(map->field_offs, dst, map->value_size);
 }
 
 void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
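
The pattern this hunk generalizes: walk the value byte range by byte range, copying ordinary data but skipping the offsets recorded in struct btf_field_offs (bpf_spin_lock, bpf_timer, kptr fields) so those regions in dst are never overwritten; copy_map_value() and zero_map_value() keep their old behavior by passing map->field_offs and map->value_size through, presumably so the same walk can be reused for BTF-described objects that are not map values. Below is a minimal, stand-alone user-space sketch of that skip-copy idea; the demo_* names, the example struct layout, and main() are invented for illustration and are not kernel or libbpf APIs.

/* Illustrative user-space mock of the skip-copy pattern: copy a value
 * while leaving "special" field regions untouched in dst. The offset
 * table is hand-built here; the kernel derives it from BTF.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <stddef.h>

struct demo_offs {			/* stand-in for struct btf_field_offs */
	uint32_t cnt;
	uint32_t field_off[4];
	uint8_t  field_sz[4];
};

static void demo_obj_memcpy(const struct demo_offs *foffs,
			    void *dst, const void *src, uint32_t size)
{
	uint32_t curr_off = 0;
	uint32_t i;

	if (!foffs) {			/* no special fields: plain copy */
		memcpy(dst, src, size);
		return;
	}
	for (i = 0; i < foffs->cnt; i++) {
		uint32_t next_off = foffs->field_off[i];

		/* copy the ordinary bytes before the special field ... */
		memcpy((char *)dst + curr_off, (const char *)src + curr_off,
		       next_off - curr_off);
		/* ... then advance past the special field itself */
		curr_off = next_off + foffs->field_sz[i];
	}
	/* copy whatever ordinary bytes remain after the last field */
	memcpy((char *)dst + curr_off, (const char *)src + curr_off,
	       size - curr_off);
}

struct demo_value {			/* hypothetical map value layout */
	uint64_t counter;		/* ordinary data: copied */
	uint32_t lock;			/* "special" field: skipped */
	uint64_t stats;			/* ordinary data: copied */
};

int main(void)
{
	struct demo_offs offs = {
		.cnt = 1,
		.field_off = { offsetof(struct demo_value, lock) },
		.field_sz  = { sizeof(uint32_t) },
	};
	struct demo_value src = { .counter = 1, .lock = 0xdead, .stats = 2 };
	struct demo_value dst = { .lock = 0x1 };

	demo_obj_memcpy(&offs, &dst, &src, sizeof(dst));
	/* dst.counter and dst.stats come from src; dst.lock is untouched */
	printf("counter=%llu lock=0x%x stats=%llu\n",
	       (unsigned long long)dst.counter, (unsigned)dst.lock,
	       (unsigned long long)dst.stats);
	return 0;
}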