author    David S. Miller <davem@davemloft.net>  2017-12-18 10:49:22 -0500
committer David S. Miller <davem@davemloft.net>  2017-12-18 10:49:22 -0500
commit    b36025b19a2301ba0cac05e6ffad5668013d2c77 (patch)
tree      2fff4e78df48f5d9af54b35d9aa4aa8dd1494188 /lib
parent    f870c1ff65a6d1f3a083f277280802ee09a5b44d (diff)
parent    c1b08ebe5003ae291470cb6e26923628ab19606f (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
Daniel Borkmann says:

====================
pull-request: bpf 2017-12-17

The following pull-request contains BPF updates for your *net* tree.

The main changes are:

1) Fix a corner case in generic XDP where non-linear skbs that still have
   enough tailroom were not being linearized, from Song.

2) Fix BPF JIT bugs on s390x and ppc64 so that skb data is not recached
   when the BPF context is not an skb, from Daniel.

3) Fix a BPF JIT bug on sparc64 where recaching skb data after a helper
   call used the wrong register for the skb, from Daniel.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
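For context on the first item: native XDP guarantees the program sees a linear packet with XDP_PACKET_HEADROOM bytes of headroom, so the generic XDP path has to fix up skbs that do not meet that guarantee before running the program. The sketch below only illustrates that kind of check; the function name is hypothetical and the code is not the actual patch in this pull request.

#include <linux/skbuff.h>
#include <linux/bpf.h>

/* Illustrative only: make an skb satisfy the guarantees native XDP gives
 * (linear data, XDP_PACKET_HEADROOM bytes of headroom) before a generic
 * XDP program runs on it. Hypothetical helper, not kernel code.
 */
static int generic_xdp_fixup_skb(struct sk_buff *skb)
{
	if (skb_is_nonlinear(skb) ||
	    skb_headroom(skb) < XDP_PACKET_HEADROOM) {
		int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);

		/* Grow headroom if needed, then pull all fragments into
		 * the linear area so the program sees one flat buffer.
		 */
		if (pskb_expand_head(skb, hroom > 0 ? hroom : 0, 0,
				     GFP_ATOMIC))
			return -ENOMEM;
		if (skb_linearize(skb))
			return -ENOMEM;
	}
	return 0;
}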
Diffstat (limited to 'lib')
-rw-r--r--  lib/test_bpf.c | 43
1 file changed, 43 insertions(+), 0 deletions(-)
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index aa8812ae6776..9e9748089270 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -435,6 +435,41 @@ loop:
return 0;
}
+static int bpf_fill_ld_abs_vlan_push_pop2(struct bpf_test *self)
+{
+	struct bpf_insn *insn;
+
+	insn = kmalloc_array(16, sizeof(*insn), GFP_KERNEL);
+	if (!insn)
+		return -ENOMEM;
+
+	/* Due to func address being non-const, we need to
+	 * assemble this here.
+	 */
+	insn[0] = BPF_MOV64_REG(R6, R1);
+	insn[1] = BPF_LD_ABS(BPF_B, 0);
+	insn[2] = BPF_LD_ABS(BPF_H, 0);
+	insn[3] = BPF_LD_ABS(BPF_W, 0);
+	insn[4] = BPF_MOV64_REG(R7, R6);
+	insn[5] = BPF_MOV64_IMM(R6, 0);
+	insn[6] = BPF_MOV64_REG(R1, R7);
+	insn[7] = BPF_MOV64_IMM(R2, 1);
+	insn[8] = BPF_MOV64_IMM(R3, 2);
+	insn[9] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+			       bpf_skb_vlan_push_proto.func - __bpf_call_base);
+	insn[10] = BPF_MOV64_REG(R6, R7);
+	insn[11] = BPF_LD_ABS(BPF_B, 0);
+	insn[12] = BPF_LD_ABS(BPF_H, 0);
+	insn[13] = BPF_LD_ABS(BPF_W, 0);
+	insn[14] = BPF_MOV64_IMM(R0, 42);
+	insn[15] = BPF_EXIT_INSN();
+
+	self->u.ptr.insns = insn;
+	self->u.ptr.len = 16;
+
+	return 0;
+}
+
static int bpf_fill_jump_around_ld_abs(struct bpf_test *self)
{
unsigned int len = BPF_MAXINSNS;
@@ -6066,6 +6101,14 @@ static struct bpf_test tests[] = {
{},
{ {0x1, 0x42 } },
},
+	{
+		"LD_ABS with helper changing skb data",
+		{ },
+		INTERNAL,
+		{ 0x34 },
+		{ { ETH_HLEN, 42 } },
+		.fill_helper = bpf_fill_ld_abs_vlan_push_pop2,
+	},
};
static struct net_device dev;
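The program assembled by bpf_fill_ld_abs_vlan_push_pop2() above saves the skb pointer (R6/R7), performs LD_ABS byte/half/word loads, calls bpf_skb_vlan_push() with proto 1 and VLAN tag 2 (a helper that can reallocate and move the packet data), restores R6, repeats the LD_ABS loads and returns 42. The new table entry runs it as an INTERNAL test on an ETH_HLEN-sized packet and expects 42 back, which only works if the JIT re-caches its skb data pointers after the helper call, i.e. the behaviour fixed for s390x, ppc64 and sparc64 in this pull request. A rough restricted-C rendering of the same idea is sketched below, in the samples/bpf style of this era; it is illustrative only (program name and ELF section are made up, helper and intrinsic declarations are spelled out so the snippet stands alone) and is not part of the patch.

#include <linux/bpf.h>

/* LLVM intrinsics that compile to BPF_LD | BPF_ABS instructions. */
unsigned long long load_byte(void *skb, unsigned long long off)
	asm("llvm.bpf.load.byte");
unsigned long long load_half(void *skb, unsigned long long off)
	asm("llvm.bpf.load.half");
unsigned long long load_word(void *skb, unsigned long long off)
	asm("llvm.bpf.load.word");

static int (*bpf_skb_vlan_push)(void *ctx, __be16 proto, __u16 vlan_tci) =
	(void *) BPF_FUNC_skb_vlan_push;

__attribute__((section("socket"), used))
int ld_abs_after_vlan_push(struct __sk_buff *skb)
{
	/* The volatile store only keeps the compiler from discarding the
	 * otherwise unused loads; the hand-assembled test ignores the
	 * loaded values too.
	 */
	volatile unsigned long long acc;

	/* LD_ABS loads before the helper call; the JIT caches pointers
	 * to the skb's packet data to implement these.
	 */
	acc = load_byte(skb, 0);
	acc = load_half(skb, 0);
	acc = load_word(skb, 0);

	/* Pushing a VLAN tag may reallocate and move the packet data, so
	 * the JIT has to re-cache its skb data pointers afterwards.
	 */
	bpf_skb_vlan_push(skb, 1, 2);

	/* LD_ABS loads after the helper call must still see valid data. */
	acc = load_byte(skb, 0);
	acc = load_half(skb, 0);
	acc = load_word(skb, 0);

	return 42;
}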