author:    David S. Miller <davem@davemloft.net>    2014-05-13 13:13:33 -0400
committer: David S. Miller <davem@davemloft.net>    2014-05-13 13:13:33 -0400
commit:    1268e253a81e504bc5d5cb7f887dbd538984f137
tree:      9c5bb7744164b5f90943612f55603c26577854e4 /net/core
parent:    62555582916c7b18558c91faaf271a3edbaeff83
net: filter: Fix redefinition warnings on x86-64.
Do not collide with the x86-64 PTRACE user API namespace.
net/core/filter.c:57:0: warning: "R8" redefined [enabled by default]
arch/x86/include/uapi/asm/ptrace-abi.h:38:0: note: this is the location of the previous definition
Fix by adding a BPF_ prefix to the register macros.
Reported-by: Randy Dunlap <rdunlap@infradead.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
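For readers unfamiliar with the underlying preprocessor issue, the following is a minimal stand-alone sketch (hypothetical file name and values, not kernel code) of why the unprefixed macros warned and how the BPF_ prefix avoids the clash:

/* redefine_demo.c - hypothetical illustration, not kernel code.
 *
 * Building net/core/filter.c indirectly pulls in asm/ptrace-abi.h, which
 * already defines R8..R15 as ptrace frame offsets.  Defining R8 again with
 * a different body triggers a '"R8" redefined' preprocessor warning.
 * Giving the interpreter-local macros a BPF_ prefix sidesteps the clash.
 */
#include <stdio.h>

#define R8 72			/* stand-in for the ptrace-abi.h definition */

/* Old scheme: this line would warn '"R8" redefined', body differs. */
/* #define R8 regs[8] */

/* New scheme: interpreter-private namespace, no collision. */
#define BPF_R8 regs[8]

int main(void)
{
	unsigned long regs[11] = { 0 };

	BPF_R8 = 42;		/* expands to regs[8] = 42 */
	printf("ptrace offset R8 = %d, BPF_R8 = %lu\n", R8, BPF_R8);
	return 0;
}

Compiled with gcc -Wall, uncommenting the second R8 definition reproduces the same kind of redefinition warning that filter.c hit via asm/ptrace-abi.h.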
Diffstat (limited to 'net/core')
-rw-r--r--   net/core/filter.c   71
1 files changed, 37 insertions, 34 deletions
diff --git a/net/core/filter.c b/net/core/filter.c
index 9aaa05ad8fe3..c442a0d7d0f7 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -46,17 +46,17 @@
 #include <linux/if_vlan.h>
 
 /* Registers */
-#define R0	regs[BPF_REG_0]
-#define R1	regs[BPF_REG_1]
-#define R2	regs[BPF_REG_2]
-#define R3	regs[BPF_REG_3]
-#define R4	regs[BPF_REG_4]
-#define R5	regs[BPF_REG_5]
-#define R6	regs[BPF_REG_6]
-#define R7	regs[BPF_REG_7]
-#define R8	regs[BPF_REG_8]
-#define R9	regs[BPF_REG_9]
-#define R10	regs[BPF_REG_10]
+#define BPF_R0	regs[BPF_REG_0]
+#define BPF_R1	regs[BPF_REG_1]
+#define BPF_R2	regs[BPF_REG_2]
+#define BPF_R3	regs[BPF_REG_3]
+#define BPF_R4	regs[BPF_REG_4]
+#define BPF_R5	regs[BPF_REG_5]
+#define BPF_R6	regs[BPF_REG_6]
+#define BPF_R7	regs[BPF_REG_7]
+#define BPF_R8	regs[BPF_REG_8]
+#define BPF_R9	regs[BPF_REG_9]
+#define BPF_R10	regs[BPF_REG_10]
 
 /* Named registers */
 #define A	regs[insn->a_reg]
@@ -383,10 +383,12 @@ select_insn:
 
 	/* CALL */
 	JMP_CALL_0:
-		/* Function call scratches R1-R5 registers, preserves R6-R9,
-		 * and stores return value into R0.
+		/* Function call scratches BPF_R1-BPF_R5 registers,
+		 * preserves BPF_R6-BPF_R9, and stores return value
+		 * into BPF_R0.
 		 */
-		R0 = (__bpf_call_base + insn->imm)(R1, R2, R3, R4, R5);
+		BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
+						       BPF_R4, BPF_R5);
 		CONT;
 
 	/* JMP */
@@ -478,7 +480,7 @@ select_insn:
 		}
 		CONT;
 	JMP_EXIT_0:
-		return R0;
+		return BPF_R0;
 
 	/* STX and ST and LDX*/
 #define LDST(SIZEOP, SIZE)						\
@@ -505,18 +507,19 @@ select_insn:
 		atomic64_add((u64) X, (atomic64_t *)(unsigned long)
 			     (A + insn->off));
 		CONT;
-	LD_ABS_W: /* R0 = ntohl(*(u32 *) (skb->data + K)) */
+	LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + K)) */
 		off = K;
 load_word:
-		/* BPF_LD + BPD_ABS and BPF_LD + BPF_IND insns are only
-		 * appearing in the programs where ctx == skb. All programs
-		 * keep 'ctx' in regs[BPF_REG_CTX] == R6, sk_convert_filter()
-		 * saves it in R6, internal BPF verifier will check that
-		 * R6 == ctx.
+		/* BPF_LD + BPD_ABS and BPF_LD + BPF_IND insns are
+		 * only appearing in the programs where ctx ==
+		 * skb. All programs keep 'ctx' in regs[BPF_REG_CTX]
+		 * == BPF_R6, sk_convert_filter() saves it in BPF_R6,
+		 * internal BPF verifier will check that BPF_R6 ==
+		 * ctx.
 		 *
-		 * BPF_ABS and BPF_IND are wrappers of function calls, so
-		 * they scratch R1-R5 registers, preserve R6-R9, and store
-		 * return value into R0.
+		 * BPF_ABS and BPF_IND are wrappers of function calls,
+		 * so they scratch BPF_R1-BPF_R5 registers, preserve
+		 * BPF_R6-BPF_R9, and store return value into BPF_R0.
 		 *
 		 * Implicit input:
 		 *   ctx
@@ -526,39 +529,39 @@ load_word:
 		 *   K == 32-bit immediate
 		 *
 		 * Output:
-		 *   R0 - 8/16/32-bit skb data converted to cpu endianness
+		 *   BPF_R0 - 8/16/32-bit skb data converted to cpu endianness
 		 */
 		ptr = load_pointer((struct sk_buff *) ctx, off, 4, &tmp);
 		if (likely(ptr != NULL)) {
-			R0 = get_unaligned_be32(ptr);
+			BPF_R0 = get_unaligned_be32(ptr);
 			CONT;
 		}
 		return 0;
-	LD_ABS_H: /* R0 = ntohs(*(u16 *) (skb->data + K)) */
+	LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + K)) */
 		off = K;
 load_half:
 		ptr = load_pointer((struct sk_buff *) ctx, off, 2, &tmp);
 		if (likely(ptr != NULL)) {
-			R0 = get_unaligned_be16(ptr);
+			BPF_R0 = get_unaligned_be16(ptr);
 			CONT;
 		}
 		return 0;
-	LD_ABS_B: /* R0 = *(u8 *) (ctx + K) */
+	LD_ABS_B: /* BPF_R0 = *(u8 *) (ctx + K) */
 		off = K;
 load_byte:
 		ptr = load_pointer((struct sk_buff *) ctx, off, 1, &tmp);
 		if (likely(ptr != NULL)) {
-			R0 = *(u8 *)ptr;
+			BPF_R0 = *(u8 *)ptr;
 			CONT;
 		}
 		return 0;
-	LD_IND_W: /* R0 = ntohl(*(u32 *) (skb->data + X + K)) */
+	LD_IND_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + X + K)) */
 		off = K + X;
 		goto load_word;
-	LD_IND_H: /* R0 = ntohs(*(u16 *) (skb->data + X + K)) */
+	LD_IND_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + X + K)) */
 		off = K + X;
 		goto load_half;
-	LD_IND_B: /* R0 = *(u8 *) (skb->data + X + K) */
+	LD_IND_B: /* BPF_R0 = *(u8 *) (skb->data + X + K) */
 		off = K + X;
 		goto load_byte;
 
@@ -1001,7 +1004,7 @@ do_pass:
 			*insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_TMP, BPF_REG_A);
 			insn++;
 
-			/* A = R0 = *(u8 *) (skb->data + K) */
+			/* A = BPF_R0 = *(u8 *) (skb->data + K) */
 			*insn = BPF_LD_ABS(BPF_B, fp->k);
 			insn++;
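The convention restated in the JMP_CALL_0 and load_word comments above can be shown with a small self-contained sketch (invented helper and register file, not the kernel interpreter): a call reads its arguments from BPF_R1-BPF_R5, may clobber those registers, must leave BPF_R6-BPF_R9 alone, and returns its result in BPF_R0.

/* callconv_demo.c - hypothetical sketch of the calling convention described
 * in the comments above; not the kernel interpreter itself.
 */
#include <stdio.h>

typedef unsigned long long u64;

/* Same macro shape as the patched filter.c: prefixed names, one array. */
#define BPF_R0 regs[0]
#define BPF_R1 regs[1]
#define BPF_R2 regs[2]
#define BPF_R6 regs[6]

/* Stand-in helper: takes the five argument registers, uses two of them. */
static u64 helper_add(u64 a, u64 b, u64 c, u64 d, u64 e)
{
	(void) c; (void) d; (void) e;
	return a + b;
}

int main(void)
{
	u64 regs[11] = { 0 };

	BPF_R1 = 2;		/* argument registers: caller loads R1-R5 */
	BPF_R2 = 40;
	BPF_R6 = 7;		/* callee-saved: must survive the call    */

	/* Mirrors the interpreter's JMP_CALL_0 case: the result lands in
	 * BPF_R0, R1-R5 count as scratched afterwards, R6-R9 are preserved.
	 */
	BPF_R0 = helper_add(BPF_R1, BPF_R2, 0, 0, 0);

	printf("BPF_R0=%llu BPF_R6=%llu\n", BPF_R0, BPF_R6);	/* 42 and 7 */
	return 0;
}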