/*
* Linux Socket Filter Data Structures
*/
#ifndef __LINUX_FILTER_H__
#define __LINUX_FILTER_H__
#include <linux/compiler.h>
#include <linux/types.h>
#ifdef __KERNEL__
#include <asm/atomic.h>
#endif
/*
* Current version of the filter code architecture.
*/
#define BPF_MAJOR_VERSION 1
#define BPF_MINOR_VERSION 1
/*
 * Try to keep these values and structures similar to BSD, especially
 * the BPF code definitions, which need to match so that filters can be
 * shared.
*/
struct sock_filter {            /* Filter block */
        __u16   code;           /* Actual filter code */
        __u8    jt;             /* Jump true */
        __u8    jf;             /* Jump false */
        __u32   k;              /* Generic multiuse field */
};
struct sock_fprog {             /* Required for SO_ATTACH_FILTER. */
        unsigned short          len;    /* Number of filter blocks */
        struct sock_filter __user *filter;
};
/*
* Instruction classes
*/
#define BPF_CLASS(code) ((code) & 0x07)
#define BPF_LD 0x00
#define BPF_LDX 0x01
#define BPF_ST 0x02
#define BPF_STX 0x03
#define BPF_ALU 0x04
#define BPF_JMP 0x05
#define BPF_RET 0x06
#define BPF_MISC 0x07
/* ld/ldx fields */
#define BPF_SIZE(code) ((code) & 0x18)
#define BPF_W 0x00
#define BPF_H 0x08
#define BPF_B 0x10
#define BPF_MODE(code) ((code) & 0xe0)
#define BPF_IMM 0x00
#define BPF_ABS 0x20
#define BPF_IND 0x40
#define BPF_MEM 0x60
#define BPF_LEN 0x80
#define BPF_MSH 0xa0
/* alu/jmp fields */
#define BPF_OP(code) ((code) & 0xf0)
#define BPF_ADD 0x00
#define BPF_SUB 0x10
#define BPF_MUL 0x20
#define BPF_DIV 0x30
#define BPF_OR 0x40
#define BPF_AND 0x50
#define BPF_LSH 0x60
#define BPF_RSH 0x70
#define BPF_NEG 0x80
#define BPF_JA 0x00
#define BPF_JEQ 0x10
#define BPF_JGT 0x20
#define BPF_JGE 0x30
#define BPF_JSET 0x40
#define BPF_SRC(code) ((code) & 0x08)
#define BPF_K 0x00
#define BPF_X 0x08
/* ret - BPF_K and BPF_X also apply */
#define BPF_RVAL(code) ((code) & 0x18)
#define BPF_A 0x10
/* misc */
#define BPF_MISCOP(code) ((code) & 0xf8)
#define BPF_TAX 0x00
#define BPF_TXA 0x80
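
/*
 * An opcode is the bitwise OR of the field values above; the extraction
 * macros recover the individual fields.  For example, a halfword absolute
 * load:
 *
 *      BPF_LD | BPF_H | BPF_ABS  =  0x00 | 0x08 | 0x20  =  0x28
 *      BPF_CLASS(0x28) == BPF_LD
 *      BPF_SIZE(0x28)  == BPF_H
 *      BPF_MODE(0x28)  == BPF_ABS
 */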
#ifndef BPF_MAXINSNS
#define BPF_MAXINSNS 4096
#endif
/*
* Macros for filter block array initializers.
*/
#ifndef BPF_STMT
#define BPF_STMT(code, k) { (unsigned short)(code), 0, 0, k }
#endif
#ifndef BPF_JUMP
#define BPF_JUMP(code, k, jt, jf) { (unsigned short)(code), jt, jf, k }
#endif
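
/*
 * A minimal sketch of a filter built with these initializers, assuming it
 * runs against an Ethernet frame on a packet socket: accept IPv4 frames in
 * full, drop everything else.
 *
 *      struct sock_filter ipv4_only[] = {
 *              BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12),             (A = EtherType)
 *              BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x0800, 0, 1),  (IPv4? next : skip 1)
 *              BPF_STMT(BPF_RET | BPF_K, 0xffff),                  (accept up to 0xffff bytes)
 *              BPF_STMT(BPF_RET | BPF_K, 0),                       (drop)
 *      };
 */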
/*
 * Number of scratch memory words for BPF_ST and BPF_STX.
*/
#define BPF_MEMWORDS 16
/* RATIONALE.  Negative offsets are invalid in BPF.
   We use them to reference ancillary data.
   Unlike introducing new instructions, this does not break
   existing compilers/optimizers.
 */
#define SKF_AD_OFF (-0x1000)
#define SKF_AD_PROTOCOL 0
#define SKF_AD_PKTTYPE 4
#define SKF_AD_IFINDEX 8
#define SKF_AD_NLATTR 12
#define SKF_AD_NLATTR_NEST 16
#define SKF_AD_MARK 20
#define SKF_AD_QUEUE 24
#define SKF_AD_HATYPE 28
#define SKF_AD_RXHASH 32
#define SKF_AD_CPU 36
#define SKF_AD_MAX 40
#define SKF_NET_OFF (-0x100000)
#define SKF_LL_OFF (-0x200000)
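
/*
 * Example (a sketch): within a filter, an absolute load at one of these
 * negative offsets returns ancillary data instead of packet bytes, e.g.
 *
 *      BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF + SKF_AD_IFINDEX)
 *
 * loads the receiving device's ifindex into the accumulator.  Loads based at
 * SKF_NET_OFF or SKF_LL_OFF address packet bytes relative to the network or
 * link-layer header, respectively.
 */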
#ifdef __KERNEL__
struct sk_filter
{
        atomic_t                refcnt;
        unsigned int            len;    /* Number of filter blocks */
        unsigned int            (*bpf_func)(const struct sk_buff *skb,
                                            const struct sock_filter *filter);
        struct rcu_head         rcu;
        struct sock_filter      insns[0];
};

static inline unsigned int sk_filter_len(const struct sk_filter *fp)
{
        return fp->len * sizeof(struct sock_filter) + sizeof(*fp);
}
struct sk_buff;
struct sock;

extern int sk_filter(struct sock *sk, struct sk_buff *skb);
extern unsigned int sk_run_filter(const struct sk_buff *skb,
                                  const struct sock_filter *filter);
extern int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
extern int sk_detach_filter(struct sock *sk);
extern int sk_chk_filter(struct sock_filter *filter, int flen);
#ifdef CONFIG_BPF_JIT
extern void bpf_jit_compile(struct sk_filter *fp);
extern void bpf_jit_free(struct sk_filter *fp);
#define SK_RUN_FILTER(FILTER, SKB) (*FILTER->bpf_func)(SKB, FILTER->insns)
#else
static inline void bpf_jit_compile(struct sk_filter *fp)
{
}
static inline void bpf_jit_free(struct sk_filter *fp)
{
}
#define SK_RUN_FILTER(FILTER, SKB) sk_run_filter(SKB, FILTER->insns)
#endif
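
/*
 * SK_RUN_FILTER() is the single entry point for executing a filter: with
 * CONFIG_BPF_JIT it calls through fp->bpf_func, which points at native code
 * once bpf_jit_compile() has succeeded (and at sk_run_filter() otherwise);
 * without the JIT it always runs the sk_run_filter() interpreter.  A rough
 * caller sketch (names illustrative, error paths omitted):
 *
 *      unsigned int pkt_len = SK_RUN_FILTER(fp, skb);
 *      if (pkt_len == 0)
 *              return -EPERM;          (filter said: drop)
 *      pskb_trim(skb, pkt_len);        (keep at most pkt_len bytes)
 */

/*
 * Kernel-internal instruction codes: sk_chk_filter() rewrites the
 * user-visible opcodes above into these BPF_S_* values so that
 * sk_run_filter() can dispatch over a dense range.
 */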
enum {
        BPF_S_RET_K = 1,
        BPF_S_RET_A,
        BPF_S_ALU_ADD_K,
        BPF_S_ALU_ADD_X,
        BPF_S_ALU_SUB_K,
        BPF_S_ALU_SUB_X,
        BPF_S_ALU_MUL_K,
        BPF_S_ALU_MUL_X,
        BPF_S_ALU_DIV_X,
        BPF_S_ALU_AND_K,
        BPF_S_ALU_AND_X,
        BPF_S_ALU_OR_K,
        BPF_S_ALU_OR_X,
        BPF_S_ALU_LSH_K,
        BPF_S_ALU_LSH_X,
        BPF_S_ALU_RSH_K,
        BPF_S_ALU_RSH_X,
        BPF_S_ALU_NEG,
        BPF_S_LD_W_ABS,
        BPF_S_LD_H_ABS,
        BPF_S_LD_B_ABS,
        BPF_S_LD_W_LEN,
        BPF_S_LD_W_IND,
        BPF_S_LD_H_IND,
        BPF_S_LD_B_IND,
        BPF_S_LD_IMM,
        BPF_S_LDX_W_LEN,
        BPF_S_LDX_B_MSH,
        BPF_S_LDX_IMM,
        BPF_S_MISC_TAX,
        BPF_S_MISC_TXA,
        BPF_S_ALU_DIV_K,
        BPF_S_LD_MEM,
        BPF_S_LDX_MEM,
        BPF_S_ST,
        BPF_S_STX,
        BPF_S_JMP_JA,
        BPF_S_JMP_JEQ_K,
        BPF_S_JMP_JEQ_X,
        BPF_S_JMP_JGE_K,
        BPF_S_JMP_JGE_X,
        BPF_S_JMP_JGT_K,
        BPF_S_JMP_JGT_X,
        BPF_S_JMP_JSET_K,
        BPF_S_JMP_JSET_X,
        /* Ancillary data */
        BPF_S_ANC_PROTOCOL,
        BPF_S_ANC_PKTTYPE,
        BPF_S_ANC_IFINDEX,
        BPF_S_ANC_NLATTR,
        BPF_S_ANC_NLATTR_NEST,
        BPF_S_ANC_MARK,
        BPF_S_ANC_QUEUE,
        BPF_S_ANC_HATYPE,
        BPF_S_ANC_RXHASH,
        BPF_S_ANC_CPU,
};
#endif /* __KERNEL__ */
#endif /* __LINUX_FILTER_H__ */