path: root/sbin/ipf/ipf/bpf_filter.c
Diffstat (limited to 'sbin/ipf/ipf/bpf_filter.c')
-rw-r--r--  sbin/ipf/ipf/bpf_filter.c  577
1 file changed, 577 insertions, 0 deletions
diff --git a/sbin/ipf/ipf/bpf_filter.c b/sbin/ipf/ipf/bpf_filter.c
new file mode 100644
index 000000000000..bd7012d2f1ab
--- /dev/null
+++ b/sbin/ipf/ipf/bpf_filter.c
@@ -0,0 +1,577 @@
+
+/*-
+ * Copyright (c) 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from the Stanford/CMU enet packet filter,
+ * (net/enet.c) distributed as part of 4.3BSD, and code contributed
+ * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
+ * Berkeley Laboratory.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/types.h>
+#include <sys/time.h>
+#include <sys/socket.h>
+
+#include <netinet/in.h>
+#include <net/if.h>
+
+#include "netinet/ip_compat.h"
+#include "bpf-ipf.h"
+
+
+#if (defined(__hpux) || SOLARIS) && (defined(_KERNEL) || defined(KERNEL))
+# include <sys/sysmacros.h>
+# include <sys/stream.h>
+#endif
+
+#include "pcap-ipf.h"
+
+#if !defined(KERNEL) && !defined(_KERNEL)
+#include <stdlib.h>
+#endif
+
+#define int32 bpf_int32
+#define u_int32 bpf_u_int32
+
+static int m_xword(mb_t *, int, int *);
+static int m_xhalf(mb_t *, int, int *);
+
+#ifndef LBL_ALIGN
+/*
+ * XXX - IA-64? If not, this probably won't work on Win64 IA-64
+ * systems, unless LBL_ALIGN is defined elsewhere for them.
+ * XXX - SuperH? If not, this probably won't work on WinCE SuperH
+ * systems, unless LBL_ALIGN is defined elsewhere for them.
+ */
+#if defined(sparc) || defined(__sparc__) || defined(mips) || \
+ defined(ibm032) || defined(__alpha) || defined(__hpux) || \
+ defined(__arm__)
+#define LBL_ALIGN
+#endif
+#endif
+
+#ifndef LBL_ALIGN
+
+#define EXTRACT_SHORT(p) ((u_short)ntohs(*(u_short *)p))
+#define EXTRACT_LONG(p) (ntohl(*(u_int32 *)p))
+#else
+#define EXTRACT_SHORT(p)\
+ ((u_short)\
+ ((u_short)*((u_char *)p+0)<<8|\
+ (u_short)*((u_char *)p+1)<<0))
+#define EXTRACT_LONG(p)\
+ ((u_int32)*((u_char *)p+0)<<24|\
+ (u_int32)*((u_char *)p+1)<<16|\
+ (u_int32)*((u_char *)p+2)<<8|\
+ (u_int32)*((u_char *)p+3)<<0)
+#endif
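+
+/*
+ * Worked example (illustrative, not part of the original source): with
+ * p[0] = 0x08 and p[1] = 0x00, both variants of EXTRACT_SHORT(p) yield
+ * the host value 0x0800.  The byte-at-a-time variant exists only to
+ * avoid a potentially misaligned 16/32-bit load on strict-alignment
+ * CPUs (the LBL_ALIGN case above).
+ */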
+
+#define MINDEX(len, _m, _k) \
+{ \
+	len = M_LEN(_m); \
+	while ((_k) >= len) { \
+		(_k) -= len; \
+		(_m) = (_m)->m_next; \
+		if ((_m) == 0) \
+			return (0); \
+		len = M_LEN(_m); \
+	} \
+}
+
+static int
+m_xword(mb_t *m, int k, int *err)
+{
+ register int len;
+ register u_char *cp, *np;
+ register mb_t *m0;
+
+ MINDEX(len, m, k);
+ cp = MTOD(m, u_char *) + k;
+ if (len - k >= 4) {
+ *err = 0;
+ return (EXTRACT_LONG(cp));
+ }
+ m0 = m->m_next;
+ if (m0 == NULL || M_LEN(m0) + len - k < 4)
+ goto bad;
+ *err = 0;
+ np = MTOD(m0, u_char *);
+ switch (len - k) {
+
+ case 1:
+ return (cp[0] << 24) | (np[0] << 16) | (np[1] << 8) | np[2];
+
+ case 2:
+ return (cp[0] << 24) | (cp[1] << 16) | (np[0] << 8) | np[1];
+
+ default:
+ return (cp[0] << 24) | (cp[1] << 16) | (cp[2] << 8) | np[0];
+ }
+ bad:
+ *err = 1;
+ return (0);
+}
+
+static int
+m_xhalf(mb_t *m, int k, int *err)
+{
+ register int len;
+ register u_char *cp;
+ register mb_t *m0;
+
+ MINDEX(len, m, k);
+ cp = MTOD(m, u_char *) + k;
+ if (len - k >= 2) {
+ *err = 0;
+ return (EXTRACT_SHORT(cp));
+ }
+ m0 = m->m_next;
+ if (m0 == NULL)
+ goto bad;
+ *err = 0;
+ return (cp[0] << 8) | MTOD(m0, u_char *)[0];
+ bad:
+ *err = 1;
+ return (0);
+}
+
+/*
+ * Execute the filter program starting at pc on the packet p.
+ * wirelen is the length of the original packet; buflen is the amount
+ * of data present.  For the kernel, p is assumed to point to an mbuf
+ * if buflen is 0; in all other cases, p points to a buffer and buflen
+ * is its size.
+ */
+u_int
+bpf_filter(struct bpf_insn *pc, u_char *p, u_int wirelen, u_int buflen)
+{
+ register u_int32 A, X;
+ register int k;
+ int32 mem[BPF_MEMWORDS];
+ mb_t *m, *n;
+ int merr = 0; /* XXX: GCC */
+ int len;
+
+ if (buflen == 0) {
+ m = (mb_t *)p;
+ p = MTOD(m, u_char *);
+ buflen = M_LEN(m);
+ } else
+ m = NULL;
+
+ if (pc == NULL)
+ /*
+ * No filter means accept all.
+ */
+ return (u_int)-1;
+ A = 0;
+ X = 0;
+ --pc;
+ while (1) {
+ ++pc;
+ switch (pc->code) {
+
+ default:
+ return (0);
+ case BPF_RET|BPF_K:
+ return (u_int)pc->k;
+
+ case BPF_RET|BPF_A:
+ return (u_int)A;
+
+ case BPF_LD|BPF_W|BPF_ABS:
+ k = pc->k;
+ if (k + sizeof(int32) > buflen) {
+ if (m == NULL)
+ return (0);
+ A = m_xword(m, k, &merr);
+ if (merr != 0)
+ return (0);
+ continue;
+ }
+ A = EXTRACT_LONG(&p[k]);
+ continue;
+
+ case BPF_LD|BPF_H|BPF_ABS:
+ k = pc->k;
+ if (k + sizeof(short) > buflen) {
+ if (m == NULL)
+ return (0);
+ A = m_xhalf(m, k, &merr);
+ if (merr != 0)
+ return (0);
+ continue;
+ }
+ A = EXTRACT_SHORT(&p[k]);
+ continue;
+
+ case BPF_LD|BPF_B|BPF_ABS:
+ k = pc->k;
+ if (k >= buflen) {
+ if (m == NULL)
+ return (0);
+ n = m;
+ MINDEX(len, n, k);
+ A = MTOD(n, u_char *)[k];
+ continue;
+ }
+ A = p[k];
+ continue;
+
+ case BPF_LD|BPF_W|BPF_LEN:
+ A = wirelen;
+ continue;
+
+ case BPF_LDX|BPF_W|BPF_LEN:
+ X = wirelen;
+ continue;
+
+ case BPF_LD|BPF_W|BPF_IND:
+ k = X + pc->k;
+ if (k + sizeof(int32) > buflen) {
+ if (m == NULL)
+ return (0);
+ A = m_xword(m, k, &merr);
+ if (merr != 0)
+ return (0);
+ continue;
+ }
+ A = EXTRACT_LONG(&p[k]);
+ continue;
+
+ case BPF_LD|BPF_H|BPF_IND:
+ k = X + pc->k;
+ if (k + sizeof(short) > buflen) {
+ if (m == NULL)
+ return (0);
+ A = m_xhalf(m, k, &merr);
+ if (merr != 0)
+ return (0);
+ continue;
+ }
+ A = EXTRACT_SHORT(&p[k]);
+ continue;
+
+ case BPF_LD|BPF_B|BPF_IND:
+ k = X + pc->k;
+ if (k >= buflen) {
+ if (m == NULL)
+ return (0);
+ n = m;
+ MINDEX(len, n, k);
+ A = MTOD(n, u_char *)[k];
+ continue;
+ }
+ A = p[k];
+ continue;
+
+ case BPF_LDX|BPF_MSH|BPF_B:
+ k = pc->k;
+ if (k >= buflen) {
+ if (m == NULL)
+ return (0);
+ n = m;
+ MINDEX(len, n, k);
+ X = (MTOD(n, char *)[k] & 0xf) << 2;
+ continue;
+ }
+ X = (p[pc->k] & 0xf) << 2;
+ continue;
+
+ case BPF_LD|BPF_IMM:
+ A = pc->k;
+ continue;
+
+ case BPF_LDX|BPF_IMM:
+ X = pc->k;
+ continue;
+
+ case BPF_LD|BPF_MEM:
+ A = mem[pc->k];
+ continue;
+
+ case BPF_LDX|BPF_MEM:
+ X = mem[pc->k];
+ continue;
+
+ case BPF_ST:
+ mem[pc->k] = A;
+ continue;
+
+ case BPF_STX:
+ mem[pc->k] = X;
+ continue;
+
+ case BPF_JMP|BPF_JA:
+ pc += pc->k;
+ continue;
+
+ case BPF_JMP|BPF_JGT|BPF_K:
+ pc += (A > pc->k) ? pc->jt : pc->jf;
+ continue;
+
+ case BPF_JMP|BPF_JGE|BPF_K:
+ pc += (A >= pc->k) ? pc->jt : pc->jf;
+ continue;
+
+ case BPF_JMP|BPF_JEQ|BPF_K:
+ pc += (A == pc->k) ? pc->jt : pc->jf;
+ continue;
+
+ case BPF_JMP|BPF_JSET|BPF_K:
+ pc += (A & pc->k) ? pc->jt : pc->jf;
+ continue;
+
+ case BPF_JMP|BPF_JGT|BPF_X:
+ pc += (A > X) ? pc->jt : pc->jf;
+ continue;
+
+ case BPF_JMP|BPF_JGE|BPF_X:
+ pc += (A >= X) ? pc->jt : pc->jf;
+ continue;
+
+ case BPF_JMP|BPF_JEQ|BPF_X:
+ pc += (A == X) ? pc->jt : pc->jf;
+ continue;
+
+ case BPF_JMP|BPF_JSET|BPF_X:
+ pc += (A & X) ? pc->jt : pc->jf;
+ continue;
+
+ case BPF_ALU|BPF_ADD|BPF_X:
+ A += X;
+ continue;
+
+ case BPF_ALU|BPF_SUB|BPF_X:
+ A -= X;
+ continue;
+
+ case BPF_ALU|BPF_MUL|BPF_X:
+ A *= X;
+ continue;
+
+ case BPF_ALU|BPF_DIV|BPF_X:
+ if (X == 0)
+ return (0);
+ A /= X;
+ continue;
+
+ case BPF_ALU|BPF_AND|BPF_X:
+ A &= X;
+ continue;
+
+ case BPF_ALU|BPF_OR|BPF_X:
+ A |= X;
+ continue;
+
+ case BPF_ALU|BPF_LSH|BPF_X:
+ A <<= X;
+ continue;
+
+ case BPF_ALU|BPF_RSH|BPF_X:
+ A >>= X;
+ continue;
+
+ case BPF_ALU|BPF_ADD|BPF_K:
+ A += pc->k;
+ continue;
+
+ case BPF_ALU|BPF_SUB|BPF_K:
+ A -= pc->k;
+ continue;
+
+ case BPF_ALU|BPF_MUL|BPF_K:
+ A *= pc->k;
+ continue;
+
+ case BPF_ALU|BPF_DIV|BPF_K:
+ A /= pc->k;
+ continue;
+
+ case BPF_ALU|BPF_AND|BPF_K:
+ A &= pc->k;
+ continue;
+
+ case BPF_ALU|BPF_OR|BPF_K:
+ A |= pc->k;
+ continue;
+
+ case BPF_ALU|BPF_LSH|BPF_K:
+ A <<= pc->k;
+ continue;
+
+ case BPF_ALU|BPF_RSH|BPF_K:
+ A >>= pc->k;
+ continue;
+
+ case BPF_ALU|BPF_NEG:
+ A = -A;
+ continue;
+
+ case BPF_MISC|BPF_TAX:
+ X = A;
+ continue;
+
+ case BPF_MISC|BPF_TXA:
+ A = X;
+ continue;
+ }
+ }
+}
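+
+#ifdef BPF_FILTER_EXAMPLE
+/*
+ * Illustrative sketch, not part of the original ipfilter sources: how a
+ * caller might run bpf_filter() over a flat buffer.  The two-instruction
+ * program below simply returns the wire length, i.e. it accepts every
+ * packet in full.  BPF_STMT is assumed to be provided by the bundled
+ * bpf header; real programs normally come from a BPF compiler.
+ */
+static u_int
+bpf_filter_example(u_char *pkt, u_int pktlen)
+{
+	static struct bpf_insn prog[] = {
+		BPF_STMT(BPF_LD|BPF_W|BPF_LEN, 0),	/* A <- wirelen */
+		BPF_STMT(BPF_RET|BPF_A, 0),		/* accept A bytes */
+	};
+
+	/* buflen != 0, so pkt is treated as a flat buffer, not an mbuf. */
+	return (bpf_filter(prog, pkt, pktlen, pktlen));
+}
+#endif /* BPF_FILTER_EXAMPLE */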
+
+
+/*
+ * Return true if the 'fcode' is a valid filter program.
+ * The constraints are that each jump be forward and to a valid
+ * code, that memory accesses are within valid ranges (to the
+ * extent that this can be checked statically; loads of packet
+ * data have to be, and are, also checked at run time), and that
+ * the code terminates with either an accept or reject.
+ *
+ * The kernel needs to be able to verify an application's filter code.
+ * Otherwise, a bogus program could easily crash the system.
+ */
+int
+bpf_validate(struct bpf_insn *f, int len)
+{
+ u_int i, from;
+ const struct bpf_insn *p;
+
+ if (len == 0)
+ return (1);
+
+ if (len < 1 || len > BPF_MAXINSNS)
+ return (0);
+
+ for (i = 0; i < len; ++i) {
+ p = &f[i];
+ switch (BPF_CLASS(p->code)) {
+ /*
+ * Check that memory operations use valid addresses.
+ */
+ case BPF_LD:
+ case BPF_LDX:
+ switch (BPF_MODE(p->code)) {
+ case BPF_IMM:
+ break;
+ case BPF_ABS:
+ case BPF_IND:
+ case BPF_MSH:
+			/*
+			 * A stricter check against the actual packet
+			 * length is done at run time.
+			 */
+#if 0
+ if (p->k >= bpf_maxbufsize)
+ return (0);
+#endif
+ break;
+ case BPF_MEM:
+ if (p->k >= BPF_MEMWORDS)
+ return (0);
+ break;
+ case BPF_LEN:
+ break;
+ default:
+ return (0);
+ }
+ break;
+ case BPF_ST:
+ case BPF_STX:
+ if (p->k >= BPF_MEMWORDS)
+ return (0);
+ break;
+ case BPF_ALU:
+			switch (BPF_OP(p->code)) {
+			case BPF_ADD:
+			case BPF_SUB:
+			case BPF_MUL:
+			case BPF_OR:
+			case BPF_AND:
+			case BPF_LSH:
+			case BPF_RSH:
+			case BPF_NEG:
+				break;
+			case BPF_DIV:
+				/*
+				 * Check for constant division by 0.
+				 */
+				if (BPF_SRC(p->code) == BPF_K && p->k == 0)
+					return (0);
+				break;
+			default:
+				return (0);
+			}
+ break;
+ case BPF_JMP:
+ /*
+ * Check that jumps are within the code block,
+ * and that unconditional branches don't go
+ * backwards as a result of an overflow.
+ * Unconditional branches have a 32-bit offset,
+ * so they could overflow; we check to make
+ * sure they don't. Conditional branches have
+ * an 8-bit offset, and the from address is <=
+ * BPF_MAXINSNS, and we assume that BPF_MAXINSNS
+ * is sufficiently small that adding 255 to it
+ * won't overflow.
+ *
+ * We know that len is <= BPF_MAXINSNS, and we
+ * assume that BPF_MAXINSNS is < the maximum size
+ * of a u_int, so that i + 1 doesn't overflow.
+ */
+ from = i + 1;
+ switch (BPF_OP(p->code)) {
+ case BPF_JA:
+ if (from + p->k < from || from + p->k >= len)
+ return (0);
+ break;
+ case BPF_JEQ:
+ case BPF_JGT:
+ case BPF_JGE:
+ case BPF_JSET:
+ if (from + p->jt >= len || from + p->jf >= len)
+ return (0);
+ break;
+ default:
+ return (0);
+ }
+ break;
+ case BPF_RET:
+ break;
+ case BPF_MISC:
+ break;
+ default:
+ return (0);
+ }
+ }
+ return (BPF_CLASS(f[len - 1].code) == BPF_RET);
+}
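+
+#ifdef BPF_VALIDATE_EXAMPLE
+/*
+ * Illustrative sketch, not part of the original ipfilter sources: two
+ * small programs showing what bpf_validate() accepts and rejects.
+ * BPF_STMT and BPF_JUMP are assumed to come from the bundled bpf header.
+ */
+static void
+bpf_validate_example(void)
+{
+	/* Accepted: forward, in-range jumps only, ends in BPF_RET. */
+	static struct bpf_insn ok[] = {
+		BPF_STMT(BPF_LD|BPF_H|BPF_ABS, 12),		/* A <- ethertype */
+		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, 0x0800, 0, 1),	/* IPv4? */
+		BPF_STMT(BPF_RET|BPF_K, (u_int)-1),		/* accept */
+		BPF_STMT(BPF_RET|BPF_K, 0),			/* reject */
+	};
+	/* Rejected: scratch-memory store outside mem[0..BPF_MEMWORDS-1]. */
+	static struct bpf_insn bad[] = {
+		BPF_STMT(BPF_ST, BPF_MEMWORDS),
+		BPF_STMT(BPF_RET|BPF_K, 0),
+	};
+
+	(void) bpf_validate(ok, sizeof(ok) / sizeof(ok[0]));	/* returns 1 */
+	(void) bpf_validate(bad, sizeof(bad) / sizeof(bad[0]));	/* returns 0 */
+}
+#endif /* BPF_VALIDATE_EXAMPLE */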