Diffstat (limited to 'sys/netpfil')
-rw-r--r--  sys/netpfil/pf/pf.c         |  21
-rw-r--r--  sys/netpfil/pf/pf.h         |   4
-rw-r--r--  sys/netpfil/pf/pf_ioctl.c   |  26
-rw-r--r--  sys/netpfil/pf/pf_nl.c      | 132
-rw-r--r--  sys/netpfil/pf/pf_nl.h      |  19
-rw-r--r--  sys/netpfil/pf/pf_norm.c    |   2
-rw-r--r--  sys/netpfil/pf/pf_ruleset.c |  16
-rw-r--r--  sys/netpfil/pf/pf_table.c   |   2
8 files changed, 193 insertions, 29 deletions
diff --git a/sys/netpfil/pf/pf.c b/sys/netpfil/pf/pf.c
index 4801b3e2c766..9d83e7b82e6f 100644
--- a/sys/netpfil/pf/pf.c
+++ b/sys/netpfil/pf/pf.c
@@ -621,7 +621,7 @@ static void
pf_packet_rework_nat(struct pf_pdesc *pd, int off, struct pf_state_key *nk)
{
- switch (pd->proto) {
+ switch (pd->virtual_proto) {
case IPPROTO_TCP: {
struct tcphdr *th = &pd->hdr.tcp;
@@ -1254,6 +1254,21 @@ pf_initialize(void)
MTX_DEF | MTX_DUPOK);
}
+ /* Anchors */
+ V_pf_anchor_z = uma_zcreate("pf anchors",
+ sizeof(struct pf_kanchor), NULL, NULL, NULL, NULL,
+ UMA_ALIGN_PTR, 0);
+ V_pf_limits[PF_LIMIT_ANCHORS].zone = V_pf_anchor_z;
+ uma_zone_set_max(V_pf_anchor_z, PF_ANCHOR_HIWAT);
+ uma_zone_set_warning(V_pf_anchor_z, "PF anchor limit reached");
+
+ V_pf_eth_anchor_z = uma_zcreate("pf Ethernet anchors",
+ sizeof(struct pf_keth_anchor), NULL, NULL, NULL, NULL,
+ UMA_ALIGN_PTR, 0);
+ V_pf_limits[PF_LIMIT_ETH_ANCHORS].zone = V_pf_eth_anchor_z;
+ uma_zone_set_max(V_pf_eth_anchor_z, PF_ANCHOR_HIWAT);
+ uma_zone_set_warning(V_pf_eth_anchor_z, "PF Ethernet anchor limit reached");
+
/* ALTQ */
TAILQ_INIT(&V_pf_altqs[0]);
TAILQ_INIT(&V_pf_altqs[1]);
@@ -1332,6 +1347,8 @@ pf_cleanup(void)
uma_zdestroy(V_pf_state_z);
uma_zdestroy(V_pf_state_key_z);
uma_zdestroy(V_pf_udp_mapping_z);
+ uma_zdestroy(V_pf_anchor_z);
+ uma_zdestroy(V_pf_eth_anchor_z);
}
static int
@@ -6376,7 +6393,7 @@ pf_translate_compat(struct pf_test_ctx *ctx)
KASSERT(ctx->sk != NULL, ("%s: null sk", __func__));
KASSERT(ctx->nk != NULL, ("%s: null nk", __func__));
- switch (pd->proto) {
+ switch (pd->virtual_proto) {
case IPPROTO_TCP:
if (PF_ANEQ(&pd->nsaddr, &nk->addr[pd->sidx], pd->af) ||
nk->port[pd->sidx] != pd->nsport) {
diff --git a/sys/netpfil/pf/pf.h b/sys/netpfil/pf/pf.h
index cfff58064922..51b3fd6390e1 100644
--- a/sys/netpfil/pf/pf.h
+++ b/sys/netpfil/pf/pf.h
@@ -120,7 +120,8 @@ enum {
enum { PF_NOPFROUTE, PF_FASTROUTE, PF_ROUTETO, PF_DUPTO, PF_REPLYTO };
enum { PF_LIMIT_STATES, PF_LIMIT_SRC_NODES, PF_LIMIT_FRAGS,
- PF_LIMIT_TABLE_ENTRIES, PF_LIMIT_MAX };
+ PF_LIMIT_TABLE_ENTRIES, PF_LIMIT_ANCHORS, PF_LIMIT_ETH_ANCHORS,
+ PF_LIMIT_MAX };
#define PF_POOL_IDMASK 0x0f
enum { PF_POOL_NONE, PF_POOL_BITMASK, PF_POOL_RANDOM,
PF_POOL_SRCHASH, PF_POOL_ROUNDROBIN };
@@ -490,6 +491,7 @@ struct pf_osfp_ioctl {
#define PF_ANCHOR_NAME_SIZE 64
#define PF_ANCHOR_MAXPATH (MAXPATHLEN - PF_ANCHOR_NAME_SIZE - 1)
+#define PF_ANCHOR_HIWAT 512
#define PF_OPTIMIZER_TABLE_PFX "__automatic_"
struct pf_rule {
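
The new PF_LIMIT_ANCHORS and PF_LIMIT_ETH_ANCHORS indices are reachable from userland through the long-standing DIOCGETLIMIT/DIOCSETLIMIT ioctls. Below is a minimal sketch (not part of this commit) that reads the anchor limit; it assumes headers built from this change and keeps error handling to a minimum.

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <netinet/in.h>
#include <net/pfvar.h>

#include <err.h>
#include <fcntl.h>
#include <stdio.h>

int
main(void)
{
	struct pfioc_limit pl = { .index = PF_LIMIT_ANCHORS };
	int dev;

	if ((dev = open("/dev/pf", O_RDONLY)) == -1)
		err(1, "open(/dev/pf)");
	/* Defaults to PF_ANCHOR_HIWAT (512) after pfattach_vnet(). */
	if (ioctl(dev, DIOCGETLIMIT, &pl) == -1)
		err(1, "DIOCGETLIMIT");
	printf("anchor limit: %u\n", pl.limit);
	return (0);
}

DIOCSETLIMIT with the same index adjusts the cap, which pf pushes into the zone via uma_zone_set_max(), since the pf.c hunk above wires V_pf_anchor_z into V_pf_limits[PF_LIMIT_ANCHORS].zone.
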
diff --git a/sys/netpfil/pf/pf_ioctl.c b/sys/netpfil/pf/pf_ioctl.c
index 178ee01649c6..e5da05a958f6 100644
--- a/sys/netpfil/pf/pf_ioctl.c
+++ b/sys/netpfil/pf/pf_ioctl.c
@@ -331,6 +331,8 @@ pfattach_vnet(void)
V_pf_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT;
V_pf_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT;
+ V_pf_limits[PF_LIMIT_ANCHORS].limit = PF_ANCHOR_HIWAT;
+ V_pf_limits[PF_LIMIT_ETH_ANCHORS].limit = PF_ANCHOR_HIWAT;
RB_INIT(&V_pf_anchors);
pf_init_kruleset(&pf_main_ruleset);
@@ -4973,6 +4975,7 @@ DIOCCHANGEADDR_error:
goto fail;
}
PF_RULES_WLOCK();
+ io->pfrio_nadd = 0;
error = pfr_add_addrs(&io->pfrio_table, pfras,
io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
PFR_FLAG_USERIOCTL);
@@ -6441,19 +6444,14 @@ shutdown_pf(void)
int error = 0;
u_int32_t t[5];
char nn = '\0';
- struct pf_kanchor *anchor;
- struct pf_keth_anchor *eth_anchor;
+ struct pf_kanchor *anchor, *tmp_anchor;
+ struct pf_keth_anchor *eth_anchor, *tmp_eth_anchor;
int rs_num;
do {
/* Unlink rules of all user defined anchors */
- RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors) {
- /* Wildcard based anchors may not have a respective
- * explicit anchor rule or they may be left empty
- * without rules. It leads to anchor.refcnt=0, and the
- * rest of the logic does not expect it. */
- if (anchor->refcnt == 0)
- anchor->refcnt = 1;
+ RB_FOREACH_SAFE(anchor, pf_kanchor_global, &V_pf_anchors,
+ tmp_anchor) {
for (rs_num = 0; rs_num < PF_RULESET_MAX; ++rs_num) {
if ((error = pf_begin_rules(&t[rs_num], rs_num,
anchor->path)) != 0) {
@@ -6471,14 +6469,8 @@ shutdown_pf(void)
}
/* Unlink rules of all user defined ether anchors */
- RB_FOREACH(eth_anchor, pf_keth_anchor_global,
- &V_pf_keth_anchors) {
- /* Wildcard based anchors may not have a respective
- * explicit anchor rule or they may be left empty
- * without rules. It leads to anchor.refcnt=0, and the
- * rest of the logic does not expect it. */
- if (eth_anchor->refcnt == 0)
- eth_anchor->refcnt = 1;
+ RB_FOREACH_SAFE(eth_anchor, pf_keth_anchor_global,
+ &V_pf_keth_anchors, tmp_eth_anchor) {
if ((error = pf_begin_eth(&t[0], eth_anchor->path))
!= 0) {
DPFPRINTF(PF_DEBUG_MISC, "%s: eth "
diff --git a/sys/netpfil/pf/pf_nl.c b/sys/netpfil/pf/pf_nl.c
index c5de1e84a287..09754359ec2d 100644
--- a/sys/netpfil/pf/pf_nl.c
+++ b/sys/netpfil/pf/pf_nl.c
@@ -2082,6 +2082,123 @@ pf_handle_clear_addrs(struct nlmsghdr *hdr, struct nl_pstate *npt)
return (error);
}
+TAILQ_HEAD(pfr_addrq, pfr_addr_item);
+struct nl_parsed_table_addrs {
+ struct pfr_table table;
+ uint32_t flags;
+ struct pfr_addr addrs[256];
+ size_t addr_count;
+ int nadd;
+ int ndel;
+};
+#define _OUT(_field) offsetof(struct pfr_addr, _field)
+static const struct nlattr_parser nla_p_pfr_addr[] = {
+ { .type = PFR_A_AF, .off = _OUT(pfra_af), .cb = nlattr_get_uint8 },
+ { .type = PFR_A_NET, .off = _OUT(pfra_net), .cb = nlattr_get_uint8 },
+ { .type = PFR_A_NOT, .off = _OUT(pfra_not), .cb = nlattr_get_bool },
+ { .type = PFR_A_ADDR, .off = _OUT(pfra_u), .cb = nlattr_get_in6_addr },
+};
+#undef _OUT
+NL_DECLARE_ATTR_PARSER(pfra_addr_parser, nla_p_pfr_addr);
+
+static int
+nlattr_get_pfr_addr(struct nlattr *nla, struct nl_pstate *npt, const void *arg,
+ void *target)
+{
+ struct nl_parsed_table_addrs *attrs = target;
+ struct pfr_addr addr = { 0 };
+ int error;
+
+ if (attrs->addr_count >= nitems(attrs->addrs))
+ return (E2BIG);
+
+ error = nlattr_get_nested(nla, npt, &pfra_addr_parser, &addr);
+ if (error != 0)
+ return (error);
+
+ memcpy(&attrs->addrs[attrs->addr_count], &addr, sizeof(addr));
+ attrs->addr_count++;
+
+ return (0);
+}
+
+NL_DECLARE_ATTR_PARSER(nested_table_parser, nla_p_table);
+
+#define _OUT(_field) offsetof(struct nl_parsed_table_addrs, _field)
+static const struct nlattr_parser nla_p_table_addr[] = {
+ { .type = PF_TA_TABLE, .off = _OUT(table), .arg = &nested_table_parser, .cb = nlattr_get_nested },
+ { .type = PF_TA_ADDR, .cb = nlattr_get_pfr_addr },
+ { .type = PF_TA_FLAGS, .off = _OUT(flags), .cb = nlattr_get_uint32 },
+};
+NL_DECLARE_PARSER(table_addr_parser, struct genlmsghdr, nlf_p_empty, nla_p_table_addr);
+#undef _OUT
+
+static int
+pf_handle_table_add_addrs(struct nlmsghdr *hdr, struct nl_pstate *npt)
+{
+ struct nl_parsed_table_addrs attrs = { 0 };
+ struct nl_writer *nw = npt->nw;
+ struct genlmsghdr *ghdr_new;
+ int error;
+
+ error = nl_parse_nlmsg(hdr, &table_addr_parser, npt, &attrs);
+ if (error != 0)
+ return (error);
+
+ PF_RULES_WLOCK();
+ error = pfr_add_addrs(&attrs.table, &attrs.addrs[0],
+ attrs.addr_count, &attrs.nadd, attrs.flags | PFR_FLAG_USERIOCTL);
+ PF_RULES_WUNLOCK();
+
+ if (!nlmsg_reply(nw, hdr, sizeof(struct genlmsghdr)))
+ return (ENOMEM);
+
+ ghdr_new = nlmsg_reserve_object(nw, struct genlmsghdr);
+ ghdr_new->cmd = PFNL_CMD_TABLE_ADD_ADDR;
+ ghdr_new->version = 0;
+ ghdr_new->reserved = 0;
+
+ nlattr_add_u32(nw, PF_TA_NBR_ADDED, attrs.nadd);
+
+ if (!nlmsg_end(nw))
+ return (ENOMEM);
+
+ return (error);
+}
+
+static int
+pf_handle_table_del_addrs(struct nlmsghdr *hdr, struct nl_pstate *npt)
+{
+ struct nl_parsed_table_addrs attrs = { 0 };
+ struct nl_writer *nw = npt->nw;
+ struct genlmsghdr *ghdr_new;
+ int error;
+
+ error = nl_parse_nlmsg(hdr, &table_addr_parser, npt, &attrs);
+ if (error != 0)
+ return (error);
+
+ PF_RULES_WLOCK();
+ error = pfr_del_addrs(&attrs.table, &attrs.addrs[0],
+ attrs.addr_count, &attrs.ndel, attrs.flags | PFR_FLAG_USERIOCTL);
+ PF_RULES_WUNLOCK();
+
+ if (!nlmsg_reply(nw, hdr, sizeof(struct genlmsghdr)))
+ return (ENOMEM);
+
+ ghdr_new = nlmsg_reserve_object(nw, struct genlmsghdr);
+ ghdr_new->cmd = PFNL_CMD_TABLE_DEL_ADDR;
+ ghdr_new->version = 0;
+ ghdr_new->reserved = 0;
+
+ nlattr_add_u32(nw, PF_TA_NBR_DELETED, attrs.ndel);
+
+ if (!nlmsg_end(nw))
+ return (ENOMEM);
+
+ return (error);
+}
+
static const struct nlhdr_parser *all_parsers[] = {
&state_parser,
&addrule_parser,
@@ -2096,6 +2213,7 @@ static const struct nlhdr_parser *all_parsers[] = {
&add_addr_parser,
&ruleset_parser,
&table_parser,
+ &table_addr_parser,
};
static uint16_t family_id;
@@ -2318,6 +2436,20 @@ static const struct genl_cmd pf_cmds[] = {
.cmd_flags = GENL_CMD_CAP_DO | GENL_CMD_CAP_HASPOL,
.cmd_priv = PRIV_NETINET_PF,
},
+ {
+ .cmd_num = PFNL_CMD_TABLE_ADD_ADDR,
+ .cmd_name = "TABLE_ADD_ADDRS",
+ .cmd_cb = pf_handle_table_add_addrs,
+ .cmd_flags = GENL_CMD_CAP_DO | GENL_CMD_CAP_HASPOL,
+ .cmd_priv = PRIV_NETINET_PF,
+ },
+ {
+ .cmd_num = PFNL_CMD_TABLE_DEL_ADDR,
+ .cmd_name = "TABLE_DEL_ADDRS",
+ .cmd_cb = pf_handle_table_del_addrs,
+ .cmd_flags = GENL_CMD_CAP_DO | GENL_CMD_CAP_HASPOL,
+ .cmd_priv = PRIV_NETINET_PF,
+ },
};
void
diff --git a/sys/netpfil/pf/pf_nl.h b/sys/netpfil/pf/pf_nl.h
index d263a0b22deb..87daac393821 100644
--- a/sys/netpfil/pf/pf_nl.h
+++ b/sys/netpfil/pf/pf_nl.h
@@ -67,6 +67,8 @@ enum {
PFNL_CMD_GET_TSTATS = 29,
PFNL_CMD_CLR_TSTATS = 30,
PFNL_CMD_CLR_ADDRS = 31,
+ PFNL_CMD_TABLE_ADD_ADDR = 32,
+ PFNL_CMD_TABLE_DEL_ADDR = 33,
__PFNL_CMD_MAX,
};
#define PFNL_CMD_MAX (__PFNL_CMD_MAX -1)
@@ -461,6 +463,23 @@ enum pf_tstats_t {
PF_TS_NZERO = 9, /* u64 */
};
+enum pfr_addr_t {
+ PFR_A_UNSPEC,
+ PFR_A_AF = 1, /* uint8_t */
+ PFR_A_NET = 2, /* uint8_t */
+ PFR_A_NOT = 3, /* bool */
+ PFR_A_ADDR = 4, /* in6_addr */
+};
+
+enum pf_table_addrs_t {
+ PF_TA_UNSPEC,
+ PF_TA_TABLE = 1, /* nested, pf_table_t */
+ PF_TA_ADDR = 2, /* nested, pfr_addr_t */
+ PF_TA_FLAGS = 3, /* u32 */
+ PF_TA_NBR_ADDED = 4, /* u32 */
+ PF_TA_NBR_DELETED = 5, /* u32 */
+};
+
#ifdef _KERNEL
void pf_nl_register(void);
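
Requests for the two new commands carry a nested PF_TA_TABLE (handled by the existing nla_p_table table parser), one nested PF_TA_ADDR per address (the handler accepts up to 256 per message), and PF_TA_FLAGS; the reply reports PF_TA_NBR_ADDED or PF_TA_NBR_DELETED. The following is a rough userland sketch (not from this commit) of composing such a request with the snl(3) helpers that libpfctl already uses; the generic netlink family name "pfctl", the inner table attribute PF_T_NAME, and the exact helper signatures are assumptions here, not taken from this diff.

#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>

#include <netlink/netlink.h>
#include <netlink/netlink_generic.h>
#include <netlink/netlink_snl.h>
#include <netlink/netlink_snl_generic.h>

#include <net/pfvar.h>
#include <netpfil/pf/pf_nl.h>

/* Sketch: queue one address for PFNL_CMD_TABLE_ADD_ADDR.
 * Caller is expected to have done snl_init(ss, NETLINK_GENERIC). */
static bool
table_add_addr(struct snl_state *ss, const char *tblname,
    const struct in6_addr *a, uint8_t af, uint8_t net)
{
	struct snl_writer nw;
	struct nlmsghdr *hdr;
	int off;

	snl_init_writer(ss, &nw);
	hdr = snl_create_genl_msg_request(&nw,
	    snl_get_genl_family(ss, "pfctl") /* assumed family name */,
	    PFNL_CMD_TABLE_ADD_ADDR);
	if (hdr == NULL)
		return (false);

	/* PF_TA_TABLE: nested table spec; PF_T_NAME is assumed here. */
	off = snl_add_msg_attr_nested(&nw, PF_TA_TABLE);
	snl_add_msg_attr_string(&nw, PF_T_NAME, tblname);
	snl_end_attr_nested(&nw, off);

	/* PF_TA_ADDR: one nested pfr_addr; repeat for up to 256 addresses. */
	off = snl_add_msg_attr_nested(&nw, PF_TA_ADDR);
	snl_add_msg_attr_u8(&nw, PFR_A_AF, af);
	snl_add_msg_attr_u8(&nw, PFR_A_NET, net);
	snl_add_msg_attr(&nw, PFR_A_ADDR, sizeof(*a), a);
	snl_end_attr_nested(&nw, off);

	snl_add_msg_attr_u32(&nw, PF_TA_FLAGS, 0);

	if ((hdr = snl_finalize_msg(&nw)) == NULL)
		return (false);
	return (snl_send_message(ss, hdr));
}

A matching reply parser would pull PF_TA_NBR_ADDED (or PF_TA_NBR_DELETED) out of the genlmsghdr reply that pf_handle_table_add_addrs() builds above.
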
diff --git a/sys/netpfil/pf/pf_norm.c b/sys/netpfil/pf/pf_norm.c
index 8cea9557633c..a684d778ab42 100644
--- a/sys/netpfil/pf/pf_norm.c
+++ b/sys/netpfil/pf/pf_norm.c
@@ -118,6 +118,8 @@ VNET_DEFINE_STATIC(uma_zone_t, pf_frnode_z);
#define V_pf_frnode_z VNET(pf_frnode_z)
VNET_DEFINE_STATIC(uma_zone_t, pf_frag_z);
#define V_pf_frag_z VNET(pf_frag_z)
+VNET_DEFINE(uma_zone_t, pf_anchor_z);
+VNET_DEFINE(uma_zone_t, pf_eth_anchor_z);
TAILQ_HEAD(pf_fragqueue, pf_fragment);
TAILQ_HEAD(pf_cachequeue, pf_fragment);
diff --git a/sys/netpfil/pf/pf_ruleset.c b/sys/netpfil/pf/pf_ruleset.c
index 43b51f2933f4..039908a53126 100644
--- a/sys/netpfil/pf/pf_ruleset.c
+++ b/sys/netpfil/pf/pf_ruleset.c
@@ -238,7 +238,7 @@ pf_create_kanchor(struct pf_kanchor *parent, const char *aname)
((parent != NULL) && (strlen(parent->path) >= PF_ANCHOR_MAXPATH)))
return (NULL);
- anchor = rs_malloc(sizeof(*anchor));
+ anchor = uma_zalloc(V_pf_anchor_z, M_NOWAIT | M_ZERO);
if (anchor == NULL)
return (NULL);
@@ -259,7 +259,7 @@ pf_create_kanchor(struct pf_kanchor *parent, const char *aname)
printf("%s: RB_INSERT1 "
"'%s' '%s' collides with '%s' '%s'\n", __func__,
anchor->path, anchor->name, dup->path, dup->name);
- rs_free(anchor);
+ uma_zfree(V_pf_anchor_z, anchor);
return (NULL);
}
@@ -273,7 +273,7 @@ pf_create_kanchor(struct pf_kanchor *parent, const char *aname)
anchor->name, dup->path, dup->name);
RB_REMOVE(pf_kanchor_global, &V_pf_anchors,
anchor);
- rs_free(anchor);
+ uma_zfree(V_pf_anchor_z, anchor);
return (NULL);
}
}
@@ -350,7 +350,7 @@ pf_remove_if_empty_kruleset(struct pf_kruleset *ruleset)
if ((parent = ruleset->anchor->parent) != NULL)
RB_REMOVE(pf_kanchor_node, &parent->children,
ruleset->anchor);
- rs_free(ruleset->anchor);
+ uma_zfree(V_pf_anchor_z, ruleset->anchor);
if (parent == NULL)
return;
ruleset = &parent->ruleset;
@@ -613,7 +613,7 @@ pf_find_or_create_keth_ruleset(const char *path)
rs_free(p);
return (NULL);
}
- anchor = (struct pf_keth_anchor *)rs_malloc(sizeof(*anchor));
+ anchor = uma_zalloc(V_pf_eth_anchor_z, M_NOWAIT | M_ZERO);
if (anchor == NULL) {
rs_free(p);
return (NULL);
@@ -631,7 +631,7 @@ pf_find_or_create_keth_ruleset(const char *path)
printf("%s: RB_INSERT1 "
"'%s' '%s' collides with '%s' '%s'\n", __func__,
anchor->path, anchor->name, dup->path, dup->name);
- rs_free(anchor);
+ uma_zfree(V_pf_eth_anchor_z, anchor);
rs_free(p);
return (NULL);
}
@@ -645,7 +645,7 @@ pf_find_or_create_keth_ruleset(const char *path)
anchor->name, dup->path, dup->name);
RB_REMOVE(pf_keth_anchor_global, &V_pf_keth_anchors,
anchor);
- rs_free(anchor);
+ uma_zfree(V_pf_eth_anchor_z, anchor);
rs_free(p);
return (NULL);
}
@@ -754,7 +754,7 @@ pf_remove_if_empty_keth_ruleset(struct pf_keth_ruleset *ruleset)
if ((parent = ruleset->anchor->parent) != NULL)
RB_REMOVE(pf_keth_anchor_node, &parent->children,
ruleset->anchor);
- rs_free(ruleset->anchor);
+ uma_zfree(V_pf_eth_anchor_z, ruleset->anchor);
if (parent == NULL)
return;
ruleset = &parent->ruleset;
diff --git a/sys/netpfil/pf/pf_table.c b/sys/netpfil/pf/pf_table.c
index ecc185f89ad7..73ec18fa7646 100644
--- a/sys/netpfil/pf/pf_table.c
+++ b/sys/netpfil/pf/pf_table.c
@@ -294,7 +294,7 @@ pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
else
pfr_destroy_kentries(&workq);
if (nadd != NULL)
- *nadd = xadd;
+ *nadd += xadd;
pfr_destroy_ktable(tmpkt, 0);
return (0);
_bad: