summaryrefslogtreecommitdiffstats
path: root/src/lib.c
diff options
context:
space:
mode:
authorPablo Neira Ayuso <pablo@netfilter.org>2025-06-04 23:12:30 +0200
committerPablo Neira Ayuso <pablo@netfilter.org>2025-06-05 00:03:59 +0200
commit009cabe369b71cf5452286fd338eae382718789a (patch)
tree9f14eb626cee252ddf79d34cda9b98b57f5a9454 /src/lib.c
initial commitHEADmaster
knft is a tool to improve test coverage for the low-level nftables kernel API by providing a relatively simple way to define a transaction batch with nftables objects without having to mingle with netlink. A set of tests 612 test (.t) files are included. knft also provides a rudimentary deterministic fuzzer (via -f option) along with several fuzzing modes that mangle existing tests in different ways to improve coverage for error unwinding paths: deltable \ delbasechain \ delchain \ delrule | - delete object in this batch delset / delelem / delobj / flushset - flush set dup - duplicate object reverse-commit - turn commit into abort reverse-abort - turn abort into commit table-dormant - inject table dormant flag table-wakeup - inject table wake-up flag swap - swap objects bogus - inject bogus object to make the transaction fail To inspect how the selected fuzzing mode mangles the test, you can use the -d option to enable debugging along with -c to run it in dry-run mode, eg. # src/./knft -c -f deltable -d tests/expr/meta/03-mark_ok.t tests/expr/meta/03-mark_ok.t... [FUZZING] tests/expr/meta/03-mark_ok.t (deltable) >>>> fuzz_loop at index 0 in state=0 add_table(NFPROTO_IPV4, "test", NULL, NULL, NULL); del_table(NFPROTO_IPV4, "test", NULL); add_chain("test", NULL, NULL, NULL, NULL); add_rule("test", "0x1", NULL, NULL, NULL); meta(NULL, "NFT_REG32_15", "3"); cmp("NFT_REG32_15", "0", "ffffffff"); commit(); <<<< fuzz_loop backtrack STACK limit reached ==== still more tries at index 0 in state=0 add_table(NFPROTO_IPV4, "test", NULL, NULL, NULL); add_chain("test", NULL, NULL, NULL, NULL); del_table(NFPROTO_IPV4, "test", NULL); add_rule("test", "0x1", NULL, NULL, NULL); meta(NULL, "NFT_REG32_15", "3"); cmp("NFT_REG32_15", "0", "ffffffff"); commit(); <<<< fuzz_loop backtrack STACK limit reached ... knft provides a few more options: -e to display the error reported by the kernel. -n to perform test runs without flushing the existing ruleset. 
This tool requires libmnl to build and to parse the netlink messages that are sent and received by the kernel. This tool is released under the GPLv2 (or any later). This project is funded through the NGI0 Entrust established by NLnet (https://nlnet.nl) with support from the European Commission's Next Generation Internet programme.
Diffstat (limited to 'src/lib.c')
-rw-r--r--src/lib.c2404
1 files changed, 2404 insertions, 0 deletions
diff --git a/src/lib.c b/src/lib.c
new file mode 100644
index 0000000..05bc188
--- /dev/null
+++ b/src/lib.c
@@ -0,0 +1,2404 @@
+/*
+ * (C) 2024-2025 by Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+/* Funded through the NGI0 Entrust established by NLnet (https://nlnet.nl)
+ * with support from the European Commission's Next Generation Internet
+ * programme.
+ */
+
#include <arpa/inet.h>
#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/select.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <time.h>
#include <libmnl/libmnl.h>
#include <libnftnl/common.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <linux/netfilter/nf_tables_compat.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_hook.h>
#include "lib.h"
#include "test.h"
+
+static struct mnl_socket *__setup_socket(struct test_batch *batch)
+{
+ unsigned int newbuffsiz;
+ struct mnl_socket *nl;
+
+ nl = mnl_socket_open(NETLINK_NETFILTER);
+ if (nl == NULL) {
+ perror("mnl_socket_open");
+ exit(EXIT_FAILURE);
+ }
+
+ if (mnl_socket_bind(nl, 0, MNL_SOCKET_AUTOPID) < 0) {
+ perror("mnl_socket_bind");
+ exit(EXIT_FAILURE);
+ }
+
+ newbuffsiz = sizeof(batch->buf);
+
+ /* Rise sender buffer length to avoid hitting -EMSGSIZE */
+ setsockopt(mnl_socket_get_fd(nl), SOL_SOCKET, SO_SNDBUF,
+ &newbuffsiz, sizeof(socklen_t));
+
+ /* unpriviledged containers check for CAP_NET_ADMIN on the init_user_ns. */
+ setsockopt(mnl_socket_get_fd(nl), SOL_SOCKET, SO_SNDBUFFORCE,
+ &newbuffsiz, sizeof(socklen_t));
+
+ return nl;
+}
+
/* Initialize an mnl message batch on top of @buf and queue the leading
 * "batch begin" message that nftables transactions require. */
static struct mnl_nlmsg_batch *__setup_batch(char *buf, uint32_t bufsiz)
{
	struct mnl_nlmsg_batch *b = mnl_nlmsg_batch_start(buf, bufsiz);

	nftnl_batch_begin(mnl_nlmsg_batch_current(b), 0);
	mnl_nlmsg_batch_next(b);

	return b;
}
+
/* Public entry point: initialize the command list, the netlink message
 * batch and the netlink socket of a test batch. */
void setup_batch(struct test_batch *batch)
{
	INIT_LIST_HEAD(&batch->cmd);
	batch->batch = __setup_batch(batch->buf, sizeof(batch->buf));
	batch->nl = __setup_socket(batch);
}
+
+static void add_cmd(struct test_batch *batch, enum test_batch_type type, void *obj)
+{
+ struct test_batch_cmd *cmd;
+
+ cmd = calloc(1, sizeof(struct test_batch_cmd));
+ if (!cmd)
+ return;
+
+ cmd->type = type;
+ cmd->obj = obj;
+ cmd->lineno = batch->lineno;
+ list_add_tail(&cmd->list, &batch->cmd);
+}
+
+static void setup_table_ctx(struct test_batch *batch,
+ const struct table *table)
+{
+ if (table) {
+ if (table->nfproto)
+ batch->ctx.family = *table->nfproto;
+ if (table->name)
+ batch->ctx.table = table->name;
+ } else {
+ batch->ctx.family = NFPROTO_UNSPEC;
+ batch->ctx.table = NULL;
+ }
+}
+
/* Track the most recently added rule and set so that later expressions
 * and set elements can be attached to them. */
static void setup_batch_ctx(struct test_batch *batch,
			    struct rule *rule,
			    struct set *set)
{
	batch->ctx.rule = rule;
	batch->ctx.set = set;
}
+
+void free_table(struct table *table)
+{
+ free((void *)table->userdata);
+ free((void *)table->name);
+ free(table);
+}
+
/* Queue a NEWTABLE command and make this table the current scope for
 * subsequently parsed objects. */
void add_table(struct test_batch *batch, struct table *table)
{
	add_cmd(batch, ADD_TABLE, table);
	setup_table_ctx(batch, table);
}
+
/* Queue a SETTABLE command (table update) and make this table the
 * current scope, mirroring add_table(). */
void set_table(struct test_batch *batch, struct table *table)
{
	add_cmd(batch, SET_TABLE, table);
	setup_table_ctx(batch, table);
}
+
/* Queue a DELTABLE command.  The table scope is deliberately left
 * untouched so later objects may still refer to the deleted table
 * (useful to exercise kernel error paths). */
void del_table(struct test_batch *batch, struct table *table)
{
	add_cmd(batch, DEL_TABLE, table);
}
+
+void free_chain(struct chain *chain)
+{
+ free((void *)chain->table);
+ free((void *)chain->name);
+ free((void *)chain->type);
+ free((void *)chain->dev);
+ array_free(chain->dev_array);
+ free((void *)chain->userdata);
+ free(chain);
+}
+
/* Queue a NEWCHAIN command, inheriting table name and family from the
 * current table scope when one is active, and reset the rule/set
 * context. */
void add_chain(struct test_batch *batch, struct chain *chain)
{
	if (batch->ctx.table) {
		chain->table = strdup(batch->ctx.table);
		/* NOTE(review): assumes chain->family was allocated by the
		 * parser; confirm it cannot be NULL when a table scope is
		 * active. */
		*chain->family = batch->ctx.family;
	}

	add_cmd(batch, ADD_CHAIN, chain);
	setup_batch_ctx(batch, NULL, NULL);
}
+
+struct array *array_alloc(void)
+{
+ return calloc(1, sizeof(struct array));
+}
+
+bool array_add(struct array *array, void *data)
+{
+ if (array->num >= ARRAY_MAX)
+ return false;
+
+ array->data[array->num++] = data;
+
+ return true;
+}
+
+void array_free(struct array *array)
+{
+ int i;
+
+ if (!array)
+ return;
+
+ for (i = 0; i < array->num; i++)
+ free(array->data[i]);
+
+ free(array);
+}
+
/* Queue a NEWCHAIN command for a base chain (hook attached), inheriting
 * table/family from the current table scope, and reset the rule/set
 * context. */
void add_basechain(struct test_batch *batch, struct chain *chain)
{
	if (batch->ctx.table) {
		chain->table = strdup(batch->ctx.table);
		/* NOTE(review): assumes chain->family is non-NULL here. */
		*chain->family = batch->ctx.family;
	}

	add_cmd(batch, ADD_BASECHAIN, chain);
	setup_batch_ctx(batch, NULL, NULL);
}
+
/* Queue a DELCHAIN command for a base chain, inheriting table/family
 * from the current table scope.  The batch context is left untouched. */
void del_basechain(struct test_batch *batch, struct chain *chain)
{
	if (batch->ctx.table) {
		chain->table = strdup(batch->ctx.table);
		/* NOTE(review): assumes chain->family is non-NULL here. */
		*chain->family = batch->ctx.family;
	}

	add_cmd(batch, DEL_BASECHAIN, chain);
}
+
+void free_flowtable(struct flowtable *flowtable)
+{
+ free((void *)flowtable->table);
+ free((void *)flowtable->name);
+ array_free(flowtable->dev_array);
+ free(flowtable);
+}
+
/* Queue a NEWFLOWTABLE command, inheriting table/family from the
 * current table scope, and reset the rule/set context. */
void add_flowtable(struct test_batch *batch, struct flowtable *flowtable)
{
	if (batch->ctx.table) {
		flowtable->table = strdup(batch->ctx.table);
		/* NOTE(review): assumes flowtable->family is non-NULL here. */
		*flowtable->family = batch->ctx.family;
	}

	add_cmd(batch, ADD_FLOWTABLE, flowtable);
	setup_batch_ctx(batch, NULL, NULL);
}
+
/* Queue a DELFLOWTABLE command, inheriting table/family from the
 * current table scope.  The batch context is left untouched. */
void del_flowtable(struct test_batch *batch, struct flowtable *flowtable)
{
	if (batch->ctx.table) {
		flowtable->table = strdup(batch->ctx.table);
		/* NOTE(review): assumes flowtable->family is non-NULL here. */
		*flowtable->family = batch->ctx.family;
	}

	add_cmd(batch, DEL_FLOWTABLE, flowtable);
}
+
/*
 * Free a list of expressions, dispatching on expr->type to the
 * type-specific destructor.  Each free_*() releases the fields owned by
 * that expression type.
 *
 * NOTE(review): entries are not list_del()'ed before being freed; the
 * list head is assumed to be discarded right after this call.
 */
void free_expr_list(struct list_head *expr_list)
{
	struct expr *expr, *next;

	list_for_each_entry_safe(expr, next, expr_list, list) {
		switch (expr->type) {
		case BITWISE:
			free_bitwise((struct bitwise *)expr);
			break;
		case BYTEORDER:
			free_byteorder((struct byteorder *)expr);
			break;
		case CMP:
			free_cmp((struct cmp *)expr);
			break;
		case COUNTER:
			free_counter((struct counter *)expr);
			break;
		case CONNLIMIT:
			free_connlimit((struct connlimit *)expr);
			break;
		case CT:
			free_ct((struct ct *)expr);
			break;
		case DUP:
			free_dup((struct dup *)expr);
			break;
		case DYNSET:
			free_dynset((struct dynset *)expr);
			break;
		case EXTHDR:
			free_exthdr((struct exthdr *)expr);
			break;
		case FIB:
			free_fib((struct fib *)expr);
			break;
		case FWD:
			free_fwd((struct fwd *)expr);
			break;
		case HASH:
			free_hash((struct hash *)expr);
			break;
		case INNER:
			free_inner((struct inner *)expr);
			break;
		case IMMEDIATE:
			free_immediate((struct immediate *)expr);
			break;
		case LAST:
			free_last((struct last *)expr);
			break;
		case LIMIT:
			free_limit((struct limit *)expr);
			break;
		case LOG:
			free_log((struct log *)expr);
			break;
		case LOOKUP:
			free_lookup((struct lookup *)expr);
			break;
		case MASQ:
			free_masq((struct masq *)expr);
			break;
		case MATCH:
			free_match((struct match *)expr);
			break;
		case META:
			free_meta((struct meta *)expr);
			break;
		case NAT:
			free_nat((struct nat *)expr);
			break;
		case NUMGEN:
			free_numgen((struct numgen *)expr);
			break;
		case OBJREF:
			free_objref((struct objref *)expr);
			break;
		case OSF:
			free_osf((struct osf *)expr);
			break;
		case PAYLOAD:
			free_payload((struct payload *)expr);
			break;
		case QUEUE:
			free_queue((struct queue *)expr);
			break;
		case QUOTA:
			free_quota((struct quota *)expr);
			break;
		case RANGE:
			free_range((struct range *)expr);
			break;
		case REDIR:
			free_redir((struct redir *)expr);
			break;
		case REJECT:
			free_reject((struct reject *)expr);
			break;
		case RT:
			free_rt((struct rt *)expr);
			break;
		case SOCKET:
			free_socket((struct socket *)expr);
			break;
		case SYNPROXY:
			free_synproxy((struct synproxy *)expr);
			break;
		case TARGET:
			free_target((struct target *)expr);
			break;
		case TPROXY:
			free_tproxy((struct tproxy *)expr);
			break;
		case TUNNEL:
			free_tunnel((struct tunnel *)expr);
			break;
		case XFRM:
			free_xfrm((struct xfrm *)expr);
			break;
		}
	}
}
+
+void free_rule(struct rule *rule)
+{
+ free_expr_list(&rule->expr_list);
+ free((void *)rule->table);
+ free((void *)rule->chain);
+ free((void *)rule->userdata);
+ free(rule);
+}
+
/* Queue a NEWRULE command, inheriting table/family from the current
 * table scope, and make this rule the target for following expressions. */
void add_rule(struct test_batch *batch, struct rule *rule)
{
	if (batch->ctx.table) {
		rule->table = strdup(batch->ctx.table);
		/* NOTE(review): assumes rule->family is non-NULL here. */
		*rule->family = batch->ctx.family;
	}

	add_cmd(batch, ADD_RULE, rule);
	setup_batch_ctx(batch, rule, NULL);
}
+
+int add_expr(struct test_batch *batch, struct expr *expr)
+{
+ if (batch->ctx.expr_list) {
+ list_add_tail(&expr->list, batch->ctx.expr_list);
+ batch->ctx.expr_list = NULL;
+ } else if (batch->ctx.rule) {
+ list_add_tail(&expr->list, &batch->ctx.rule->expr_list);
+ } else if (batch->ctx.set) {
+ list_add_tail(&expr->list, &batch->ctx.set->expr_list);
+ } else {
+ return -1;
+ }
+
+ return 0;
+}
+
/*
 * Free a stateful object and its type-specific payload.  The union
 * member to release is selected by *obj->type; objects without a type
 * carry no payload.
 */
void free_obj(struct obj *obj)
{
	free((void *)obj->table);
	free((void *)obj->name);
	free((void *)obj->userdata);

	if (obj->type) {
		switch (*obj->type) {
		case NFT_OBJECT_COUNTER:
			free(obj->u.counter);
			break;
		case NFT_OBJECT_QUOTA:
			free(obj->u.quota);
			break;
		case NFT_OBJECT_LIMIT:
			free(obj->u.limit);
			break;
		case NFT_OBJECT_CONNLIMIT:
			free(obj->u.connlimit);
			break;
		case NFT_OBJECT_TUNNEL:
			free(obj->u.tun);
			break;
		case NFT_OBJECT_CT_EXPECT:
			free(obj->u.ct_expect);
			break;
		case NFT_OBJECT_SYNPROXY:
			free(obj->u.synproxy);
			break;
		case NFT_OBJECT_CT_HELPER:
			/* helper owns its name string */
			free((void *)obj->u.ct_helper->name);
			free(obj->u.ct_helper);
			break;
		case NFT_OBJECT_CT_TIMEOUT:
			/* timeout owns a u32 array of per-state timeouts */
			array_u32_free(obj->u.ct_timeout->timeout_array);
			free(obj->u.ct_timeout);
			break;
		case NFT_OBJECT_SECMARK:
			/* secmark owns its security context string */
			free((void *)obj->u.secmark->ctx);
			free(obj->u.secmark);
			break;
		}
	}

	free(obj);
}
+
/* Queue a NEWOBJ command, inheriting table/family from the current
 * table scope, and reset the rule/set context. */
void add_obj(struct test_batch *batch, struct obj *obj)
{
	if (batch->ctx.table) {
		obj->table = strdup(batch->ctx.table);
		/* NOTE(review): assumes obj->family is non-NULL here. */
		*obj->family = batch->ctx.family;
	}

	add_cmd(batch, ADD_OBJECT, obj);
	setup_batch_ctx(batch, NULL, NULL);
}
+
/* Queue a DELOBJ command, inheriting table/family from the current
 * table scope.  The batch context is left untouched. */
void del_obj(struct test_batch *batch, struct obj *obj)
{
	if (batch->ctx.table) {
		obj->table = strdup(batch->ctx.table);
		/* NOTE(review): assumes obj->family is non-NULL here. */
		*obj->family = batch->ctx.family;
	}

	add_cmd(batch, DEL_OBJECT, obj);
}
+
+struct array_u8 *array_u8_alloc(void)
+{
+ return calloc(1, sizeof(struct array_u8));
+}
+
+bool array_u8_add(struct array_u8 *array, const char *value)
+{
+ uint32_t val;
+ char *endptr;
+
+ if (array->num >= ARRAY_MAX)
+ return false;
+
+ val = strtoul(value, &endptr, 0);
+ if (!endptr)
+ return false;
+
+ if (val > UINT16_MAX)
+ return false;
+
+ array->data[array->num++] = val;
+ free((void *)value);
+
+ return true;
+}
+
/* Release a u8 array; values are stored inline, so a single free()
 * suffices.  NULL-safe because free(NULL) is a no-op. */
void array_u8_free(struct array_u8 *array)
{
	free(array);
}
+
+struct array_u32 *array_u32_alloc(void)
+{
+ return calloc(1, sizeof(struct array_u32));
+}
+
+bool array_u32_add(struct array_u32 *array, const char *value)
+{
+ uint32_t val;
+ char *endptr;
+
+ if (array->num >= ARRAY_MAX)
+ return false;
+
+ val = strtoul(value, &endptr, 0);
+ if (!endptr)
+ return false;
+
+ if (val > UINT16_MAX)
+ return false;
+
+ array->data[array->num++] = val;
+ free((void *)value);
+
+ return true;
+}
+
/* Release a u32 array; values are stored inline, so a single free()
 * suffices.  NULL-safe because free(NULL) is a no-op. */
void array_u32_free(struct array_u32 *array)
{
	free(array);
}
+
+void free_set(struct set *set)
+{
+ free_expr_list(&set->expr_list);
+ free((void *)set->userdata);
+ free((void *)set->table);
+ free((void *)set->name);
+ array_u8_free(set->field_array);
+ free(set);
+}
+
/* Queue a NEWSET command, inheriting table/family from the current
 * table scope, and make this set the target for following expressions
 * and elements. */
void add_set(struct test_batch *batch, struct set *set)
{
	if (batch->ctx.table) {
		set->table = strdup(batch->ctx.table);
		/* NOTE(review): assumes set->family is non-NULL here. */
		*set->family = batch->ctx.family;
	}

	add_cmd(batch, ADD_SET, set);
	setup_batch_ctx(batch, NULL, set);
}
+
/* Queue a SETSET command (set update), inheriting table/family from the
 * current table scope, and make this set the current set context. */
void set_set(struct test_batch *batch, struct set *set)
{
	if (batch->ctx.table) {
		set->table = strdup(batch->ctx.table);
		/* NOTE(review): assumes set->family is non-NULL here. */
		*set->family = batch->ctx.family;
	}

	add_cmd(batch, SET_SET, set);
	setup_batch_ctx(batch, NULL, set);
}
+
+void free_elem(struct elem *elem)
+{
+ free((void *)elem->table);
+ free((void *)elem->set);
+ free((void *)elem->chain);
+ free((void *)elem->objname);
+ free((void *)elem->userdata);
+ free(elem);
+}
+
/* Queue a NEWSETELEM command, inheriting table/family from the current
 * table scope and set name/id from the current set context. */
void add_elem(struct test_batch *batch, struct elem *elem)
{
	if (batch->ctx.table) {
		elem->table = strdup(batch->ctx.table);
		/* NOTE(review): assumes elem->family is non-NULL here. */
		*elem->family = batch->ctx.family;
	}
	if (batch->ctx.set) {
		if (batch->ctx.set->name)
			elem->set = strdup(batch->ctx.set->name);
		if (batch->ctx.set->set_id)
			elem->set_id = *batch->ctx.set->set_id;
	}

	add_cmd(batch, ADD_SETELEM, elem);
}
+
/* Queue a DELSETELEM command, inheriting table/family from the current
 * table scope and set name/id from the current set context. */
void del_elem(struct test_batch *batch, struct elem *elem)
{
	if (batch->ctx.table) {
		elem->table = strdup(batch->ctx.table);
		/* NOTE(review): assumes elem->family is non-NULL here. */
		*elem->family = batch->ctx.family;
	}
	if (batch->ctx.set) {
		if (batch->ctx.set->name)
			elem->set = strdup(batch->ctx.set->name);
		if (batch->ctx.set->set_id)
			elem->set_id = *batch->ctx.set->set_id;
	}

	add_cmd(batch, DEL_SETELEM, elem);
}
+
/* Dereference an optional nfproto pointer, defaulting to NFPROTO_UNSPEC
 * when the test did not specify a family. */
static uint32_t get_nfproto(uint32_t *nfproto)
{
	if (!nfproto)
		return NFPROTO_UNSPEC;

	return *nfproto;
}
+
/* Emit a NEWTABLE message into the batch.  Attributes are emitted only
 * for fields present in the test, so incomplete messages can be built
 * on purpose.  cmd->lineno doubles as the netlink sequence number so
 * kernel errors map back to input lines. */
static void build_table(struct test_batch *batch, struct test_batch_cmd *cmd)
{
	struct table *table = cmd->obj;
	struct nlmsghdr *nlh;

	nlh = nftnl_nlmsg_build_hdr(mnl_nlmsg_batch_current(batch->batch),
				    NFT_MSG_NEWTABLE, get_nfproto(table->nfproto),
				    NLM_F_CREATE, cmd->lineno);

	if (table->name)
		mnl_attr_put_strz(nlh, NFTA_TABLE_NAME, table->name);
	if (table->handle)
		mnl_attr_put_u64(nlh, NFTA_TABLE_HANDLE, htobe64(*table->handle));
	if (table->flags)
		mnl_attr_put_u32(nlh, NFTA_TABLE_FLAGS, htonl(*table->flags));
	if (table->userdata)
		mnl_attr_put(nlh, NFTA_TABLE_USERDATA, strlen(table->userdata), table->userdata);

	mnl_nlmsg_batch_next(batch->batch);
}
+
/* Emit a DELTABLE message; the table is addressed by name and/or
 * handle, whichever the test supplied. */
static void build_del_table(struct test_batch *batch, struct test_batch_cmd *cmd)
{
	struct table *table = cmd->obj;
	struct nlmsghdr *nlh;

	nlh = nftnl_nlmsg_build_hdr(mnl_nlmsg_batch_current(batch->batch),
				    NFT_MSG_DELTABLE, get_nfproto(table->nfproto),
				    0, cmd->lineno);
	if (table->name)
		mnl_attr_put_strz(nlh, NFTA_TABLE_NAME, table->name);
	if (table->handle)
		mnl_attr_put_u64(nlh, NFTA_TABLE_HANDLE, htobe64(*table->handle));
	mnl_nlmsg_batch_next(batch->batch);
}
+
+static void __build_chain(struct nlmsghdr *nlh, struct chain *chain)
+{
+ struct nlattr *nest;
+
+ if (chain->table)
+ mnl_attr_put_strz(nlh, NFTA_CHAIN_TABLE, chain->table);
+ if (chain->name)
+ mnl_attr_put_strz(nlh, NFTA_CHAIN_NAME, chain->name);
+ if (chain->flags)
+ mnl_attr_put_u32(nlh, NFTA_CHAIN_FLAGS, htonl(*chain->flags));
+ if (chain->chain_id)
+ mnl_attr_put_u32(nlh, NFTA_CHAIN_ID, htonl(*chain->chain_id));
+ if (chain->userdata)
+ mnl_attr_put(nlh, NFTA_CHAIN_USERDATA, strlen(chain->userdata), chain->userdata);
+ if (chain->handle)
+ mnl_attr_put_u64(nlh, NFTA_CHAIN_HANDLE, be64toh(*chain->handle));
+
+ if (chain->hooknum || chain->prio || chain->dev || chain->dev_array)
+ nest = mnl_attr_nest_start(nlh, NFTA_CHAIN_HOOK);
+
+ if (chain->hooknum)
+ mnl_attr_put_u32(nlh, NFTA_HOOK_HOOKNUM, htonl(*chain->hooknum));
+ if (chain->prio)
+ mnl_attr_put_u32(nlh, NFTA_HOOK_PRIORITY, htonl(*chain->prio));
+ if (chain->dev)
+ mnl_attr_put_strz(nlh, NFTA_HOOK_DEV, chain->dev);
+ if (chain->dev_array) {
+ struct nlattr *nest_dev;
+ int i;
+
+ nest_dev = mnl_attr_nest_start(nlh, NFTA_HOOK_DEVS);
+ for (i = 0; i < chain->dev_array->num; i++)
+ mnl_attr_put_strz(nlh, NFTA_DEVICE_NAME,
+ chain->dev_array->data[i]);
+ mnl_attr_nest_end(nlh, nest_dev);
+ }
+
+ if (chain->hooknum || chain->prio || chain->dev || chain->dev_array)
+ mnl_attr_nest_end(nlh, nest);
+
+ if (chain->policy)
+ mnl_attr_put_u32(nlh, NFTA_CHAIN_POLICY, htonl(*chain->policy));
+ if (chain->type)
+ mnl_attr_put_strz(nlh, NFTA_CHAIN_TYPE, chain->type);
+
+ if (chain->bytes || chain->pkts)
+ nest = mnl_attr_nest_start(nlh, NFTA_CHAIN_COUNTERS);
+
+ if (chain->bytes)
+ mnl_attr_put_u64(nlh, NFTA_COUNTER_BYTES, be64toh(*chain->bytes));
+ if (chain->pkts)
+ mnl_attr_put_u64(nlh, NFTA_COUNTER_PACKETS, be64toh(*chain->pkts));
+
+ if (chain->bytes || chain->pkts)
+ mnl_attr_nest_end(nlh, nest);
+}
+
/* Emit a NEWCHAIN message; the attribute payload is shared with
 * basechain handling via __build_chain(). */
static void build_chain(struct test_batch *batch, struct test_batch_cmd *cmd)
{
	struct chain *chain = cmd->obj;
	struct nlmsghdr *nlh;

	nlh = nftnl_nlmsg_build_hdr(mnl_nlmsg_batch_current(batch->batch),
				    NFT_MSG_NEWCHAIN, get_nfproto(chain->family),
				    NLM_F_CREATE, cmd->lineno);
	__build_chain(nlh, chain);
	mnl_nlmsg_batch_next(batch->batch);
}
+
/* Serialize a bitwise expression.  mask/xor/data payloads are wrapped
 * in NFTA_DATA_VALUE nests.
 * NOTE(review): the guards are asymmetric — mask keys on mask_len while
 * xor and data key on their pointers; confirm this is intended. */
static void build_bitwise(struct nlmsghdr *nlh, struct expr *expr)
{
	struct bitwise *bitwise = (struct bitwise *)expr;

	if (bitwise->sreg)
		mnl_attr_put_u32(nlh, NFTA_BITWISE_SREG, htonl(*bitwise->sreg));
	if (bitwise->dreg)
		mnl_attr_put_u32(nlh, NFTA_BITWISE_DREG, htonl(*bitwise->dreg));
	if (bitwise->op)
		mnl_attr_put_u32(nlh, NFTA_BITWISE_OP, htonl(*bitwise->op));
	if (bitwise->len)
		mnl_attr_put_u32(nlh, NFTA_BITWISE_LEN, htonl(*bitwise->len));
	if (bitwise->mask_len) {
		struct nlattr *nest;

		nest = mnl_attr_nest_start(nlh, NFTA_BITWISE_MASK);
		mnl_attr_put(nlh, NFTA_DATA_VALUE, bitwise->mask_len,
			     bitwise->mask);
		mnl_attr_nest_end(nlh, nest);
	}
	if (bitwise->xor) {
		struct nlattr *nest;

		nest = mnl_attr_nest_start(nlh, NFTA_BITWISE_XOR);
		mnl_attr_put(nlh, NFTA_DATA_VALUE, bitwise->xor_len,
			     bitwise->xor);
		mnl_attr_nest_end(nlh, nest);
	}
	if (bitwise->data) {
		struct nlattr *nest;

		nest = mnl_attr_nest_start(nlh, NFTA_BITWISE_DATA);
		mnl_attr_put(nlh, NFTA_DATA_VALUE, bitwise->data_len,
			     bitwise->data);
		mnl_attr_nest_end(nlh, nest);
	}
}
+
/* Serialize a byteorder expression (all attributes optional). */
static void build_byteorder(struct nlmsghdr *nlh, struct expr *expr)
{
	struct byteorder *byteorder = (struct byteorder *)expr;

	if (byteorder->sreg)
		mnl_attr_put_u32(nlh, NFTA_BYTEORDER_SREG, htonl(*byteorder->sreg));
	if (byteorder->dreg)
		mnl_attr_put_u32(nlh, NFTA_BYTEORDER_DREG, htonl(*byteorder->dreg));
	if (byteorder->op)
		mnl_attr_put_u32(nlh, NFTA_BYTEORDER_OP, htonl(*byteorder->op));
	if (byteorder->len)
		mnl_attr_put_u32(nlh, NFTA_BYTEORDER_LEN, htonl(*byteorder->len));
	if (byteorder->size)
		mnl_attr_put_u32(nlh, NFTA_BYTEORDER_SIZE, htonl(*byteorder->size));
}
+
/* Serialize a cmp expression; the comparison data is wrapped in an
 * NFTA_CMP_DATA / NFTA_DATA_VALUE nest. */
static void build_cmp(struct nlmsghdr *nlh, struct expr *expr)
{
	struct cmp *cmp = (struct cmp *)expr;

	if (cmp->sreg)
		mnl_attr_put_u32(nlh, NFTA_CMP_SREG, htonl(*cmp->sreg));
	if (cmp->op)
		mnl_attr_put_u32(nlh, NFTA_CMP_OP, htonl(*cmp->op));
	if (cmp->data_len) {
		struct nlattr *nest;

		nest = mnl_attr_nest_start(nlh, NFTA_CMP_DATA);
		mnl_attr_put(nlh, NFTA_DATA_VALUE, cmp->data_len, cmp->data);
		mnl_attr_nest_end(nlh, nest);
	}
}
+
/* Serialize a connlimit expression. */
static void build_connlimit(struct nlmsghdr *nlh, struct expr *expr)
{
	struct connlimit *connlimit = (struct connlimit *)expr;

	if (connlimit->count) {
		mnl_attr_put_u32(nlh, NFTA_CONNLIMIT_COUNT,
				 htonl(*connlimit->count));
	}
	if (connlimit->flags) {
		mnl_attr_put_u32(nlh, NFTA_CONNLIMIT_FLAGS,
				 htonl(*connlimit->flags));
	}
}
+
/* Serialize a counter expression (64-bit big-endian on the wire). */
static void build_counter(struct nlmsghdr *nlh, struct expr *expr)
{
	struct counter *ctr = (struct counter *)expr;

	if (ctr->bytes)
		mnl_attr_put_u64(nlh, NFTA_COUNTER_BYTES, htobe64(*ctr->bytes));
	if (ctr->pkts)
		mnl_attr_put_u64(nlh, NFTA_COUNTER_PACKETS, htobe64(*ctr->pkts));
}
+
/* Serialize a ct expression; direction is a raw u8, everything else
 * 32-bit network order. */
static void build_ct(struct nlmsghdr *nlh, struct expr *expr)
{
	struct ct *ct = (struct ct *)expr;

	if (ct->key)
		mnl_attr_put_u32(nlh, NFTA_CT_KEY, htonl(*ct->key));
	if (ct->dreg)
		mnl_attr_put_u32(nlh, NFTA_CT_DREG, htonl(*ct->dreg));
	if (ct->dir)
		mnl_attr_put_u8(nlh, NFTA_CT_DIRECTION, *ct->dir);
	if (ct->sreg)
		mnl_attr_put_u32(nlh, NFTA_CT_SREG, htonl(*ct->sreg));
}
+
/* Serialize a dup expression. */
static void build_dup(struct nlmsghdr *nlh, struct expr *expr)
{
	struct dup *dup = (struct dup *)expr;

	if (dup->sreg_addr)
		mnl_attr_put_u32(nlh, NFTA_DUP_SREG_ADDR, htonl(*dup->sreg_addr));
	if (dup->sreg_dev)
		mnl_attr_put_u32(nlh, NFTA_DUP_SREG_DEV, htonl(*dup->sreg_dev));
}
+
+static void build_dynset(struct nlmsghdr *nlh, struct expr *expr)
+{
+ struct dynset *dynset = (struct dynset *)expr;
+ uint32_t num_exprs = 0;
+
+ if (dynset->sreg_key)
+ mnl_attr_put_u32(nlh, NFTA_DYNSET_SREG_KEY, htonl(*dynset->sreg_key));
+ if (dynset->sreg_data)
+ mnl_attr_put_u32(nlh, NFTA_DYNSET_SREG_DATA, htonl(*dynset->sreg_data));
+ if (dynset->op)
+ mnl_attr_put_u32(nlh, NFTA_DYNSET_OP, htonl(*dynset->op));
+ if (dynset->timeout)
+ mnl_attr_put_u64(nlh, NFTA_DYNSET_TIMEOUT, htobe64(*dynset->timeout));
+ if (dynset->set)
+ mnl_attr_put_strz(nlh, NFTA_DYNSET_SET_NAME, dynset->set);
+ if (dynset->set_id)
+ mnl_attr_put_u32(nlh, NFTA_DYNSET_SET_ID, htonl(*dynset->set_id));
+ if (dynset->flags)
+ mnl_attr_put_u32(nlh, NFTA_DYNSET_FLAGS, htonl(*dynset->flags));
+ if (!list_empty(&dynset->expr_list))
+ ; // TODO
+}
+
/* Serialize an exthdr expression. */
static void build_exthdr(struct nlmsghdr *nlh, struct expr *expr)
{
	struct exthdr *exthdr = (struct exthdr *)expr;

	if (exthdr->dreg)
		mnl_attr_put_u32(nlh, NFTA_EXTHDR_DREG, htonl(*exthdr->dreg));
	if (exthdr->sreg)
		mnl_attr_put_u32(nlh, NFTA_EXTHDR_SREG, htonl(*exthdr->sreg));
	if (exthdr->type)
		mnl_attr_put_u8(nlh, NFTA_EXTHDR_TYPE, *exthdr->type);
	if (exthdr->offset)
		mnl_attr_put_u32(nlh, NFTA_EXTHDR_OFFSET, htonl(*exthdr->offset));
	if (exthdr->len)
		mnl_attr_put_u32(nlh, NFTA_EXTHDR_LEN, htonl(*exthdr->len));
	if (exthdr->op)
		mnl_attr_put_u32(nlh, NFTA_EXTHDR_OP, htonl(*exthdr->op));
	if (exthdr->flags)
		mnl_attr_put_u32(nlh, NFTA_EXTHDR_FLAGS, htonl(*exthdr->flags));
}
+
/* Serialize a fib expression. */
static void build_fib(struct nlmsghdr *nlh, struct expr *expr)
{
	struct fib *fib = (struct fib *)expr;

	if (fib->flags)
		mnl_attr_put_u32(nlh, NFTA_FIB_FLAGS, htonl(*fib->flags));
	if (fib->result)
		mnl_attr_put_u32(nlh, NFTA_FIB_RESULT, htonl(*fib->result));
	if (fib->dreg)
		mnl_attr_put_u32(nlh, NFTA_FIB_DREG, htonl(*fib->dreg));
}
+
/* Serialize a fwd expression. */
static void build_fwd(struct nlmsghdr *nlh, struct expr *expr)
{
	struct fwd *fwd = (struct fwd *)expr;

	if (fwd->sreg_dev)
		mnl_attr_put_u32(nlh, NFTA_FWD_SREG_DEV, htonl(*fwd->sreg_dev));
	if (fwd->sreg_addr)
		mnl_attr_put_u32(nlh, NFTA_FWD_SREG_ADDR, htonl(*fwd->sreg_addr));
	if (fwd->nfproto)
		mnl_attr_put_u32(nlh, NFTA_FWD_NFPROTO, htonl(*fwd->nfproto));
}
+
/* Serialize a hash expression. */
static void build_hash(struct nlmsghdr *nlh, struct expr *expr)
{
	struct hash *hash = (struct hash *)expr;

	if (hash->sreg)
		mnl_attr_put_u32(nlh, NFTA_HASH_SREG, htonl(*hash->sreg));
	if (hash->dreg)
		mnl_attr_put_u32(nlh, NFTA_HASH_DREG, htonl(*hash->dreg));
	if (hash->len)
		mnl_attr_put_u32(nlh, NFTA_HASH_LEN, htonl(*hash->len));
	if (hash->modulus)
		mnl_attr_put_u32(nlh, NFTA_HASH_MODULUS, htonl(*hash->modulus));
	if (hash->seed)
		mnl_attr_put_u32(nlh, NFTA_HASH_SEED, htonl(*hash->seed));
	if (hash->offset)
		mnl_attr_put_u32(nlh, NFTA_HASH_OFFSET, htonl(*hash->offset));
	if (hash->type)
		mnl_attr_put_u32(nlh, NFTA_HASH_TYPE, htonl(*hash->type));
}
+
+static void build_immediate(struct nlmsghdr *nlh, struct expr *expr)
+{
+ struct immediate *imm = (struct immediate *)expr;
+ struct nlattr *nest1, *nest2;
+
+ if (imm->dreg)
+ mnl_attr_put_u32(nlh, NFTA_IMMEDIATE_DREG, htonl(*imm->dreg));
+
+ /* Sane configurations allows you to set ONLY one of these two below */
+ if (imm->data_len) {
+ struct nlattr *nest;
+
+ nest = mnl_attr_nest_start(nlh, NFTA_IMMEDIATE_DATA);
+ mnl_attr_put(nlh, NFTA_DATA_VALUE, imm->data_len, imm->data);
+ mnl_attr_nest_end(nlh, nest);
+ }
+
+ if (imm->verdict || imm->chain) {
+ struct nlattr *nest1, *nest2;
+
+ nest1 = mnl_attr_nest_start(nlh, NFTA_IMMEDIATE_DATA);
+ nest2 = mnl_attr_nest_start(nlh, NFTA_DATA_VERDICT);
+ if (imm->verdict)
+ mnl_attr_put_u32(nlh, NFTA_VERDICT_CODE, htonl(*imm->verdict));
+ if (imm->chain)
+ mnl_attr_put_strz(nlh, NFTA_VERDICT_CHAIN, imm->chain);
+ if (imm->chain_id)
+ mnl_attr_put_u32(nlh, NFTA_VERDICT_CHAIN_ID, htonl(*imm->chain_id));
+
+ mnl_attr_nest_end(nlh, nest1);
+ mnl_attr_nest_end(nlh, nest2);
+ }
+}
+
+static void __build_expr(struct nlmsghdr *nlh, struct expr *expr);
+
/* Serialize an inner expression; only the FIRST expression of
 * inner->expr_list is serialized into the NFTA_INNER_EXPR nest. */
static void build_inner(struct nlmsghdr *nlh, struct expr *expr)
{
	struct inner *inner = (struct inner *)expr;
	struct nlattr *nest;
	struct expr *inner_expr;

	if (inner->num)
		mnl_attr_put_u32(nlh, NFTA_INNER_NUM, htonl(*inner->num));
	if (inner->type)
		mnl_attr_put_u32(nlh, NFTA_INNER_TYPE, htonl(*inner->type));
	if (inner->flags)
		mnl_attr_put_u32(nlh, NFTA_INNER_FLAGS, htonl(*inner->flags));
	if (inner->hdrsize)
		mnl_attr_put_u32(nlh, NFTA_INNER_HDRSIZE, htonl(*inner->hdrsize));
	if (!list_empty(&inner->expr_list)) {
		nest = mnl_attr_nest_start(nlh, NFTA_INNER_EXPR);
		inner_expr = list_first_entry(&inner->expr_list, struct expr, list);
		__build_expr(nlh, inner_expr);
		mnl_attr_nest_end(nlh, nest);
	}
}
+
/* Serialize a last expression. */
static void build_last(struct nlmsghdr *nlh, struct expr *expr)
{
	struct last *last = (struct last *)expr;

	if (last->msecs)
		mnl_attr_put_u64(nlh, NFTA_LAST_MSECS, htobe64(*last->msecs));
	if (last->set)
		mnl_attr_put_u32(nlh, NFTA_LAST_SET, htonl(*last->set));
}
+
/* Serialize a limit expression; rate and unit travel as 64-bit
 * big-endian. */
static void build_limit(struct nlmsghdr *nlh, struct expr *expr)
{
	struct limit *limit = (struct limit *)expr;

	if (limit->rate)
		mnl_attr_put_u64(nlh, NFTA_LIMIT_RATE, htobe64(*limit->rate));
	if (limit->unit)
		mnl_attr_put_u64(nlh, NFTA_LIMIT_UNIT, htobe64(*limit->unit));
	if (limit->burst)
		mnl_attr_put_u32(nlh, NFTA_LIMIT_BURST, htonl(*limit->burst));
	if (limit->type)
		mnl_attr_put_u32(nlh, NFTA_LIMIT_TYPE, htonl(*limit->type));
	if (limit->flags)
		mnl_attr_put_u32(nlh, NFTA_LIMIT_FLAGS, htonl(*limit->flags));
}
+
/* Serialize a log expression; group and qthreshold are 16-bit. */
static void build_log(struct nlmsghdr *nlh, struct expr *expr)
{
	struct log *log = (struct log *)expr;

	if (log->prefix)
		mnl_attr_put_strz(nlh, NFTA_LOG_PREFIX, log->prefix);
	if (log->group)
		mnl_attr_put_u16(nlh, NFTA_LOG_GROUP, htons(*log->group));
	if (log->snaplen)
		mnl_attr_put_u32(nlh, NFTA_LOG_SNAPLEN, htonl(*log->snaplen));
	if (log->qthreshold)
		mnl_attr_put_u16(nlh, NFTA_LOG_QTHRESHOLD, htons(*log->qthreshold));
	if (log->level)
		mnl_attr_put_u32(nlh, NFTA_LOG_LEVEL, htonl(*log->level));
	if (log->flags)
		mnl_attr_put_u32(nlh, NFTA_LOG_FLAGS, htonl(*log->flags));
}
+
/* Serialize a lookup expression; the set may be referenced by name or
 * by id. */
static void build_lookup(struct nlmsghdr *nlh, struct expr *expr)
{
	struct lookup *lookup = (struct lookup *)expr;

	if (lookup->sreg)
		mnl_attr_put_u32(nlh, NFTA_LOOKUP_SREG, htonl(*lookup->sreg));
	if (lookup->dreg)
		mnl_attr_put_u32(nlh, NFTA_LOOKUP_DREG, htonl(*lookup->dreg));
	if (lookup->set)
		mnl_attr_put_strz(nlh, NFTA_LOOKUP_SET, lookup->set);
	if (lookup->set_id)
		mnl_attr_put_u32(nlh, NFTA_LOOKUP_SET_ID, htonl(*lookup->set_id));
	if (lookup->flags)
		mnl_attr_put_u32(nlh, NFTA_LOOKUP_FLAGS, htonl(*lookup->flags));
}
+
/* Serialize a masq expression.  htobe32 is used here (equivalent to
 * htonl on all platforms). */
static void build_masq(struct nlmsghdr *nlh, struct expr *expr)
{
	struct masq *masq = (struct masq *)expr;

	if (masq->flags)
		mnl_attr_put_u32(nlh, NFTA_MASQ_FLAGS, htobe32(*masq->flags));
	if (masq->sreg_proto_min)
		mnl_attr_put_u32(nlh, NFTA_MASQ_REG_PROTO_MIN,
				 htobe32(*masq->sreg_proto_min));
	if (masq->sreg_proto_max)
		mnl_attr_put_u32(nlh, NFTA_MASQ_REG_PROTO_MAX,
				 htobe32(*masq->sreg_proto_max));
}
+
/* Serialize an xtables match expression; the match payload is raw
 * NFTA_MATCH_INFO data. */
static void build_match(struct nlmsghdr *nlh, struct expr *expr)
{
	struct match *mt = (struct match *)expr;

	if (mt->name)
		mnl_attr_put_strz(nlh, NFTA_MATCH_NAME, mt->name);
	if (mt->rev)
		mnl_attr_put_u32(nlh, NFTA_MATCH_REV, htonl(*mt->rev));
	if (mt->data_len)
		mnl_attr_put(nlh, NFTA_MATCH_INFO, mt->data_len, mt->data);
}
+
/* Serialize a meta expression. */
static void build_meta(struct nlmsghdr *nlh, struct expr *expr)
{
	struct meta *meta = (struct meta *)expr;

	if (meta->key)
		mnl_attr_put_u32(nlh, NFTA_META_KEY, htonl(*meta->key));
	if (meta->dreg)
		mnl_attr_put_u32(nlh, NFTA_META_DREG, htonl(*meta->dreg));
	if (meta->sreg)
		mnl_attr_put_u32(nlh, NFTA_META_SREG, htonl(*meta->sreg));
}
+
/* Serialize a nat expression. */
static void build_nat(struct nlmsghdr *nlh, struct expr *expr)
{
	struct nat *nat = (struct nat *)expr;

	if (nat->type)
		mnl_attr_put_u32(nlh, NFTA_NAT_TYPE, htonl(*nat->type));
	if (nat->nfproto)
		mnl_attr_put_u32(nlh, NFTA_NAT_FAMILY, htonl(*nat->nfproto));
	if (nat->sreg_addr_min)
		mnl_attr_put_u32(nlh, NFTA_NAT_REG_ADDR_MIN,
				 htonl(*nat->sreg_addr_min));
	if (nat->sreg_addr_max)
		mnl_attr_put_u32(nlh, NFTA_NAT_REG_ADDR_MAX,
				 htonl(*nat->sreg_addr_max));
	if (nat->sreg_proto_min)
		mnl_attr_put_u32(nlh, NFTA_NAT_REG_PROTO_MIN,
				 htonl(*nat->sreg_proto_min));
	if (nat->sreg_proto_max)
		mnl_attr_put_u32(nlh, NFTA_NAT_REG_PROTO_MAX,
				 htonl(*nat->sreg_proto_max));
	if (nat->flags)
		mnl_attr_put_u32(nlh, NFTA_NAT_FLAGS, htonl(*nat->flags));
}
+
/* Serialize a numgen expression. */
static void build_numgen(struct nlmsghdr *nlh, struct expr *expr)
{
	struct numgen *ng = (struct numgen *)expr;

	if (ng->dreg)
		mnl_attr_put_u32(nlh, NFTA_NG_DREG, htonl(*ng->dreg));
	if (ng->modulus)
		mnl_attr_put_u32(nlh, NFTA_NG_MODULUS, htonl(*ng->modulus));
	if (ng->type)
		mnl_attr_put_u32(nlh, NFTA_NG_TYPE, htonl(*ng->type));
	if (ng->offset)
		mnl_attr_put_u32(nlh, NFTA_NG_OFFSET, htonl(*ng->offset));
}
+
/* Serialize an objref expression; either an immediate object reference
 * (type/name) or a set-based one (sreg/set name/set id). */
static void build_objref(struct nlmsghdr *nlh, struct expr *expr)
{
	struct objref *objref = (struct objref *)expr;

	if (objref->type)
		mnl_attr_put_u32(nlh, NFTA_OBJREF_IMM_TYPE,
				 htonl(*objref->type));
	if (objref->name)
		mnl_attr_put_str(nlh, NFTA_OBJREF_IMM_NAME, objref->name);
	if (objref->sreg)
		mnl_attr_put_u32(nlh, NFTA_OBJREF_SET_SREG,
				 htonl(*objref->sreg));
	if (objref->set_name)
		mnl_attr_put_str(nlh, NFTA_OBJREF_SET_NAME, objref->set_name);
	if (objref->set_id)
		mnl_attr_put_u32(nlh, NFTA_OBJREF_SET_ID,
				 htonl(*objref->set_id));
}
+
+static void build_osf(struct nlmsghdr *nlh, struct expr *expr)
+{
+ struct osf *osf = (struct osf *)expr;
+
+ if (osf->dreg)
+ mnl_attr_put_u32(nlh, NFTA_OSF_DREG, htonl(*osf->dreg));
+ if (osf->ttl)
+ mnl_attr_put_u8(nlh, NFTA_OSF_TTL, *osf->ttl);
+ if (osf->flags)
+ mnl_attr_put_u32(nlh, NFTA_OSF_FLAGS, htonl(*osf->flags));
+}
+
+static void build_payload(struct nlmsghdr *nlh, struct expr *expr)
+{
+ struct payload *payload = (struct payload *)expr;
+
+ if (payload->sreg)
+ mnl_attr_put_u32(nlh, NFTA_PAYLOAD_SREG, htonl(*payload->sreg));
+ if (payload->dreg)
+ mnl_attr_put_u32(nlh, NFTA_PAYLOAD_DREG, htonl(*payload->dreg));
+ if (payload->base)
+ mnl_attr_put_u32(nlh, NFTA_PAYLOAD_BASE, htonl(*payload->base));
+ if (payload->offset)
+ mnl_attr_put_u32(nlh, NFTA_PAYLOAD_OFFSET, htonl(*payload->offset));
+ if (payload->len)
+ mnl_attr_put_u32(nlh, NFTA_PAYLOAD_LEN, htonl(*payload->len));
+ if (payload->csum_type)
+ mnl_attr_put_u32(nlh, NFTA_PAYLOAD_CSUM_TYPE,
+ htonl(*payload->csum_type));
+ if (payload->csum_offset)
+ mnl_attr_put_u32(nlh, NFTA_PAYLOAD_CSUM_OFFSET,
+ htonl(*payload->csum_offset));
+ if (payload->csum_flags)
+ mnl_attr_put_u32(nlh, NFTA_PAYLOAD_CSUM_FLAGS,
+ htonl(*payload->csum_flags));
+}
+
+static void build_queue(struct nlmsghdr *nlh, struct expr *expr)
+{
+ struct queue *queue = (struct queue *)expr;
+
+ if (queue->queue_num)
+ mnl_attr_put_u16(nlh, NFTA_QUEUE_NUM, htons(*queue->queue_num));
+ if (queue->queue_total)
+ mnl_attr_put_u16(nlh, NFTA_QUEUE_TOTAL, htons(*queue->queue_total));
+ if (queue->flags)
+ mnl_attr_put_u16(nlh, NFTA_QUEUE_FLAGS, htons(*queue->flags));
+ if (queue->sreg_qnum)
+ mnl_attr_put_u32(nlh, NFTA_QUEUE_SREG_QNUM, htonl(*queue->sreg_qnum));
+}
+
+static void build_quota(struct nlmsghdr *nlh, struct expr *expr)
+{
+ struct quota *quota = (struct quota *)expr;
+
+ if (quota->bytes)
+ mnl_attr_put_u64(nlh, NFTA_QUOTA_BYTES, htobe64(*quota->bytes));
+ if (quota->consumed)
+ mnl_attr_put_u64(nlh, NFTA_QUOTA_CONSUMED, htobe64(*quota->consumed));
+ if (quota->flags)
+ mnl_attr_put_u32(nlh, NFTA_QUOTA_FLAGS, htonl(*quota->flags));
+}
+
+static void build_range(struct nlmsghdr *nlh, struct expr *expr)
+{
+ struct range *range = (struct range *)expr;
+
+ if (range->sreg)
+ mnl_attr_put_u32(nlh, NFTA_RANGE_SREG, htonl(*range->sreg));
+ if (range->op)
+ mnl_attr_put_u32(nlh, NFTA_RANGE_OP, htonl(*range->op));
+ if (range->data_from_len) {
+ struct nlattr *nest;
+
+ nest = mnl_attr_nest_start(nlh, NFTA_RANGE_FROM_DATA);
+ mnl_attr_put(nlh, NFTA_DATA_VALUE, range->data_from_len,
+ range->data_from);
+ mnl_attr_nest_end(nlh, nest);
+ }
+ if (range->data_to_len) {
+ struct nlattr *nest;
+
+ nest = mnl_attr_nest_start(nlh, NFTA_RANGE_TO_DATA);
+ mnl_attr_put(nlh, NFTA_DATA_VALUE, range->data_to_len,
+ range->data_to);
+ mnl_attr_nest_end(nlh, nest);
+ }
+}
+
+static void build_redir(struct nlmsghdr *nlh, struct expr *expr)
+{
+ struct redir *redir = (struct redir *)expr;
+
+ if (redir->sreg_proto_min)
+ mnl_attr_put_u32(nlh, NFTA_REDIR_REG_PROTO_MIN,
+ htobe32(*redir->sreg_proto_min));
+ if (redir->sreg_proto_max)
+ mnl_attr_put_u32(nlh, NFTA_REDIR_REG_PROTO_MAX,
+ htobe32(*redir->sreg_proto_max));
+ if (redir->flags)
+ mnl_attr_put_u32(nlh, NFTA_REDIR_FLAGS, htobe32(*redir->flags));
+}
+
+static void build_reject(struct nlmsghdr *nlh, struct expr *expr)
+{
+ struct reject *reject = (struct reject *)expr;
+
+ if (reject->type)
+ mnl_attr_put_u32(nlh, NFTA_REJECT_TYPE, htonl(*reject->type));
+ if (reject->icmp_code)
+ mnl_attr_put_u8(nlh, NFTA_REJECT_ICMP_CODE, *reject->icmp_code);
+}
+
+static void build_rt(struct nlmsghdr *nlh, struct expr *expr)
+{
+ struct rt *rt = (struct rt *)expr;
+
+ if (rt->key)
+ mnl_attr_put_u32(nlh, NFTA_RT_KEY, htonl(*rt->key));
+ if (rt->dreg)
+ mnl_attr_put_u32(nlh, NFTA_RT_DREG, htonl(*rt->dreg));
+}
+
+static void build_socket(struct nlmsghdr *nlh, struct expr *expr)
+{
+ struct socket *socket = (struct socket *)expr;
+
+ if (socket->key)
+ mnl_attr_put_u32(nlh, NFTA_SOCKET_KEY, htonl(*socket->key));
+ if (socket->dreg)
+ mnl_attr_put_u32(nlh, NFTA_SOCKET_DREG, htonl(*socket->dreg));
+ if (socket->level)
+ mnl_attr_put_u32(nlh, NFTA_SOCKET_LEVEL, htonl(*socket->level));
+}
+
+static void build_synproxy(struct nlmsghdr *nlh, struct expr *expr)
+{
+ struct synproxy *synproxy = (struct synproxy *)expr;
+
+ if (synproxy->mss)
+ mnl_attr_put_u16(nlh, NFTA_SYNPROXY_MSS, htons(*synproxy->mss));
+ if (synproxy->wscale)
+ mnl_attr_put_u8(nlh, NFTA_SYNPROXY_WSCALE, *synproxy->wscale);
+ if (synproxy->flags)
+ mnl_attr_put_u32(nlh, NFTA_SYNPROXY_FLAGS,
+ htonl(*synproxy->flags));
+}
+
+static void build_target(struct nlmsghdr *nlh, struct expr *expr)
+{
+ struct target *tg = (struct target *)expr;
+
+ if (tg->name)
+ mnl_attr_put_strz(nlh, NFTA_TARGET_NAME, tg->name);
+ if (tg->rev)
+ mnl_attr_put_u32(nlh, NFTA_TARGET_REV, htonl(*tg->rev));
+ if (tg->data_len)
+ mnl_attr_put(nlh, NFTA_TARGET_INFO, tg->data_len, tg->data);
+}
+
+static void build_tproxy(struct nlmsghdr *nlh, struct expr *expr)
+{
+ struct tproxy *tproxy = (struct tproxy *)expr;
+
+ if (tproxy->nfproto)
+ mnl_attr_put_u32(nlh, NFTA_TPROXY_FAMILY, htonl(*tproxy->nfproto));
+
+ if (tproxy->sreg_addr)
+ mnl_attr_put_u32(nlh, NFTA_TPROXY_REG_ADDR,
+ htonl(*tproxy->sreg_addr));
+
+ if (tproxy->sreg_port)
+ mnl_attr_put_u32(nlh, NFTA_TPROXY_REG_PORT,
+ htonl(*tproxy->sreg_port));
+}
+
+static void build_tunnel(struct nlmsghdr *nlh, struct expr *expr)
+{
+ struct tunnel *tunnel = (struct tunnel *)expr;
+
+ if (tunnel->key)
+ mnl_attr_put_u32(nlh, NFTA_TUNNEL_KEY, htonl(*tunnel->key));
+ if (tunnel->dreg)
+ mnl_attr_put_u32(nlh, NFTA_TUNNEL_DREG, htonl(*tunnel->dreg));
+}
+
+static void build_xfrm(struct nlmsghdr *nlh, struct expr *expr)
+{
+ struct xfrm *x = (struct xfrm *)expr;
+
+ if (x->key)
+ mnl_attr_put_u32(nlh, NFTA_XFRM_KEY, htonl(*x->key));
+ if (x->dir)
+ mnl_attr_put_u8(nlh, NFTA_XFRM_DIR, *x->dir);
+ if (x->spnum)
+ mnl_attr_put_u32(nlh, NFTA_XFRM_SPNUM, htonl(*x->spnum));
+ if (x->dreg)
+ mnl_attr_put_u32(nlh, NFTA_XFRM_DREG, htonl(*x->dreg));
+}
+
/*
 * Dispatch table mapping an expression type (enum used as array index)
 * to its kernel-visible name and its attribute serializer.  Indexed by
 * expr->type in __build_expr(); a NULL .build means the expression
 * carries no NFTA_EXPR_DATA payload (e.g. "last" uses defaults).
 */
struct {
	const char *name;	/* NFTA_EXPR_NAME string the kernel matches on */
	void (*build)(struct nlmsghdr *nlh, struct expr *expr);
} handler[] = {
	[BITWISE] = { .name = "bitwise", .build = build_bitwise, },
	[BYTEORDER] = { .name = "byteorder", .build = build_byteorder, },
	[CMP] = { .name = "cmp", .build = build_cmp, },
	[CONNLIMIT] = { .name = "connlimit", .build = build_connlimit, },
	[COUNTER] = { .name = "counter", .build = build_counter, },
	[CT] = { .name = "ct", .build = build_ct, },
	[DUP] = { .name = "dup", .build = build_dup, },
	[DYNSET] = { .name = "dynset", .build = build_dynset, },
	[EXTHDR] = { .name = "exthdr", .build = build_exthdr, },
	[FIB] = { .name = "fib", .build = build_fib, },
	[FWD] = { .name = "fwd", .build = build_fwd, },
	[HASH] = { .name = "hash", .build = build_hash, },
	[INNER] = { .name = "inner", .build = build_inner, },
	[IMMEDIATE] = { .name = "immediate", .build = build_immediate, },
	[LAST] = { .name = "last", .build = build_last, },
	[LIMIT] = { .name = "limit", .build = build_limit, },
	[LOG] = { .name = "log", .build = build_log, },
	[LOOKUP] = { .name = "lookup", .build = build_lookup, },
	[MASQ] = { .name = "masq", .build = build_masq, },
	[MATCH] = { .name = "match", .build = build_match, },
	[META] = { .name = "meta", .build = build_meta, },
	[NAT] = { .name = "nat", .build = build_nat, },
	[NUMGEN] = { .name = "numgen", .build = build_numgen, },
	[OBJREF] = { .name = "objref", .build = build_objref, },
	[OSF] = { .name = "osf", .build = build_osf, },
	[PAYLOAD] = { .name = "payload", .build = build_payload, },
	[QUEUE] = { .name = "queue", .build = build_queue, },
	[QUOTA] = { .name = "quota", .build = build_quota, },
	[RANGE] = { .name = "range", .build = build_range, },
	[REDIR] = { .name = "redir", .build = build_redir, },
	[REJECT] = { .name = "reject", .build = build_reject, },
	[RT] = { .name = "rt", .build = build_rt, },
	[SOCKET] = { .name = "socket", .build = build_socket, },
	[SYNPROXY] = { .name = "synproxy", .build = build_synproxy, },
	[TARGET] = { .name = "target", .build = build_target, },
	[TPROXY] = { .name = "tproxy", .build = build_tproxy, },
	[TUNNEL] = { .name = "tunnel", .build = build_tunnel, },
	[XFRM] = { .name = "xfrm", .build = build_xfrm, },
};
+
/*
 * Emit one expression: its NFTA_EXPR_NAME string followed by an
 * NFTA_EXPR_DATA nest filled in by the type-specific serializer from
 * the handler[] table.  Expressions without a serializer get only the
 * name attribute.  NOTE(review): expr->type is used as an unchecked
 * array index; callers are assumed to only construct known types.
 */
static void __build_expr(struct nlmsghdr *nlh, struct expr *expr)
{
	struct nlattr *nest;

	mnl_attr_put_strz(nlh, NFTA_EXPR_NAME, handler[expr->type].name);

	if (!handler[expr->type].build)
		return;

	nest = mnl_attr_nest_start(nlh, NFTA_EXPR_DATA);
	handler[expr->type].build(nlh, expr);
	mnl_attr_nest_end(nlh, nest);
}
+
/*
 * Fill a NEWRULE/DELRULE message body: table/chain identification,
 * position hints, userdata, the nested expression list and the
 * optional xtables compat block.  All fields are optional so the
 * fuzzer can produce partially-specified (invalid) rules on purpose.
 */
static void __build_rule(struct nlmsghdr *nlh, struct rule *rule)
{
	struct nlattr *nest, *nest2;
	struct expr *expr;

	if (rule->table)
		mnl_attr_put_strz(nlh, NFTA_RULE_TABLE, rule->table);
	if (rule->chain)
		mnl_attr_put_strz(nlh, NFTA_RULE_CHAIN, rule->chain);
/* FIXME
	if (rule->handle) {
		mnl_attr_put_u64(nlh, NFTA_RULE_HANDLE, htobe64(*rule->handle));
	} */
	if (rule->pos)
		mnl_attr_put_u64(nlh, NFTA_RULE_POSITION, htobe64(*rule->pos));
	if (rule->rule_id)
		mnl_attr_put_u32(nlh, NFTA_RULE_ID, htonl(*rule->rule_id));
	if (rule->userdata)
		mnl_attr_put(nlh, NFTA_RULE_USERDATA, strlen(rule->userdata),
			     rule->userdata);
	if (rule->pos_id)
		mnl_attr_put_u32(nlh, NFTA_RULE_POSITION_ID, htonl(*rule->pos_id));

	/* Expressions: one NFTA_LIST_ELEM nest per expression. */
	if (!list_empty(&rule->expr_list)) {
		nest = mnl_attr_nest_start(nlh, NFTA_RULE_EXPRESSIONS);
		list_for_each_entry(expr, &rule->expr_list, list) {
			nest2 = mnl_attr_nest_start(nlh, NFTA_LIST_ELEM);
			__build_expr(nlh, expr);
			mnl_attr_nest_end(nlh, nest2);
		}
		mnl_attr_nest_end(nlh, nest);
	}
	/* Legacy xtables compat metadata (protocol + flags). */
	if (rule->compat.l4proto || rule->compat.flags) {
		nest = mnl_attr_nest_start(nlh, NFTA_RULE_COMPAT);
		if (rule->compat.l4proto)
			mnl_attr_put_u32(nlh, NFTA_RULE_COMPAT_PROTO,
					 htonl(*rule->compat.l4proto));
		if (rule->compat.flags)
			mnl_attr_put_u32(nlh, NFTA_RULE_COMPAT_FLAGS,
					 htonl(*rule->compat.flags));
		mnl_attr_nest_end(nlh, nest);
	}
}
+
+static void build_rule(struct test_batch *batch, struct test_batch_cmd *cmd)
+{
+ struct rule *rule = cmd->obj;
+ struct nlmsghdr *nlh;
+
+ nlh = nftnl_nlmsg_build_hdr(mnl_nlmsg_batch_current(batch->batch),
+ NFT_MSG_NEWRULE, *rule->family,
+ NLM_F_APPEND | NLM_F_CREATE,
+ cmd->lineno);
+ __build_rule(nlh, rule);
+ mnl_nlmsg_batch_next(batch->batch);
+}
+
+static void __build_set_field(struct nlmsghdr *nlh, struct set *set)
+{
+ struct nlattr *nest;
+ int i;
+
+ nest = mnl_attr_nest_start(nlh, NFTA_SET_DESC_CONCAT);
+ for (i = 0; i < set->field_array->num; i++) {
+ struct nlattr *nest_elem;
+
+ nest_elem = mnl_attr_nest_start(nlh, NFTA_LIST_ELEM);
+ mnl_attr_put_u32(nlh, NFTA_SET_FIELD_LEN,
+ htonl(set->field_array->data[i]));
+ mnl_attr_nest_end(nlh, nest_elem);
+ }
+ mnl_attr_nest_end(nlh, nest);
+}
+
+static void __build_set_desc(struct nlmsghdr *nlh, struct set *set)
+{
+ struct nlattr *nest;
+
+ nest = mnl_attr_nest_start(nlh, NFTA_SET_DESC);
+ if (set->size)
+ mnl_attr_put_u32(nlh, NFTA_SET_DESC_SIZE, htonl(*set->size));
+ if (set->field_array)
+ __build_set_field(nlh, set);
+ mnl_attr_nest_end(nlh, nest);
+}
+
/*
 * Fill a NEWSET/DELSET message body from a parsed set definition.
 * Every attribute is optional.  A single attached expression is sent
 * as NFTA_SET_EXPR; two or more go into an NFTA_SET_EXPRESSIONS list,
 * mirroring the kernel's dual encoding.
 * NOTE(review): the batch parameter is currently unused here.
 */
static void __build_set(struct nlmsghdr *nlh, struct set *set,
			struct test_batch *batch)
{
	uint32_t num_exprs = 0;

	if (set->table)
		mnl_attr_put_strz(nlh, NFTA_SET_TABLE, set->table);
	if (set->name)
		mnl_attr_put_strz(nlh, NFTA_SET_NAME, set->name);
	if (set->handle)
		mnl_attr_put_u64(nlh, NFTA_SET_HANDLE, htobe64(*set->handle));
	if (set->flags)
		mnl_attr_put_u32(nlh, NFTA_SET_FLAGS, htonl(*set->flags));
	if (set->key_type)
		mnl_attr_put_u32(nlh, NFTA_SET_KEY_TYPE, htonl(*set->key_type));
	if (set->key_len)
		mnl_attr_put_u32(nlh, NFTA_SET_KEY_LEN, htonl(*set->key_len));
	if (set->data_type)
		mnl_attr_put_u32(nlh, NFTA_SET_DATA_TYPE, htonl(*set->data_type));
	if (set->data_len)
		mnl_attr_put_u32(nlh, NFTA_SET_DATA_LEN, htonl(*set->data_len));
	if (set->obj_type)
		mnl_attr_put_u32(nlh, NFTA_SET_OBJ_TYPE, htonl(*set->obj_type));
	if (set->set_id)
		mnl_attr_put_u32(nlh, NFTA_SET_ID, htonl(*set->set_id));
	if (set->policy)
		mnl_attr_put_u32(nlh, NFTA_SET_POLICY, htonl(*set->policy));
	/* Size and concat-field info travel inside a SET_DESC nest. */
	if (set->field_array || set->size)
		__build_set_desc(nlh, set);
	if (set->timeout)
		mnl_attr_put_u64(nlh, NFTA_SET_TIMEOUT, htobe64(*set->timeout));
	if (set->gc_interval)
		mnl_attr_put_u32(nlh, NFTA_SET_GC_INTERVAL, htonl(*set->gc_interval));
	if (set->userdata)
		mnl_attr_put(nlh, NFTA_SET_USERDATA, strlen(set->userdata), set->userdata);
	if (!list_empty(&set->expr_list)) {
		struct expr *expr;

		/* Count first: the encoding differs for one vs. many. */
		list_for_each_entry(expr, &set->expr_list, list)
			num_exprs++;

		if (num_exprs == 1) {
			struct nlattr *nest1;

			nest1 = mnl_attr_nest_start(nlh, NFTA_SET_EXPR);
			list_for_each_entry(expr, &set->expr_list, list)
				__build_expr(nlh, expr);

			mnl_attr_nest_end(nlh, nest1);
		} else if (num_exprs > 1) {
			struct nlattr *nest1, *nest2;

			nest1 = mnl_attr_nest_start(nlh, NFTA_SET_EXPRESSIONS);
			list_for_each_entry(expr, &set->expr_list, list) {
				nest2 = mnl_attr_nest_start(nlh, NFTA_LIST_ELEM);
				__build_expr(nlh, expr);
				mnl_attr_nest_end(nlh, nest2);
			}
			mnl_attr_nest_end(nlh, nest1);
		}
	}
}
+
+static void build_set(struct test_batch *batch, struct test_batch_cmd *cmd)
+{
+ struct set *set = cmd->obj;
+ struct nlmsghdr *nlh;
+
+ nlh = nftnl_nlmsg_build_hdr(mnl_nlmsg_batch_current(batch->batch),
+ NFT_MSG_NEWSET, *set->family,
+ NLM_F_CREATE, cmd->lineno);
+ __build_set(nlh, set, batch);
+ mnl_nlmsg_batch_next(batch->batch);
+}
+
+static void __build_one_setelem(struct nlmsghdr *nlh, struct elem *elem,
+ struct test_batch *batch)
+{
+ uint32_t num_exprs = 0;
+ struct nlattr *nest;
+ struct expr *expr;
+
+ if (elem->flags) {
+ mnl_attr_put_u32(nlh, NFTA_SET_ELEM_FLAGS, htonl(*elem->flags));
+ }
+ if (elem->timeout) {
+ mnl_attr_put_u64(nlh, NFTA_SET_ELEM_TIMEOUT, htobe64(*elem->timeout));
+ }
+ if (elem->expiration) {
+ mnl_attr_put_u64(nlh, NFTA_SET_ELEM_EXPIRATION, htobe64(*elem->expiration));
+ }
+ if (elem->key) {
+ struct nlattr *nest1;
+
+ nest1 = mnl_attr_nest_start(nlh, NFTA_SET_ELEM_KEY);
+ mnl_attr_put(nlh, NFTA_DATA_VALUE, elem->key_len, elem->key);
+ mnl_attr_nest_end(nlh, nest1);
+ }
+ if (elem->key_end) {
+ struct nlattr *nest1;
+
+ nest1 = mnl_attr_nest_start(nlh, NFTA_SET_ELEM_KEY_END);
+ mnl_attr_put(nlh, NFTA_DATA_VALUE, elem->key_end_len,
+ elem->key_end);
+ mnl_attr_nest_end(nlh, nest1);
+ }
+ if (elem->verdict) {
+ struct nlattr *nest1, *nest2;
+
+ nest1 = mnl_attr_nest_start(nlh, NFTA_SET_ELEM_DATA);
+ nest2 = mnl_attr_nest_start(nlh, NFTA_DATA_VERDICT);
+ mnl_attr_put_u32(nlh, NFTA_VERDICT_CODE, htonl(*elem->verdict));
+ if (elem->chain)
+ mnl_attr_put_strz(nlh, NFTA_VERDICT_CHAIN, elem->chain);
+
+ mnl_attr_nest_end(nlh, nest1);
+ mnl_attr_nest_end(nlh, nest2);
+ }
+ if (elem->data_len) {
+ struct nlattr *nest1;
+
+ nest1 = mnl_attr_nest_start(nlh, NFTA_SET_ELEM_DATA);
+ mnl_attr_put(nlh, NFTA_DATA_VALUE, elem->data_len, elem->data);
+ mnl_attr_nest_end(nlh, nest1);
+ }
+ if (elem->userdata) {
+ mnl_attr_put(nlh, NFTA_SET_ELEM_USERDATA, strlen(elem->userdata), elem->userdata);
+ }
+
+ if (elem->objname)
+ mnl_attr_put_strz(nlh, NFTA_SET_ELEM_OBJREF, elem->objname);
+
+ if (!list_empty(&elem->expr_list)) {
+ list_for_each_entry(expr, &elem->expr_list, list)
+ num_exprs++;
+
+ if (num_exprs == 1) {
+ struct nlattr *nest1;
+
+ nest1 = mnl_attr_nest_start(nlh, NFTA_SET_ELEM_EXPR);
+ list_for_each_entry(expr, &elem->expr_list, list)
+ __build_expr(nlh, expr);
+
+ mnl_attr_nest_end(nlh, nest1);
+ } else if (num_exprs > 1) {
+ struct nlattr *nest1, *nest2;
+
+ nest1 = mnl_attr_nest_start(nlh, NFTA_SET_ELEM_EXPRESSIONS);
+ list_for_each_entry(expr, &elem->expr_list, list) {
+ nest2 = mnl_attr_nest_start(nlh, NFTA_LIST_ELEM);
+ __build_expr(nlh, expr);
+ mnl_attr_nest_end(nlh, nest2);
+ }
+ mnl_attr_nest_end(nlh, nest1);
+ }
+ }
+}
+
+
+static void __build_setelem(struct nlmsghdr *nlh, struct elem *elem,
+ struct test_batch *batch)
+{
+ struct nlattr *nest1, *nest2;
+
+ if (elem->set)
+ mnl_attr_put_strz(nlh, NFTA_SET_ELEM_LIST_SET, elem->set);
+ if (elem->set_id)
+ mnl_attr_put_u32(nlh, NFTA_SET_ELEM_LIST_SET_ID, htonl(elem->set_id));
+ if (elem->table)
+ mnl_attr_put_strz(nlh, NFTA_SET_ELEM_LIST_TABLE, elem->table);
+
+ nest1 = mnl_attr_nest_start(nlh, NFTA_SET_ELEM_LIST_ELEMENTS);
+ nest2 = mnl_attr_nest_start(nlh, 0);
+ __build_one_setelem(nlh, elem, batch);
+ mnl_attr_nest_end(nlh, nest2);
+ mnl_attr_nest_end(nlh, nest1);
+}
+
+static void build_setelem(struct test_batch *batch, struct test_batch_cmd *cmd)
+{
+ struct elem *elem = cmd->obj;
+ struct nlmsghdr *nlh;
+
+ nlh = nftnl_nlmsg_build_hdr(mnl_nlmsg_batch_current(batch->batch),
+ NFT_MSG_NEWSETELEM, *elem->family,
+ 0, cmd->lineno);
+ __build_setelem(nlh, elem, batch);
+ mnl_nlmsg_batch_next(batch->batch);
+}
+
+static void build_del_setelem(struct test_batch *batch, struct test_batch_cmd *cmd)
+{
+ struct elem *elem = cmd->obj;
+ struct nlmsghdr *nlh;
+
+ nlh = nftnl_nlmsg_build_hdr(mnl_nlmsg_batch_current(batch->batch),
+ NFT_MSG_DELSETELEM, *elem->family,
+ 0, cmd->lineno);
+ __build_setelem(nlh, elem, batch);
+ mnl_nlmsg_batch_next(batch->batch);
+}
+
/*
 * Fill a NEWFLOWTABLE/DELFLOWTABLE message body.  Hook number,
 * priority and the device list all live inside one NFTA_FLOWTABLE_HOOK
 * nest, which is only opened if at least one of them is present.
 */
static void __build_flowtable(struct nlmsghdr *nlh, struct flowtable *ft)
{
	struct nlattr *nest = NULL;

	if (ft->table)
		mnl_attr_put_strz(nlh, NFTA_FLOWTABLE_TABLE, ft->table);
	if (ft->name)
		mnl_attr_put_strz(nlh, NFTA_FLOWTABLE_NAME, ft->name);

	/* Open the hook nest lazily; 'nest' stays NULL otherwise. */
	if (ft->hooknum || ft->prio || ft->dev_array)
		nest = mnl_attr_nest_start(nlh, NFTA_FLOWTABLE_HOOK);

	if (ft->hooknum)
		mnl_attr_put_u32(nlh, NFTA_FLOWTABLE_HOOK_NUM, htonl(*ft->hooknum));
	if (ft->prio)
		mnl_attr_put_u32(nlh, NFTA_FLOWTABLE_HOOK_PRIORITY, htonl(*ft->prio));

	if (ft->dev_array) {
		struct nlattr *nest_dev;
		unsigned int i;

		nest_dev = mnl_attr_nest_start(nlh, NFTA_FLOWTABLE_HOOK_DEVS);
		for (i = 0; i < ft->dev_array->num; i++) {
			mnl_attr_put_strz(nlh, NFTA_DEVICE_NAME, ft->dev_array->data[i]);
		}
		mnl_attr_nest_end(nlh, nest_dev);
	}

	if (nest)
		mnl_attr_nest_end(nlh, nest);

/* if (ft->flags)
		mnl_attr_put_u32(nlh, NFTA_FLOWTABLE_FLAGS, htonl(ft->flags)); */
/* if (ft->handle)
		mnl_attr_put_u64(nlh, NFTA_FLOWTABLE_HANDLE, htobe64(ft->handle)); */
}
+
+static void build_add_flowtable(struct test_batch *batch, struct test_batch_cmd *cmd)
+{
+ struct flowtable *ft = cmd->obj;
+ struct nlmsghdr *nlh;
+
+ nlh = nftnl_nlmsg_build_hdr(mnl_nlmsg_batch_current(batch->batch),
+ NFT_MSG_NEWFLOWTABLE, *ft->family,
+ NLM_F_CREATE, cmd->lineno);
+ __build_flowtable(nlh, ft);
+ mnl_nlmsg_batch_next(batch->batch);
+}
+
+static void build_del_flowtable(struct test_batch *batch, struct test_batch_cmd *cmd)
+{
+ struct flowtable *ft = cmd->obj;
+ struct nlmsghdr *nlh;
+
+ nlh = nftnl_nlmsg_build_hdr(mnl_nlmsg_batch_current(batch->batch),
+ NFT_MSG_DELFLOWTABLE, *ft->family,
+ NLM_F_CREATE, cmd->lineno);
+ __build_flowtable(nlh, ft);
+ mnl_nlmsg_batch_next(batch->batch);
+}
+
/*
 * Serialize the type-specific payload of a stateful object into the
 * NFTA_OBJ_DATA nest (opened by the caller, __build_obj()).  The
 * attribute constants are reused from the corresponding expression
 * families (counter, quota, limit, ...).  No default case: object
 * types outside this list get an empty payload.
 */
static void __build_obj_type(struct nlmsghdr *nlh, struct obj *obj)
{
	switch (*obj->type) {
	case NFT_OBJECT_COUNTER:
		if (obj->u.counter->bytes)
			mnl_attr_put_u64(nlh, NFTA_COUNTER_BYTES, htobe64(*obj->u.counter->bytes));
		if (obj->u.counter->pkts)
			mnl_attr_put_u64(nlh, NFTA_COUNTER_PACKETS, htobe64(*obj->u.counter->pkts));
		break;
	case NFT_OBJECT_QUOTA:
		if (obj->u.quota->bytes)
			mnl_attr_put_u64(nlh, NFTA_QUOTA_BYTES, htobe64(*obj->u.quota->bytes));
		if (obj->u.quota->consumed)
			mnl_attr_put_u64(nlh, NFTA_QUOTA_CONSUMED, htobe64(*obj->u.quota->consumed));
		if (obj->u.quota->flags)
			mnl_attr_put_u32(nlh, NFTA_QUOTA_FLAGS, htonl(*obj->u.quota->flags));
		break;
	case NFT_OBJECT_LIMIT:
		if (obj->u.limit->rate)
			mnl_attr_put_u64(nlh, NFTA_LIMIT_RATE, htobe64(*obj->u.limit->rate));
		if (obj->u.limit->unit)
			mnl_attr_put_u64(nlh, NFTA_LIMIT_UNIT, htobe64(*obj->u.limit->unit));
		if (obj->u.limit->burst)
			mnl_attr_put_u32(nlh, NFTA_LIMIT_BURST, htonl(*obj->u.limit->burst));
		if (obj->u.limit->type)
			mnl_attr_put_u32(nlh, NFTA_LIMIT_TYPE, htonl(*obj->u.limit->type));
		if (obj->u.limit->flags)
			mnl_attr_put_u32(nlh, NFTA_LIMIT_FLAGS, htonl(*obj->u.limit->flags));
		break;
	case NFT_OBJECT_CONNLIMIT:
		if (obj->u.connlimit->count)
			mnl_attr_put_u32(nlh, NFTA_CONNLIMIT_COUNT,
					 htonl(*obj->u.connlimit->count));
		if (obj->u.connlimit->flags)
			mnl_attr_put_u32(nlh, NFTA_CONNLIMIT_FLAGS,
					 htonl(*obj->u.connlimit->flags));
		break;
	case NFT_OBJECT_TUNNEL:
		if (obj->u.tun->id)
			mnl_attr_put_u32(nlh, NFTA_TUNNEL_KEY_ID,
					 htonl(*obj->u.tun->id));
		/* v4 endpoints: put raw, no htonl — presumably already
		 * stored in network byte order; TODO confirm at parser. */
		if (obj->u.tun->src_v4 ||
		    obj->u.tun->dst_v4) {
			struct nlattr *nest;

			nest = mnl_attr_nest_start(nlh, NFTA_TUNNEL_KEY_IP);
			if (obj->u.tun->src_v4)
				mnl_attr_put_u32(nlh, NFTA_TUNNEL_KEY_IP_SRC, *obj->u.tun->src_v4);
			if (obj->u.tun->dst_v4)
				mnl_attr_put_u32(nlh, NFTA_TUNNEL_KEY_IP_DST, *obj->u.tun->dst_v4);
			mnl_attr_nest_end(nlh, nest);
		}
		if (obj->u.tun->src_v6 ||
		    obj->u.tun->dst_v6) {
			struct nlattr *nest;

			nest = mnl_attr_nest_start(nlh, NFTA_TUNNEL_KEY_IP6);
			if (obj->u.tun->src_v6)
				mnl_attr_put(nlh, NFTA_TUNNEL_KEY_IP6_SRC,
					     sizeof(obj->u.tun->_src_v6),
					     obj->u.tun->src_v6);
			if (obj->u.tun->dst_v6)
				mnl_attr_put(nlh, NFTA_TUNNEL_KEY_IP6_DST,
					     sizeof(obj->u.tun->_dst_v6),
					     obj->u.tun->dst_v6);
			if (obj->u.tun->flowlabel)
				mnl_attr_put_u32(nlh, NFTA_TUNNEL_KEY_IP6_FLOWLABEL,
						 htonl(*obj->u.tun->flowlabel));
			mnl_attr_nest_end(nlh, nest);
		}
		if (obj->u.tun->sport)
			mnl_attr_put_u16(nlh, NFTA_TUNNEL_KEY_SPORT, htons(*obj->u.tun->sport));
		if (obj->u.tun->dport)
			mnl_attr_put_u16(nlh, NFTA_TUNNEL_KEY_DPORT, htons(*obj->u.tun->dport));
		if (obj->u.tun->tos)
			mnl_attr_put_u8(nlh, NFTA_TUNNEL_KEY_TOS, *obj->u.tun->tos);
		if (obj->u.tun->ttl)
			mnl_attr_put_u8(nlh, NFTA_TUNNEL_KEY_TTL, *obj->u.tun->ttl);
		if (obj->u.tun->flags)
			mnl_attr_put_u32(nlh, NFTA_TUNNEL_KEY_FLAGS, htonl(*obj->u.tun->flags));
		break;
	case NFT_OBJECT_CT_EXPECT:
		if (obj->u.ct_expect->l3proto)
			mnl_attr_put_u16(nlh, NFTA_CT_EXPECT_L3PROTO, htons(*obj->u.ct_expect->l3proto));
		if (obj->u.ct_expect->l4proto)
			mnl_attr_put_u8(nlh, NFTA_CT_EXPECT_L4PROTO, *obj->u.ct_expect->l4proto);
		if (obj->u.ct_expect->dport)
			mnl_attr_put_u16(nlh, NFTA_CT_EXPECT_DPORT, htons(*obj->u.ct_expect->dport));
		if (obj->u.ct_expect->timeout)
			mnl_attr_put_u32(nlh, NFTA_CT_EXPECT_TIMEOUT, htonl(*obj->u.ct_expect->timeout));
		if (obj->u.ct_expect->size)
			mnl_attr_put_u8(nlh, NFTA_CT_EXPECT_SIZE, *obj->u.ct_expect->size);
		break;
	case NFT_OBJECT_SYNPROXY:
		if (obj->u.synproxy->mss)
			mnl_attr_put_u16(nlh, NFTA_SYNPROXY_MSS, htons(*obj->u.synproxy->mss));
		if (obj->u.synproxy->wscale)
			mnl_attr_put_u8(nlh, NFTA_SYNPROXY_WSCALE, *obj->u.synproxy->wscale);
		if (obj->u.synproxy->flags)
			mnl_attr_put_u32(nlh, NFTA_SYNPROXY_FLAGS, htonl(*obj->u.synproxy->flags));
		break;
	case NFT_OBJECT_CT_HELPER:
		if (obj->u.ct_helper->name)
			mnl_attr_put_str(nlh, NFTA_CT_HELPER_NAME, obj->u.ct_helper->name);
		if (obj->u.ct_helper->l3proto)
			mnl_attr_put_u16(nlh, NFTA_CT_HELPER_L3PROTO, htons(*obj->u.ct_helper->l3proto));
		if (obj->u.ct_helper->l4proto)
			mnl_attr_put_u8(nlh, NFTA_CT_HELPER_L4PROTO, *obj->u.ct_helper->l4proto);
		break;
	case NFT_OBJECT_CT_TIMEOUT:
		if (obj->u.ct_timeout->l3proto)
			mnl_attr_put_u16(nlh, NFTA_CT_TIMEOUT_L3PROTO, htons(*obj->u.ct_timeout->l3proto));
		if (obj->u.ct_timeout->l4proto)
			mnl_attr_put_u8(nlh, NFTA_CT_TIMEOUT_L4PROTO, *obj->u.ct_timeout->l4proto);
		if (obj->u.ct_timeout->timeout_array) {
			struct nlattr *nest;
			int i;

			/* Per-state timeouts; attribute type is the
			 * 1-based state index. */
			nest = mnl_attr_nest_start(nlh, NFTA_CT_TIMEOUT_DATA);
			for (i = 0; i < obj->u.ct_timeout->timeout_array->num; i++)
				mnl_attr_put_u32(nlh, i+1, htonl(obj->u.ct_timeout->timeout_array->data[i]));

			mnl_attr_nest_end(nlh, nest);
		}
		break;
	case NFT_OBJECT_SECMARK:
		if (obj->u.secmark->ctx)
			mnl_attr_put_str(nlh, NFTA_SECMARK_CTX, obj->u.secmark->ctx);
		break;
	}
}
+
+static void __build_obj_def(struct nlmsghdr *nlh, struct obj *obj)
+{
+ if (obj->table)
+ mnl_attr_put_strz(nlh, NFTA_OBJ_TABLE, obj->table);
+ if (obj->name)
+ mnl_attr_put_strz(nlh, NFTA_OBJ_NAME, obj->name);
+ if (obj->type)
+ mnl_attr_put_u32(nlh, NFTA_OBJ_TYPE, htonl(*obj->type));
+ if (obj->handle)
+ mnl_attr_put_u64(nlh, NFTA_OBJ_HANDLE, htobe64(*obj->handle));
+ if (obj->userdata)
+ mnl_attr_put(nlh, NFTA_OBJ_USERDATA, strlen(obj->userdata), obj->userdata);
+}
+
+static void __build_obj(struct nlmsghdr *nlh, struct obj *obj)
+{
+ __build_obj_def(nlh, obj);
+
+ if (obj->type &&
+ *obj->type > NFT_OBJECT_UNSPEC &&
+ *obj->type <= NFT_OBJECT_MAX) {
+ struct nlattr *nest = mnl_attr_nest_start(nlh, NFTA_OBJ_DATA);
+ __build_obj_type(nlh, obj);
+ mnl_attr_nest_end(nlh, nest);
+ }
+}
+
+static void build_obj(struct test_batch *batch, struct test_batch_cmd *cmd)
+{
+ struct obj *obj = cmd->obj;
+ struct nlmsghdr *nlh;
+
+ nlh = nftnl_nlmsg_build_hdr(mnl_nlmsg_batch_current(batch->batch),
+ NFT_MSG_NEWOBJ, *obj->family,
+ NLM_F_CREATE, cmd->lineno);
+ __build_obj(nlh, obj);
+ mnl_nlmsg_batch_next(batch->batch);
+}
+
+static void build_del_obj(struct test_batch *batch, struct test_batch_cmd *cmd)
+{
+ struct obj *obj = cmd->obj;
+ struct nlmsghdr *nlh;
+
+ nlh = nftnl_nlmsg_build_hdr(mnl_nlmsg_batch_current(batch->batch),
+ NFT_MSG_DELOBJ, *obj->family,
+ 0, cmd->lineno);
+ __build_obj_def(nlh, obj);
+ mnl_nlmsg_batch_next(batch->batch);
+}
+
+static void build_del_chain(struct test_batch *batch, struct test_batch_cmd *cmd)
+{
+ struct chain *chain = cmd->obj;
+ struct nlmsghdr *nlh;
+
+ nlh = nftnl_nlmsg_build_hdr(mnl_nlmsg_batch_current(batch->batch),
+ NFT_MSG_DELCHAIN, get_nfproto(chain->family),
+ 0, cmd->lineno);
+ __build_chain(nlh, chain);
+ mnl_nlmsg_batch_next(batch->batch);
+}
+
+void del_chain(struct test_batch *batch, struct chain *chain)
+{
+ if (batch->ctx.table) {
+ chain->table = strdup(batch->ctx.table);
+ *chain->family = batch->ctx.family;
+ }
+
+ add_cmd(batch, DEL_CHAIN, chain);
+}
+
+static void build_del_rule(struct test_batch *batch, struct test_batch_cmd *cmd)
+{
+ struct rule *rule = cmd->obj;
+ struct nlmsghdr *nlh;
+
+ nlh = nftnl_nlmsg_build_hdr(mnl_nlmsg_batch_current(batch->batch),
+ NFT_MSG_DELRULE, *rule->family,
+ 0, cmd->lineno);
+ __build_rule(nlh, rule);
+ mnl_nlmsg_batch_next(batch->batch);
+}
+
+void del_rule(struct test_batch *batch, struct rule *rule)
+{
+ if (batch->ctx.table) {
+ rule->table = strdup(batch->ctx.table);
+ *rule->family = batch->ctx.family;
+ }
+
+ add_cmd(batch, DEL_RULE, rule);
+}
+
+static void build_del_set(struct test_batch *batch, struct test_batch_cmd *cmd)
+{
+ struct set *set = cmd->obj;
+ struct nlmsghdr *nlh;
+
+ nlh = nftnl_nlmsg_build_hdr(mnl_nlmsg_batch_current(batch->batch),
+ NFT_MSG_DELSET, *set->family,
+ 0, cmd->lineno);
+ __build_set(nlh, set, batch);
+ mnl_nlmsg_batch_next(batch->batch);
+}
+
+void del_set(struct test_batch *batch, struct set *set)
+{
+ if (batch->ctx.table) {
+ set->table = strdup(batch->ctx.table);
+ *set->family = batch->ctx.family;
+ }
+
+ add_cmd(batch, DEL_SET, set);
+}
+
+static void build_flush_set(struct test_batch *batch, struct test_batch_cmd *cmd)
+{
+ struct nlattr *nest1, *nest2;
+ struct set *set = cmd->obj;
+ struct nlmsghdr *nlh;
+
+ nlh = nftnl_nlmsg_build_hdr(mnl_nlmsg_batch_current(batch->batch),
+ NFT_MSG_DELSETELEM, *set->family,
+ 0, cmd->lineno);
+ if (set->name)
+ mnl_attr_put_strz(nlh, NFTA_SET_ELEM_LIST_SET, set->name);
+ if (set->set_id)
+ mnl_attr_put_u32(nlh, NFTA_SET_ELEM_LIST_SET_ID, htonl(*set->set_id));
+ if (set->table)
+ mnl_attr_put_strz(nlh, NFTA_SET_ELEM_LIST_TABLE, set->table);
+ mnl_nlmsg_batch_next(batch->batch);
+}
+
+void flush_set(struct test_batch *batch, struct set *set)
+{
+ if (batch->ctx.table) {
+ set->table = strdup(batch->ctx.table);
+ *set->family = batch->ctx.family;
+ }
+
+ add_cmd(batch, FLUSH_SET, set);
+}
+
+static int mnl_batch_extack_cb(const struct nlmsghdr *nlh, void *data)
+{
+ const struct nlmsgerr *err = mnl_nlmsg_get_payload(nlh);
+ int errval;
+
+ if (nlh->nlmsg_len < mnl_nlmsg_size(sizeof(struct nlmsgerr)))
+ return MNL_CB_ERROR;
+
+ if (err->error < 0)
+ errval = -err->error;
+ else
+ errval = err->error;
+
+ if (errval) {
+ errno = errval;
+ if (errout)
+ printf("Error: %s, line %u\n", strerror(errno), nlh->nlmsg_seq);
+
+ return MNL_CB_ERROR;
+ }
+
+ return MNL_CB_OK;
+}
+
+static int nft_test_recv(struct mnl_socket *nl)
+{
+ static mnl_cb_t cb_ctl_array[NLMSG_MIN_TYPE] = {
+ [NLMSG_ERROR] = mnl_batch_extack_cb,
+ };
+ char buf[MNL_SOCKET_BUFFER_SIZE * 16];
+ bool mnl_error = false;
+ struct timeval tv;
+ fd_set readfds;
+ int ret;
+
+ FD_ZERO(&readfds);
+ FD_SET(mnl_socket_get_fd(nl), &readfds);
+
+ while (1) {
+ struct timeval tv = {};
+
+ ret = select(mnl_socket_get_fd(nl) + 1, &readfds, NULL, NULL, &tv);
+ if (ret == -1) {
+ perror("unexpected select() fails");
+ return -1;
+ }
+
+ if (!FD_ISSET(mnl_socket_get_fd(nl), &readfds))
+ break;
+
+ ret = mnl_socket_recvfrom(nl, buf, sizeof(buf));
+ if (ret == -1) {
+ /* enobufs means too many errors in batch. */
+ if (errno != ENOBUFS)
+ perror("unexpected recv() fails");
+ return -1;
+ }
+
+ ret = mnl_cb_run2(buf, ret, 0, mnl_socket_get_portid(nl), NULL, NULL,
+ cb_ctl_array, MNL_ARRAY_SIZE(cb_ctl_array));
+ if (ret < 0)
+ mnl_error = true;
+ }
+
+ return mnl_error ? -1 : 0;
+}
+
/* Terminate the batch with a NFNL_MSG_BATCH_END record so the kernel
 * commits (rather than aborts) the transaction. */
static void setup_commit(struct mnl_nlmsg_batch *batch, unsigned int seq)
{
	void *tail = mnl_nlmsg_batch_current(batch);

	nftnl_batch_end(tail, seq);
	mnl_nlmsg_batch_next(batch);
}
+
/* Push the assembled batch to the kernel and collect the replies.
 * A send failure is fatal; the return value reflects whether any
 * kernel error message came back. */
static int __send(struct mnl_socket *nl, struct mnl_nlmsg_batch *batch)
{
	if (mnl_socket_sendto(nl, mnl_nlmsg_batch_head(batch),
			      mnl_nlmsg_batch_size(batch)) == -1) {
		perror("unexpected send() fails");
		exit(EXIT_FAILURE);
	}

	return nft_test_recv(nl);
}
+
/*
 * Release one queued command: dispatch to the destructor matching the
 * object kind carried by cmd->type, then unlink the command from the
 * batch list and free it.  Commands sharing an object kind (add/del/
 * set variants) share a destructor.
 */
static void batch_cmd_free(struct test_batch_cmd *cmd)
{
	switch (cmd->type) {
	case ADD_TABLE:
	case SET_TABLE:
	case DEL_TABLE:
		free_table(cmd->obj);
		break;
	case ADD_BASECHAIN:
	case ADD_CHAIN:
	case DEL_BASECHAIN:
	case DEL_CHAIN:
		free_chain(cmd->obj);
		break;
	case ADD_RULE:
	case DEL_RULE:
		free_rule(cmd->obj);
		break;
	case ADD_SET:
	case SET_SET:
	case DEL_SET:
	case FLUSH_SET:
		free_set(cmd->obj);
		break;
	case ADD_SETELEM:
	case DEL_SETELEM:
		free_elem(cmd->obj);
		break;
	case ADD_FLOWTABLE:
	case DEL_FLOWTABLE:
		free_flowtable(cmd->obj);
		break;
	case ADD_OBJECT:
	case DEL_OBJECT:
		free_obj(cmd->obj);
		break;
	}

	list_del(&cmd->list);
	free(cmd);
}
+
+static int __build(struct test_batch *batch)
+{
+ struct test_batch_cmd *cmd, *next;
+
+ list_for_each_entry_safe(cmd, next, &batch->cmd, list) {
+ switch (cmd->type) {
+ case ADD_TABLE:
+ build_table(batch, cmd);
+ break;
+ case DEL_TABLE:
+ build_del_table(batch, cmd);
+ break;
+ case SET_TABLE:
+ case SET_SET:
+ /* release table in context. */
+ break;
+ case ADD_BASECHAIN:
+ case ADD_CHAIN:
+ build_chain(batch, cmd);
+ break;
+ case DEL_BASECHAIN:
+ case DEL_CHAIN:
+ build_del_chain(batch, cmd);
+ break;
+ case ADD_RULE:
+ build_rule(batch, cmd);
+ break;
+ case DEL_RULE:
+ build_del_rule(batch, cmd);
+ break;
+ case ADD_SET:
+ build_set(batch, cmd);
+ break;
+ case DEL_SET:
+ build_del_set(batch, cmd);
+ break;
+ case FLUSH_SET:
+ build_flush_set(batch, cmd);
+ break;
+ case ADD_SETELEM:
+ build_setelem(batch, cmd);
+ break;
+ case DEL_SETELEM:
+ build_del_setelem(batch, cmd);
+ break;
+ case ADD_OBJECT:
+ build_obj(batch, cmd);
+ break;
+ case DEL_OBJECT:
+ build_del_obj(batch, cmd);
+ break;
+ case ADD_FLOWTABLE:
+ build_add_flowtable(batch, cmd);
+ break;
+ case DEL_FLOWTABLE:
+ build_del_flowtable(batch, cmd);
+ break;
+ }
+
+ batch_cmd_free(cmd);
+ }
+}
+
+int batch_abort(struct test_batch *batch)
+{
+ int ret;
+
+ __build(batch);
+
+ ret = __send(batch->nl, batch->batch);
+
+ batch_reset(batch);
+
+ return ret;
+}
+
+int batch_commit(struct test_batch *batch)
+{
+ int ret;
+
+ __build(batch);
+
+ setup_commit(batch->batch, UINT32_MAX);
+
+ ret = __send(batch->nl, batch->batch);
+
+ batch_reset(batch);
+
+ return ret;
+}
+
+void batch_stop(struct test_batch *batch)
+{
+ mnl_nlmsg_batch_stop(batch->batch);
+ mnl_socket_close(batch->nl);
+}
+
+void batch_reset(struct test_batch *batch)
+{
+ memset(&batch->ctx, 0, sizeof(batch->ctx));
+ mnl_nlmsg_batch_stop(batch->batch);
+ batch->batch = __setup_batch(batch->buf, sizeof(batch->buf));
+}
+
+int flush_ruleset(struct mnl_socket *nl)
+{
+ char buf[MNL_SOCKET_BUFFER_SIZE];
+ struct mnl_nlmsg_batch *batch;
+ uint32_t seq, portid;
+ struct nlmsghdr *nlh;
+ int ret;
+
+ seq = time(NULL);
+ batch = mnl_nlmsg_batch_start(buf, sizeof(buf));
+
+ nftnl_batch_begin(mnl_nlmsg_batch_current(batch), seq++);
+ mnl_nlmsg_batch_next(batch);
+
+ nlh = nftnl_nlmsg_build_hdr(mnl_nlmsg_batch_current(batch),
+ NFT_MSG_DELTABLE, AF_UNSPEC,
+ NLM_F_ACK, seq++);
+ mnl_nlmsg_batch_next(batch);
+
+ nftnl_batch_end(mnl_nlmsg_batch_current(batch), seq++);
+ mnl_nlmsg_batch_next(batch);
+
+ portid = mnl_socket_get_portid(nl);
+
+ if (mnl_socket_sendto(nl, mnl_nlmsg_batch_head(batch),
+ mnl_nlmsg_batch_size(batch)) < 0) {
+ perror("mnl_socket_send");
+ return -1;
+ }
+
+ mnl_nlmsg_batch_stop(batch);
+
+ nft_test_recv(nl);
+
+ return 0;
+}
+
/* Report whether the running kernel has any taint flags set.  Returns
 * true (and prints the flags on stderr) if /proc/sys/kernel/tainted
 * holds a non-zero value; false on a clean kernel or if the file
 * cannot be read/parsed. */
bool kernel_is_tainted(void)
{
	bool tainted = false;
	unsigned int flags;
	FILE *f;

	f = fopen("/proc/sys/kernel/tainted", "r");
	if (!f)
		return false;

	if (fscanf(f, "%u", &flags) == 1 && flags != 0) {
		fprintf(stderr, "kernel is tainted: 0x%x\n", flags);
		tainted = true;
	}

	fclose(f);

	return tainted;
}
+
/* Receive buffer large enough for any single nftables netlink message
 * (64KiB payload ceiling plus one page of headroom). */
#define NFT_NLMSG_MAXSIZE (UINT16_MAX + getpagesize())

/* Read netlink replies for @portid and feed them through @cb until the
 * dump/ack terminates (mnl_cb_run() == 0), a hard error occurs, or the
 * socket drains.  Returns the last mnl_cb_run()/recvfrom() result. */
static int
nft_mnl_recv(struct mnl_socket *nl, uint32_t portid,
	     int (*cb)(const struct nlmsghdr *nlh, void *data), void *cb_data)
{
	char buf[NFT_NLMSG_MAXSIZE];
	int ret;

	ret = mnl_socket_recvfrom(nl, buf, sizeof(buf));
	while (ret > 0) {
		ret = mnl_cb_run(buf, ret, 0, portid, cb, cb_data);
		if (ret == 0)
			break;
		if (ret < 0) {
			/* EAGAIN: no more queued messages — treat as
			 * success.  NOTE(review): this relies on errno
			 * set by mnl_cb_run()/recvfrom just above. */
			if (errno == EAGAIN) {
				ret = 0;
				break;
			}
			/* EINTR: retry the read; any other errno is a
			 * real error and ends the loop. */
			if (errno != EINTR)
				break;
		}
		ret = mnl_socket_recvfrom(nl, buf, sizeof(buf));
	}

	return ret;
}
+
/* Send one pre-built netlink request of @len bytes and process all of
 * its replies through @cb.  Returns -1 if the send fails, otherwise
 * the result of nft_mnl_recv(). */
static int
nft_mnl_talk(struct mnl_socket *nl, const void *data, unsigned int len,
	     int (*cb)(const struct nlmsghdr *nlh, void *data), void *cb_data)
{
	if (mnl_socket_sendto(nl, data, len) < 0)
		return -1;

	return nft_mnl_recv(nl, mnl_socket_get_portid(nl), cb, cb_data);
}
+
+static int dump_hooks_cb(const struct nlmsghdr *nlh, void *_data)
+{
+ /* exercise hook dump path, no real parsing in userspace. */
+ return MNL_CB_OK;
+}
+
+static int dump_hooks(struct mnl_socket *nl, uint8_t family,
+ const char *devname, uint32_t hooknum)
+{
+ char buf[MNL_SOCKET_BUFFER_SIZE];
+ struct nlmsghdr *nlh = mnl_nlmsg_put_header(buf);
+ struct nfgenmsg *nfg;
+
+ nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
+ nlh->nlmsg_type = NFNL_SUBSYS_HOOK << 8;
+ nlh->nlmsg_seq = time(NULL);
+
+ nfg = mnl_nlmsg_put_extra_header(nlh, sizeof(*nfg));
+ nfg->nfgen_family = family;
+ nfg->version = NFNETLINK_V0;
+
+ if (devname)
+ mnl_attr_put_strz(nlh, NFNLA_HOOK_DEV, devname);
+
+ mnl_attr_put_u32(nlh, NFNLA_HOOK_HOOKNUM, htonl(hooknum));
+
+ return nft_mnl_talk(nl, nlh, nlh->nlmsg_len, dump_hooks_cb, NULL);
+}
+
/* Interfaces probed by the netdev hook dump; NULL-terminated.
 * NOTE(review): the dummy* entries presumably exist only on the test
 * rig — dump_hooks on a missing device is expected to just fail. */
static const char *devname[] = {
	"dummy0", "dummy1", "dummy2", "dummy3", "lo", NULL,
};
+
+/* detect UaF in hooks */
+int list_hooks(struct mnl_socket *nl)
+{
+ int i;
+
+ for (i = 0; i <= NF_INET_POST_ROUTING; i++) {
+ dump_hooks(nl, NFPROTO_IPV4, NULL, i);
+
+ if (kernel_is_tainted()) {
+ fprintf(stderr, "FATAL: list ip hooks taints kernel\n");
+ return -1;
+ }
+ }
+
+ for (i = 0; devname[i] != NULL; i++) {
+ dump_hooks(nl, NFPROTO_NETDEV, devname[i], NF_NETDEV_INGRESS);
+
+ if (kernel_is_tainted()) {
+ fprintf(stderr, "FATAL: list netdev hooks %s taints kernel\n", devname[i]);
+ return -1;
+ }
+ }
+
+ return 0;
+}