rule 246 net/if_pfsync.c if (sp->rule != htonl(-1) && sp->anchor == htonl(-1) && chksum_flag &&
rule 247 net/if_pfsync.c ntohl(sp->rule) <
rule 250 net/if_pfsync.c PF_RULESET_FILTER].active.ptr_array[ntohl(sp->rule)];
rule 279 net/if_pfsync.c st->rule.ptr = r;
rule 1200 net/if_pfsync.c if ((r = st->rule.ptr) == NULL)
rule 1201 net/if_pfsync.c sp->rule = htonl(-1);
rule 1203 net/if_pfsync.c sp->rule = htonl(r->nr);
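
The if_pfsync.c hits above show the two halves of rule serialization for state sync: on export the in-kernel rule pointer becomes a wire-format number (htonl(r->nr), or htonl(-1) when no rule is attached), and on import that number is compared against htonl(-1) and range-checked before being resolved back through the active ruleset array. A minimal standalone sketch of that pattern, with hypothetical struct and function names (not pf's own):

#include <arpa/inet.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_rule {
	uint32_t nr;		/* rule number within the active ruleset */
};

/* export: kernel pointer -> network-order rule number */
static uint32_t
export_rule(const struct demo_rule *r)
{
	return (r == NULL) ? htonl(-1) : htonl(r->nr);
}

/* import: network-order rule number -> pointer, with bounds check */
static struct demo_rule *
import_rule(uint32_t wire, struct demo_rule **active, uint32_t rcount)
{
	if (wire == htonl(-1) || ntohl(wire) >= rcount)
		return NULL;	/* unknown or out-of-range: no rule */
	return active[ntohl(wire)];
}

int
main(void)
{
	struct demo_rule r0 = { 0 }, r1 = { 1 };
	struct demo_rule *active[] = { &r0, &r1 };

	struct demo_rule *back = import_rule(export_rule(&r1), active, 2);
	printf("resolved rule nr %u\n", back ? back->nr : (unsigned)-1);

	/* a missing rule round-trips as -1 / NULL */
	back = import_rule(export_rule(NULL), active, 2);
	printf("missing rule -> %s\n", back == NULL ? "NULL" : "ptr");
	return 0;
}
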
rule 259 net/if_pfsync.h if ((st->rule.ptr->rule_flag & PFRULE_NOSYNC) || \
rule 243 net/pf.c (((*state)->rule.ptr->rt == PF_ROUTETO && \
rule 244 net/pf.c (*state)->rule.ptr->direction == PF_OUT) || \
rule 245 net/pf.c ((*state)->rule.ptr->rt == PF_REPLYTO && \
rule 246 net/pf.c (*state)->rule.ptr->direction == PF_IN)) && \
rule 265 net/pf.c s->rule.ptr->states++; \
rule 278 net/pf.c s->rule.ptr->states--; \
rule 310 net/pf.c if (a->rule.ptr > b->rule.ptr)
rule 312 net/pf.c if (a->rule.ptr < b->rule.ptr)
rule 630 net/pf.c if ((*state)->rule.ptr->max_src_conn &&
rule 631 net/pf.c (*state)->rule.ptr->max_src_conn <
rule 637 net/pf.c if ((*state)->rule.ptr->max_src_conn_rate.limit &&
rule 646 net/pf.c if ((*state)->rule.ptr->overload_tbl) {
rule 674 net/pf.c pfr_insert_kentry((*state)->rule.ptr->overload_tbl,
rule 678 net/pf.c if ((*state)->rule.ptr->flush) {
rule 699 net/pf.c ((*state)->rule.ptr->flush &
rule 701 net/pf.c (*state)->rule.ptr == st->rule.ptr)) {
rule 722 net/pf.c pf_insert_src_node(struct pf_src_node **sn, struct pf_rule *rule,
rule 730 net/pf.c if (rule->rule_flag & PFRULE_RULESRCTRACK ||
rule 731 net/pf.c rule->rpool.opts & PF_POOL_STICKYADDR)
rule 732 net/pf.c k.rule.ptr = rule;
rule 734 net/pf.c k.rule.ptr = NULL;
rule 739 net/pf.c if (!rule->max_src_nodes ||
rule 740 net/pf.c rule->src_nodes < rule->max_src_nodes)
rule 749 net/pf.c rule->max_src_conn_rate.limit,
rule 750 net/pf.c rule->max_src_conn_rate.seconds);
rule 753 net/pf.c if (rule->rule_flag & PFRULE_RULESRCTRACK ||
rule 754 net/pf.c rule->rpool.opts & PF_POOL_STICKYADDR)
rule 755 net/pf.c (*sn)->rule.ptr = rule;
rule 757 net/pf.c (*sn)->rule.ptr = NULL;
rule 770 net/pf.c (*sn)->ruletype = rule->action;
rule 771 net/pf.c if ((*sn)->rule.ptr != NULL)
rule 772 net/pf.c (*sn)->rule.ptr->src_nodes++;
rule 776 net/pf.c if (rule->max_src_states &&
rule 777 net/pf.c (*sn)->states >= rule->max_src_states) {
rule 903 net/pf.c timeout = state->rule.ptr->timeout[state->timeout];
rule 906 net/pf.c start = state->rule.ptr->timeout[PFTM_ADAPTIVE_START];
rule 908 net/pf.c end = state->rule.ptr->timeout[PFTM_ADAPTIVE_END];
rule 909 net/pf.c states = state->rule.ptr->states;
rule 941 net/pf.c if (cur->rule.ptr != NULL) {
rule 942 net/pf.c cur->rule.ptr->src_nodes--;
rule 943 net/pf.c if (cur->rule.ptr->states <= 0 &&
rule 944 net/pf.c cur->rule.ptr->max_src_nodes <= 0)
rule 945 net/pf.c pf_rm_rule(NULL, cur->rule.ptr);
rule 969 net/pf.c timeout = s->rule.ptr->timeout[PFTM_SRC_NODE];
rule 978 net/pf.c timeout = s->rule.ptr->timeout[PFTM_SRC_NODE];
rule 993 net/pf.c pf_send_tcp(cur->rule.ptr, cur->state_key->af,
rule 1021 net/pf.c if (--cur->rule.ptr->states <= 0 &&
rule 1022 net/pf.c cur->rule.ptr->src_nodes <= 0)
rule 1023 net/pf.c pf_rm_rule(NULL, cur->rule.ptr);
rule 2073 net/pf.c k.rule.ptr = r;
rule 2075 net/pf.c k.rule.ptr = NULL;
rule 2768 net/pf.c struct pf_rule *r = s->rule.ptr;
rule 3289 net/pf.c s->rule.ptr = r;
rule 3622 net/pf.c pf_send_tcp((*state)->rule.ptr, pd->af, pd->dst,
rule 3661 net/pf.c pf_send_tcp((*state)->rule.ptr, pd->af, &src->addr,
rule 3675 net/pf.c pf_send_tcp((*state)->rule.ptr, pd->af, pd->dst,
rule 3680 net/pf.c pf_send_tcp((*state)->rule.ptr, pd->af, &src->addr,
rule 3979 net/pf.c pf_send_tcp((*state)->rule.ptr, pd->af,
rule 3983 net/pf.c (*state)->rule.ptr->return_ttl, 1, 0,
rule 5445 net/pf.c r = s->rule.ptr;
rule 5475 net/pf.c r = s->rule.ptr;
rule 5499 net/pf.c r = s->rule.ptr;
rule 5514 net/pf.c r = s->rule.ptr;
rule 5820 net/pf.c r = s->rule.ptr;
rule 5850 net/pf.c r = s->rule.ptr;
rule 5874 net/pf.c r = s->rule.ptr;
rule 5889 net/pf.c r = s->rule.ptr;
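
Several of the pf.c hits above form a reference-counting pattern: each state increments its rule's states counter when inserted and decrements it when unlinked, and a rule that has already been removed from its ruleset is finally freed once both its state and source-node counts drop to zero (the pf_rm_rule(NULL, ...) calls). A simplified sketch of that idea, with hypothetical names and an on_queue flag standing in for pf's tqe_prev check:

#include <stdio.h>
#include <stdlib.h>

struct demo_rule {
	int states;	/* states currently referencing this rule */
	int src_nodes;	/* source nodes currently referencing it */
	int on_queue;	/* still linked in an active ruleset? */
};

static void
demo_rm_rule(struct demo_rule *r)
{
	printf("freeing detached rule %p\n", (void *)r);
	free(r);
}

static void
state_detach(struct demo_rule *r)
{
	/* last reference to an already-unlinked rule frees it */
	if (--r->states <= 0 && r->src_nodes <= 0 && !r->on_queue)
		demo_rm_rule(r);
}

int
main(void)
{
	struct demo_rule *r = calloc(1, sizeof(*r));

	r->on_queue = 0;	/* already removed from the ruleset */
	r->states = 1;		/* one state still references it */
	state_detach(r);	/* reference dropped -> rule is freed */
	return 0;
}
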
rule 245 net/pf_ioctl.c struct pf_rule *rule;
rule 259 net/pf_ioctl.c rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
rule 262 net/pf_ioctl.c rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
rule 268 net/pf_ioctl.c rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
rule 271 net/pf_ioctl.c rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
rule 274 net/pf_ioctl.c while ((rule != NULL) && (rule->nr != rule_number))
rule 275 net/pf_ioctl.c rule = TAILQ_NEXT(rule, entries);
rule 277 net/pf_ioctl.c if (rule == NULL)
rule 280 net/pf_ioctl.c return (&rule->rpool);
rule 309 net/pf_ioctl.c pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
rule 312 net/pf_ioctl.c if (rule->states <= 0) {
rule 318 net/pf_ioctl.c pf_tbladdr_remove(&rule->src.addr);
rule 319 net/pf_ioctl.c pf_tbladdr_remove(&rule->dst.addr);
rule 320 net/pf_ioctl.c if (rule->overload_tbl)
rule 321 net/pf_ioctl.c pfr_detach_table(rule->overload_tbl);
rule 323 net/pf_ioctl.c TAILQ_REMOVE(rulequeue, rule, entries);
rule 324 net/pf_ioctl.c rule->entries.tqe_prev = NULL;
rule 325 net/pf_ioctl.c rule->nr = -1;
rule 328 net/pf_ioctl.c if (rule->states > 0 || rule->src_nodes > 0 ||
rule 329 net/pf_ioctl.c rule->entries.tqe_prev != NULL)
rule 331 net/pf_ioctl.c pf_tag_unref(rule->tag);
rule 332 net/pf_ioctl.c pf_tag_unref(rule->match_tag);
rule 334 net/pf_ioctl.c if (rule->pqid != rule->qid)
rule 335 net/pf_ioctl.c pf_qid_unref(rule->pqid);
rule 336 net/pf_ioctl.c pf_qid_unref(rule->qid);
rule 338 net/pf_ioctl.c pf_rtlabel_remove(&rule->src.addr);
rule 339 net/pf_ioctl.c pf_rtlabel_remove(&rule->dst.addr);
rule 340 net/pf_ioctl.c pfi_dynaddr_remove(&rule->src.addr);
rule 341 net/pf_ioctl.c pfi_dynaddr_remove(&rule->dst.addr);
rule 343 net/pf_ioctl.c pf_tbladdr_remove(&rule->src.addr);
rule 344 net/pf_ioctl.c pf_tbladdr_remove(&rule->dst.addr);
rule 345 net/pf_ioctl.c if (rule->overload_tbl)
rule 346 net/pf_ioctl.c pfr_detach_table(rule->overload_tbl);
rule 348 net/pf_ioctl.c pfi_kif_unref(rule->kif, PFI_KIF_REF_RULE);
rule 349 net/pf_ioctl.c pf_anchor_remove(rule);
rule 350 net/pf_ioctl.c pf_empty_pool(&rule->rpool.list);
rule 351 net/pf_ioctl.c pool_put(&pf_rule_pl, rule);
rule 670 net/pf_ioctl.c struct pf_rule *rule;
rule 677 net/pf_ioctl.c while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
rule 678 net/pf_ioctl.c pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
rule 690 net/pf_ioctl.c struct pf_rule *rule;
rule 698 net/pf_ioctl.c while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
rule 699 net/pf_ioctl.c pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
rule 751 net/pf_ioctl.c pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule)
rule 756 net/pf_ioctl.c pf_hash_rule_addr(ctx, &rule->src);
rule 757 net/pf_ioctl.c pf_hash_rule_addr(ctx, &rule->dst);
rule 758 net/pf_ioctl.c PF_MD5_UPD_STR(rule, label);
rule 759 net/pf_ioctl.c PF_MD5_UPD_STR(rule, ifname);
rule 760 net/pf_ioctl.c PF_MD5_UPD_STR(rule, match_tagname);
rule 761 net/pf_ioctl.c PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
rule 762 net/pf_ioctl.c PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
rule 763 net/pf_ioctl.c PF_MD5_UPD_HTONL(rule, prob, y);
rule 764 net/pf_ioctl.c PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
rule 765 net/pf_ioctl.c PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
rule 766 net/pf_ioctl.c PF_MD5_UPD(rule, uid.op);
rule 767 net/pf_ioctl.c PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
rule 768 net/pf_ioctl.c PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
rule 769 net/pf_ioctl.c PF_MD5_UPD(rule, gid.op);
rule 770 net/pf_ioctl.c PF_MD5_UPD_HTONL(rule, rule_flag, y);
rule 771 net/pf_ioctl.c PF_MD5_UPD(rule, action);
rule 772 net/pf_ioctl.c PF_MD5_UPD(rule, direction);
rule 773 net/pf_ioctl.c PF_MD5_UPD(rule, af);
rule 774 net/pf_ioctl.c PF_MD5_UPD(rule, quick);
rule 775 net/pf_ioctl.c PF_MD5_UPD(rule, ifnot);
rule 776 net/pf_ioctl.c PF_MD5_UPD(rule, match_tag_not);
rule 777 net/pf_ioctl.c PF_MD5_UPD(rule, natpass);
rule 778 net/pf_ioctl.c PF_MD5_UPD(rule, keep_state);
rule 779 net/pf_ioctl.c PF_MD5_UPD(rule, proto);
rule 780 net/pf_ioctl.c PF_MD5_UPD(rule, type);
rule 781 net/pf_ioctl.c PF_MD5_UPD(rule, code);
rule 782 net/pf_ioctl.c PF_MD5_UPD(rule, flags);
rule 783 net/pf_ioctl.c PF_MD5_UPD(rule, flagset);
rule 784 net/pf_ioctl.c PF_MD5_UPD(rule, allow_opts);
rule 785 net/pf_ioctl.c PF_MD5_UPD(rule, rt);
rule 786 net/pf_ioctl.c PF_MD5_UPD(rule, tos);
rule 793 net/pf_ioctl.c struct pf_rule *rule, **old_array;
rule 834 net/pf_ioctl.c while ((rule = TAILQ_FIRST(old_rules)) != NULL)
rule 835 net/pf_ioctl.c pf_rm_rule(old_rules, rule);
rule 871 net/pf_ioctl.c sp->rule = s->rule.ptr->nr;
rule 919 net/pf_ioctl.c s->rule.ptr = &pf_default_rule;
rule 933 net/pf_ioctl.c struct pf_rule *rule;
rule 957 net/pf_ioctl.c TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
rule 959 net/pf_ioctl.c pf_hash_rule(&ctx, rule);
rule 960 net/pf_ioctl.c (rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
rule 1112 net/pf_ioctl.c struct pf_rule *rule, *tail;
rule 1122 net/pf_ioctl.c rs_num = pf_get_ruleset_number(pr->rule.action);
rule 1127 net/pf_ioctl.c if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
rule 1139 net/pf_ioctl.c rule = pool_get(&pf_rule_pl, PR_NOWAIT);
rule 1140 net/pf_ioctl.c if (rule == NULL) {
rule 1144 net/pf_ioctl.c bcopy(&pr->rule, rule, sizeof(struct pf_rule));
rule 1145 net/pf_ioctl.c rule->cuid = p->p_cred->p_ruid;
rule 1146 net/pf_ioctl.c rule->cpid = p->p_pid;
rule 1147 net/pf_ioctl.c rule->anchor = NULL;
rule 1148 net/pf_ioctl.c rule->kif = NULL;
rule 1149 net/pf_ioctl.c TAILQ_INIT(&rule->rpool.list);
rule 1151 net/pf_ioctl.c rule->states = 0;
rule 1152 net/pf_ioctl.c rule->src_nodes = 0;
rule 1153 net/pf_ioctl.c rule->entries.tqe_prev = NULL;
rule 1155 net/pf_ioctl.c if (rule->af == AF_INET) {
rule 1156 net/pf_ioctl.c pool_put(&pf_rule_pl, rule);
rule 1162 net/pf_ioctl.c if (rule->af == AF_INET6) {
rule 1163 net/pf_ioctl.c pool_put(&pf_rule_pl, rule);
rule 1171 net/pf_ioctl.c rule->nr = tail->nr + 1;
rule 1173 net/pf_ioctl.c rule->nr = 0;
rule 1174 net/pf_ioctl.c if (rule->ifname[0]) {
rule 1175 net/pf_ioctl.c rule->kif = pfi_kif_get(rule->ifname);
rule 1176 net/pf_ioctl.c if (rule->kif == NULL) {
rule 1177 net/pf_ioctl.c pool_put(&pf_rule_pl, rule);
rule 1181 net/pf_ioctl.c pfi_kif_ref(rule->kif, PFI_KIF_REF_RULE);
rule 1184 net/pf_ioctl.c if (rule->rtableid > 0 && !rtable_exists(rule->rtableid))
rule 1189 net/pf_ioctl.c if (rule->qname[0] != 0) {
rule 1190 net/pf_ioctl.c if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
rule 1192 net/pf_ioctl.c else if (rule->pqname[0] != 0) {
rule 1193 net/pf_ioctl.c if ((rule->pqid =
rule 1194 net/pf_ioctl.c pf_qname2qid(rule->pqname)) == 0)
rule 1197 net/pf_ioctl.c rule->pqid = rule->qid;
rule 1200 net/pf_ioctl.c if (rule->tagname[0])
rule 1201 net/pf_ioctl.c if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
rule 1203 net/pf_ioctl.c if (rule->match_tagname[0])
rule 1204 net/pf_ioctl.c if ((rule->match_tag =
rule 1205 net/pf_ioctl.c pf_tagname2tag(rule->match_tagname)) == 0)
rule 1207 net/pf_ioctl.c if (rule->rt && !rule->direction)
rule 1210 net/pf_ioctl.c if (!rule->log)
rule 1211 net/pf_ioctl.c rule->logif = 0;
rule 1212 net/pf_ioctl.c if (rule->logif >= PFLOGIFS_MAX)
rule 1215 net/pf_ioctl.c if (pf_rtlabel_add(&rule->src.addr) ||
rule 1216 net/pf_ioctl.c pf_rtlabel_add(&rule->dst.addr))
rule 1218 net/pf_ioctl.c if (pfi_dynaddr_setup(&rule->src.addr, rule->af))
rule 1220 net/pf_ioctl.c if (pfi_dynaddr_setup(&rule->dst.addr, rule->af))
rule 1222 net/pf_ioctl.c if (pf_tbladdr_setup(ruleset, &rule->src.addr))
rule 1224 net/pf_ioctl.c if (pf_tbladdr_setup(ruleset, &rule->dst.addr))
rule 1226 net/pf_ioctl.c if (pf_anchor_setup(rule, ruleset, pr->anchor_call))
rule 1232 net/pf_ioctl.c if (rule->overload_tblname[0]) {
rule 1233 net/pf_ioctl.c if ((rule->overload_tbl = pfr_attach_table(ruleset,
rule 1234 net/pf_ioctl.c rule->overload_tblname)) == NULL)
rule 1237 net/pf_ioctl.c rule->overload_tbl->pfrkt_flags |=
rule 1241 net/pf_ioctl.c pf_mv_pool(&pf_pabuf, &rule->rpool.list);
rule 1242 net/pf_ioctl.c if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
rule 1243 net/pf_ioctl.c (rule->action == PF_BINAT)) && rule->anchor == NULL) ||
rule 1244 net/pf_ioctl.c (rule->rt > PF_FASTROUTE)) &&
rule 1245 net/pf_ioctl.c (TAILQ_FIRST(&rule->rpool.list) == NULL))
rule 1249 net/pf_ioctl.c pf_rm_rule(NULL, rule);
rule 1252 net/pf_ioctl.c rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
rule 1253 net/pf_ioctl.c rule->evaluations = rule->packets[0] = rule->packets[1] =
rule 1254 net/pf_ioctl.c rule->bytes[0] = rule->bytes[1] = 0;
rule 1256 net/pf_ioctl.c rule, entries);
rule 1273 net/pf_ioctl.c rs_num = pf_get_ruleset_number(pr->rule.action);
rule 1291 net/pf_ioctl.c struct pf_rule *rule;
rule 1300 net/pf_ioctl.c rs_num = pf_get_ruleset_number(pr->rule.action);
rule 1309 net/pf_ioctl.c rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
rule 1310 net/pf_ioctl.c while ((rule != NULL) && (rule->nr != pr->nr))
rule 1311 net/pf_ioctl.c rule = TAILQ_NEXT(rule, entries);
rule 1312 net/pf_ioctl.c if (rule == NULL) {
rule 1316 net/pf_ioctl.c bcopy(rule, &pr->rule, sizeof(struct pf_rule));
rule 1317 net/pf_ioctl.c if (pf_anchor_copyout(ruleset, rule, pr)) {
rule 1321 net/pf_ioctl.c pfi_dynaddr_copyout(&pr->rule.src.addr);
rule 1322 net/pf_ioctl.c pfi_dynaddr_copyout(&pr->rule.dst.addr);
rule 1323 net/pf_ioctl.c pf_tbladdr_copyout(&pr->rule.src.addr);
rule 1324 net/pf_ioctl.c pf_tbladdr_copyout(&pr->rule.dst.addr);
rule 1325 net/pf_ioctl.c pf_rtlabel_copyout(&pr->rule.src.addr);
rule 1326 net/pf_ioctl.c pf_rtlabel_copyout(&pr->rule.dst.addr);
rule 1328 net/pf_ioctl.c if (rule->skip[i].ptr == NULL)
rule 1329 net/pf_ioctl.c pr->rule.skip[i].nr = -1;
rule 1331 net/pf_ioctl.c pr->rule.skip[i].nr =
rule 1332 net/pf_ioctl.c rule->skip[i].ptr->nr;
rule 1335 net/pf_ioctl.c rule->evaluations = 0;
rule 1336 net/pf_ioctl.c rule->packets[0] = rule->packets[1] = 0;
rule 1337 net/pf_ioctl.c rule->bytes[0] = rule->bytes[1] = 0;
rule 1366 net/pf_ioctl.c rs_num = pf_get_ruleset_number(pcr->rule.action);
rule 1381 net/pf_ioctl.c if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
rule 1393 net/pf_ioctl.c bcopy(&pcr->rule, newrule, sizeof(struct pf_rule));
rule 1900 net/pf_ioctl.c struct pf_rule *rule;
rule 1902 net/pf_ioctl.c TAILQ_FOREACH(rule,
rule 1904 net/pf_ioctl.c rule->evaluations = 0;
rule 1905 net/pf_ioctl.c rule->packets[0] = rule->packets[1] = 0;
rule 1906 net/pf_ioctl.c rule->bytes[0] = rule->bytes[1] = 0;
rule 2811 net/pf_ioctl.c if (n->rule.ptr != NULL)
rule 2812 net/pf_ioctl.c pstore->rule.nr = n->rule.ptr->nr;
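
The pf_hash_rule() hits above show how the ruleset checksum is built: selected rule fields are fed to a running digest in a fixed order, with multi-byte integers first converted to network byte order (the PF_MD5_UPD_HTONL/HTONS macros), so the checksum comes out identical on hosts of either endianness. pf uses MD5 for this; the sketch below substitutes a tiny FNV-1a hash purely so the example is self-contained, and the struct and field names are illustrative only:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_rule {
	uint32_t prob;
	uint16_t match_tag;
	uint8_t  action;
	uint8_t  direction;
	char     label[64];
};

static void
hash_update(uint64_t *h, const void *buf, size_t len)
{
	const unsigned char *p = buf;

	while (len--) {
		*h ^= *p++;
		*h *= 0x100000001b3ULL;	/* FNV-1a step */
	}
}

static uint64_t
hash_rule(const struct demo_rule *r)
{
	uint64_t h = 0xcbf29ce484222325ULL;	/* FNV offset basis */
	uint32_t prob = htonl(r->prob);		/* normalize byte order */
	uint16_t mtag = htons(r->match_tag);

	hash_update(&h, r->label, strlen(r->label));
	hash_update(&h, &prob, sizeof(prob));
	hash_update(&h, &mtag, sizeof(mtag));
	hash_update(&h, &r->action, sizeof(r->action));
	hash_update(&h, &r->direction, sizeof(r->direction));
	return h;
}

int
main(void)
{
	struct demo_rule r = { .prob = 0, .match_tag = 7,
	    .action = 1, .direction = 2, .label = "example" };

	printf("rule checksum: %016llx\n",
	    (unsigned long long)hash_rule(&r));
	return 0;
}
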
rule 1659 net/pf_norm.c if ((ts_fudge = state->rule.ptr->timeout[PFTM_TS_DIFF]) == 0)
rule 636 net/pfvar.h union pf_rule_ptr rule;
rule 734 net/pfvar.h union pf_rule_ptr rule;
rule 797 net/pfvar.h u_int32_t rule;
rule 1318 net/pfvar.h struct pf_rule rule;
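
The pfvar.h hits show states and source nodes embedding a union pf_rule_ptr. This union lets the same field hold a kernel pointer (rule.ptr) while the state lives in the kernel, and a plain rule number (rule.nr) once the state is copied out to userland, as the pf_ioctl.c line "pstore->rule.nr = n->rule.ptr->nr" above does. A minimal illustration with hypothetical names:

#include <stdint.h>
#include <stdio.h>

struct demo_rule {
	uint32_t nr;
};

union demo_rule_ptr {
	struct demo_rule *ptr;	/* in-kernel view */
	uint32_t	  nr;	/* exported/userland view */
};

int
main(void)
{
	struct demo_rule r = { .nr = 42 };
	union demo_rule_ptr in_kernel = { .ptr = &r };
	union demo_rule_ptr exported;

	/* copy-out replaces the pointer with the stable rule number */
	exported.nr = in_kernel.ptr->nr;
	printf("exported rule number: %u\n", exported.nr);
	return 0;
}
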