This source file includes the following definitions.
- pfattach
- pf_thread_create
- pfopen
- pfclose
- pf_get_pool
- pf_mv_pool
- pf_empty_pool
- pf_rm_rule
- tagname2tag
- tag2tagname
- tag_unref
- pf_tagname2tag
- pf_tag2tagname
- pf_tag_ref
- pf_tag_unref
- pf_rtlabel_add
- pf_rtlabel_remove
- pf_rtlabel_copyout
- pf_qname2qid
- pf_qid2qname
- pf_qid_unref
- pf_begin_altq
- pf_rollback_altq
- pf_commit_altq
- pf_enable_altq
- pf_disable_altq
- pf_begin_rules
- pf_rollback_rules
- pf_hash_rule_addr
- pf_hash_rule
- pf_commit_rules
- pf_state_export
- pf_state_import
- pf_setup_pfsync_matching
- pfioctl
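
Most of the file below is pfioctl(), the /dev/pf handler that services the DIOC* commands used by pfctl and other userland tools. As a rough, illustrative sketch of how that interface is driven from userland (this program is not part of the file; the include list follows common pfctl practice, and only pf_status fields that appear below — running, states, since — are printed), a minimal status query could look like this:

    /* minimal /dev/pf client: query pf status via DIOCGETSTATUS (run as root) */
    #include <sys/types.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <netinet/in.h>
    #include <net/pfvar.h>

    #include <err.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int
    main(void)
    {
    	struct pf_status status;
    	int dev;

    	/* O_RDONLY is enough: DIOCGETSTATUS is allowed without FWRITE in pfioctl() */
    	if ((dev = open("/dev/pf", O_RDONLY)) == -1)
    		err(1, "open(/dev/pf)");
    	if (ioctl(dev, DIOCGETSTATUS, &status) == -1)
    		err(1, "DIOCGETSTATUS");
    	printf("pf %s, %u states, since %u\n",
    	    status.running ? "enabled" : "disabled",
    	    (unsigned)status.states, (unsigned)status.since);
    	close(dev);
    	return (0);
    }

DIOCSTART and DIOCSTOP take no argument and simply toggle pf_status.running, as the corresponding cases in pfioctl() show.
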
1 /* copyright and license header */
38 #include "pfsync.h"
39
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/mbuf.h>
43 #include <sys/filio.h>
44 #include <sys/fcntl.h>
45 #include <sys/socket.h>
46 #include <sys/socketvar.h>
47 #include <sys/kernel.h>
48 #include <sys/time.h>
49 #include <sys/timeout.h>
50 #include <sys/pool.h>
51 #include <sys/proc.h>
52 #include <sys/malloc.h>
53 #include <sys/kthread.h>
54 #include <sys/rwlock.h>
55 #include <uvm/uvm_extern.h>
56
57 #include <net/if.h>
58 #include <net/if_types.h>
59 #include <net/route.h>
60
61 #include <netinet/in.h>
62 #include <netinet/in_var.h>
63 #include <netinet/in_systm.h>
64 #include <netinet/ip.h>
65 #include <netinet/ip_var.h>
66 #include <netinet/ip_icmp.h>
67
68 #include <dev/rndvar.h>
69 #include <crypto/md5.h>
70 #include <net/pfvar.h>
71
72 #if NPFSYNC > 0
73 #include <net/if_pfsync.h>
74 #endif
75
76 #if NPFLOG > 0
77 #include <net/if_pflog.h>
78 #endif
79
80 #ifdef INET6
81 #include <netinet/ip6.h>
82 #include <netinet/in_pcb.h>
83 #endif
84
85 #ifdef ALTQ
86 #include <altq/altq.h>
87 #endif
88
89 void pfattach(int);
90 void pf_thread_create(void *);
91 int pfopen(dev_t, int, int, struct proc *);
92 int pfclose(dev_t, int, int, struct proc *);
93 struct pf_pool *pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t,
94 u_int8_t, u_int8_t, u_int8_t);
95
96 void pf_mv_pool(struct pf_palist *, struct pf_palist *);
97 void pf_empty_pool(struct pf_palist *);
98 int pfioctl(dev_t, u_long, caddr_t, int, struct proc *);
99 #ifdef ALTQ
100 int pf_begin_altq(u_int32_t *);
101 int pf_rollback_altq(u_int32_t);
102 int pf_commit_altq(u_int32_t);
103 int pf_enable_altq(struct pf_altq *);
104 int pf_disable_altq(struct pf_altq *);
105 #endif
106 int pf_begin_rules(u_int32_t *, int, const char *);
107 int pf_rollback_rules(u_int32_t, int, char *);
108 int pf_setup_pfsync_matching(struct pf_ruleset *);
109 void pf_hash_rule(MD5_CTX *, struct pf_rule *);
110 void pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
111 int pf_commit_rules(u_int32_t, int, char *);
112 void pf_state_export(struct pfsync_state *,
113 struct pf_state_key *, struct pf_state *);
114 void pf_state_import(struct pfsync_state *,
115 struct pf_state_key *, struct pf_state *);
116
117 struct pf_rule pf_default_rule;
118 struct rwlock pf_consistency_lock = RWLOCK_INITIALIZER("pfcnslk");
119 #ifdef ALTQ
120 static int pf_altq_running;
121 #endif
122
123 #define TAGID_MAX 50000
124 TAILQ_HEAD(pf_tags, pf_tagname) pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags),
125 pf_qids = TAILQ_HEAD_INITIALIZER(pf_qids);
126
127 #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
128 #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
129 #endif
130 u_int16_t tagname2tag(struct pf_tags *, char *);
131 void tag2tagname(struct pf_tags *, u_int16_t, char *);
132 void tag_unref(struct pf_tags *, u_int16_t);
133 int pf_rtlabel_add(struct pf_addr_wrap *);
134 void pf_rtlabel_remove(struct pf_addr_wrap *);
135 void pf_rtlabel_copyout(struct pf_addr_wrap *);
136
137 #define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x
138
139 void
140 pfattach(int num)
141 {
142 u_int32_t *timeout = pf_default_rule.timeout;
143
144 pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0, 0, 0, "pfrulepl",
145 &pool_allocator_nointr);
146 pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0, 0, 0,
147 "pfsrctrpl", NULL);
148 pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 0, 0, "pfstatepl",
149 NULL);
150 pool_init(&pf_state_key_pl, sizeof(struct pf_state_key), 0, 0, 0,
151 "pfstatekeypl", NULL);
152 pool_init(&pf_altq_pl, sizeof(struct pf_altq), 0, 0, 0, "pfaltqpl",
153 &pool_allocator_nointr);
154 pool_init(&pf_pooladdr_pl, sizeof(struct pf_pooladdr), 0, 0, 0,
155 "pfpooladdrpl", &pool_allocator_nointr);
156 pfr_initialize();
157 pfi_initialize();
158 pf_osfp_initialize();
159
160 pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
161 pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);
162
163 if (ctob(physmem) <= 100*1024*1024)
164 pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit =
165 PFR_KENTRY_HIWAT_SMALL;
166
167 RB_INIT(&tree_src_tracking);
168 RB_INIT(&pf_anchors);
169 pf_init_ruleset(&pf_main_ruleset);
170 TAILQ_INIT(&pf_altqs[0]);
171 TAILQ_INIT(&pf_altqs[1]);
172 TAILQ_INIT(&pf_pabuf);
173 pf_altqs_active = &pf_altqs[0];
174 pf_altqs_inactive = &pf_altqs[1];
175 TAILQ_INIT(&state_list);
176
177 /* default rule should never be garbage collected */
178 pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
179 pf_default_rule.action = PF_PASS;
180 pf_default_rule.nr = -1;
181 pf_default_rule.rtableid = -1;
182
183 /* initialize default timeouts */
184 timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
185 timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
186 timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
187 timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
188 timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
189 timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
190 timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
191 timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
192 timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
193 timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
194 timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
195 timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
196 timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
197 timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
198 timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
199 timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
200 timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
201 timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
202 timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
203 timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;
204
205 pf_normalize_init();
206 bzero(&pf_status, sizeof(pf_status));
207 pf_status.debug = PF_DEBUG_URGENT;
208
209 /* pick a random hostid */
210 pf_status.hostid = arc4random();
211
212 /* state purging needs process context, so do it from a kernel thread */
213 kthread_create_deferred(pf_thread_create, NULL);
214 }
215
216 void
217 pf_thread_create(void *v)
218 {
219 if (kthread_create(pf_purge_thread, NULL, NULL, "pfpurge"))
220 panic("pfpurge thread");
221 }
222
223 int
224 pfopen(dev_t dev, int flags, int fmt, struct proc *p)
225 {
226 if (minor(dev) >= 1)
227 return (ENXIO);
228 return (0);
229 }
230
231 int
232 pfclose(dev_t dev, int flags, int fmt, struct proc *p)
233 {
234 if (minor(dev) >= 1)
235 return (ENXIO);
236 return (0);
237 }
238
239 struct pf_pool *
240 pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
241 u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
242 u_int8_t check_ticket)
243 {
244 struct pf_ruleset *ruleset;
245 struct pf_rule *rule;
246 int rs_num;
247
248 ruleset = pf_find_ruleset(anchor);
249 if (ruleset == NULL)
250 return (NULL);
251 rs_num = pf_get_ruleset_number(rule_action);
252 if (rs_num >= PF_RULESET_MAX)
253 return (NULL);
254 if (active) {
255 if (check_ticket && ticket !=
256 ruleset->rules[rs_num].active.ticket)
257 return (NULL);
258 if (r_last)
259 rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
260 pf_rulequeue);
261 else
262 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
263 } else {
264 if (check_ticket && ticket !=
265 ruleset->rules[rs_num].inactive.ticket)
266 return (NULL);
267 if (r_last)
268 rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
269 pf_rulequeue);
270 else
271 rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
272 }
273 if (!r_last) {
274 while ((rule != NULL) && (rule->nr != rule_number))
275 rule = TAILQ_NEXT(rule, entries);
276 }
277 if (rule == NULL)
278 return (NULL);
279
280 return (&rule->rpool);
281 }
282
283 void
284 pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
285 {
286 struct pf_pooladdr *mv_pool_pa;
287
288 while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
289 TAILQ_REMOVE(poola, mv_pool_pa, entries);
290 TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
291 }
292 }
293
294 void
295 pf_empty_pool(struct pf_palist *poola)
296 {
297 struct pf_pooladdr *empty_pool_pa;
298
299 while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) {
300 pfi_dynaddr_remove(&empty_pool_pa->addr);
301 pf_tbladdr_remove(&empty_pool_pa->addr);
302 pfi_kif_unref(empty_pool_pa->kif, PFI_KIF_REF_RULE);
303 TAILQ_REMOVE(poola, empty_pool_pa, entries);
304 pool_put(&pf_pooladdr_pl, empty_pool_pa);
305 }
306 }
307
308 void
309 pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
310 {
311 if (rulequeue != NULL) {
312 if (rule->states <= 0) {
313 /*
314  * remove the table references before unlinking the rule,
315  * so the table code does not delete the anchor under
316  * our feet
317  */
318 pf_tbladdr_remove(&rule->src.addr);
319 pf_tbladdr_remove(&rule->dst.addr);
320 if (rule->overload_tbl)
321 pfr_detach_table(rule->overload_tbl);
322 }
323 TAILQ_REMOVE(rulequeue, rule, entries);
324 rule->entries.tqe_prev = NULL;
325 rule->nr = -1;
326 }
327
328 if (rule->states > 0 || rule->src_nodes > 0 ||
329 rule->entries.tqe_prev != NULL)
330 return;
331 pf_tag_unref(rule->tag);
332 pf_tag_unref(rule->match_tag);
333 #ifdef ALTQ
334 if (rule->pqid != rule->qid)
335 pf_qid_unref(rule->pqid);
336 pf_qid_unref(rule->qid);
337 #endif
338 pf_rtlabel_remove(&rule->src.addr);
339 pf_rtlabel_remove(&rule->dst.addr);
340 pfi_dynaddr_remove(&rule->src.addr);
341 pfi_dynaddr_remove(&rule->dst.addr);
342 if (rulequeue == NULL) {
343 pf_tbladdr_remove(&rule->src.addr);
344 pf_tbladdr_remove(&rule->dst.addr);
345 if (rule->overload_tbl)
346 pfr_detach_table(rule->overload_tbl);
347 }
348 pfi_kif_unref(rule->kif, PFI_KIF_REF_RULE);
349 pf_anchor_remove(rule);
350 pf_empty_pool(&rule->rpool.list);
351 pool_put(&pf_rule_pl, rule);
352 }
353
354 u_int16_t
355 tagname2tag(struct pf_tags *head, char *tagname)
356 {
357 struct pf_tagname *tag, *p = NULL;
358 u_int16_t new_tagid = 1;
359
360 TAILQ_FOREACH(tag, head, entries)
361 if (strcmp(tagname, tag->name) == 0) {
362 tag->ref++;
363 return (tag->tag);
364 }
365
366 /*
367  * to avoid fragmentation, do a linear search from the
368  * beginning and take the first free tag id; if there is
369  * none or the list is empty, append a new entry at the
370  * end
371  */
372
373 if (!TAILQ_EMPTY(head))
374 for (p = TAILQ_FIRST(head); p != NULL &&
375 p->tag == new_tagid; p = TAILQ_NEXT(p, entries))
376 new_tagid = p->tag + 1;
377
378 if (new_tagid > TAGID_MAX)
379 return (0);
380
381 /* allocate and fill a new tag entry */
382 tag = (struct pf_tagname *)malloc(sizeof(struct pf_tagname),
383 M_TEMP, M_NOWAIT);
384 if (tag == NULL)
385 return (0);
386 bzero(tag, sizeof(struct pf_tagname));
387 strlcpy(tag->name, tagname, sizeof(tag->name));
388 tag->tag = new_tagid;
389 tag->ref++;
390
391 if (p != NULL)
392 TAILQ_INSERT_BEFORE(p, tag, entries);
393 else
394 TAILQ_INSERT_TAIL(head, tag, entries);
395
396 return (tag->tag);
397 }
398
399 void
400 tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
401 {
402 struct pf_tagname *tag;
403
404 TAILQ_FOREACH(tag, head, entries)
405 if (tag->tag == tagid) {
406 strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
407 return;
408 }
409 }
410
411 void
412 tag_unref(struct pf_tags *head, u_int16_t tag)
413 {
414 struct pf_tagname *p, *next;
415
416 if (tag == 0)
417 return;
418
419 for (p = TAILQ_FIRST(head); p != NULL; p = next) {
420 next = TAILQ_NEXT(p, entries);
421 if (tag == p->tag) {
422 if (--p->ref == 0) {
423 TAILQ_REMOVE(head, p, entries);
424 free(p, M_TEMP);
425 }
426 break;
427 }
428 }
429 }
430
431 u_int16_t
432 pf_tagname2tag(char *tagname)
433 {
434 return (tagname2tag(&pf_tags, tagname));
435 }
436
437 void
438 pf_tag2tagname(u_int16_t tagid, char *p)
439 {
440 tag2tagname(&pf_tags, tagid, p);
441 }
442
443 void
444 pf_tag_ref(u_int16_t tag)
445 {
446 struct pf_tagname *t;
447
448 TAILQ_FOREACH(t, &pf_tags, entries)
449 if (t->tag == tag)
450 break;
451 if (t != NULL)
452 t->ref++;
453 }
454
455 void
456 pf_tag_unref(u_int16_t tag)
457 {
458 tag_unref(&pf_tags, tag);
459 }
460
461 int
462 pf_rtlabel_add(struct pf_addr_wrap *a)
463 {
464 if (a->type == PF_ADDR_RTLABEL &&
465 (a->v.rtlabel = rtlabel_name2id(a->v.rtlabelname)) == 0)
466 return (-1);
467 return (0);
468 }
469
470 void
471 pf_rtlabel_remove(struct pf_addr_wrap *a)
472 {
473 if (a->type == PF_ADDR_RTLABEL)
474 rtlabel_unref(a->v.rtlabel);
475 }
476
477 void
478 pf_rtlabel_copyout(struct pf_addr_wrap *a)
479 {
480 const char *name;
481
482 if (a->type == PF_ADDR_RTLABEL && a->v.rtlabel) {
483 if ((name = rtlabel_id2name(a->v.rtlabel)) == NULL)
484 strlcpy(a->v.rtlabelname, "?",
485 sizeof(a->v.rtlabelname));
486 else
487 strlcpy(a->v.rtlabelname, name,
488 sizeof(a->v.rtlabelname));
489 }
490 }
491
492 #ifdef ALTQ
493 u_int32_t
494 pf_qname2qid(char *qname)
495 {
496 return ((u_int32_t)tagname2tag(&pf_qids, qname));
497 }
498
499 void
500 pf_qid2qname(u_int32_t qid, char *p)
501 {
502 tag2tagname(&pf_qids, (u_int16_t)qid, p);
503 }
504
505 void
506 pf_qid_unref(u_int32_t qid)
507 {
508 tag_unref(&pf_qids, (u_int16_t)qid);
509 }
510
511 int
512 pf_begin_altq(u_int32_t *ticket)
513 {
514 struct pf_altq *altq;
515 int error = 0;
516
517 /* purge the old altq list */
518 while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
519 TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
520 if (altq->qname[0] == 0) {
521 /* detach and destroy the discipline */
522 error = altq_remove(altq);
523 } else
524 pf_qid_unref(altq->qid);
525 pool_put(&pf_altq_pl, altq);
526 }
527 if (error)
528 return (error);
529 *ticket = ++ticket_altqs_inactive;
530 altqs_inactive_open = 1;
531 return (0);
532 }
533
534 int
535 pf_rollback_altq(u_int32_t ticket)
536 {
537 struct pf_altq *altq;
538 int error = 0;
539
540 if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
541 return (0);
542
543 while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
544 TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
545 if (altq->qname[0] == 0) {
546 /* detach and destroy the discipline */
547 error = altq_remove(altq);
548 } else
549 pf_qid_unref(altq->qid);
550 pool_put(&pf_altq_pl, altq);
551 }
552 altqs_inactive_open = 0;
553 return (error);
554 }
555
556 int
557 pf_commit_altq(u_int32_t ticket)
558 {
559 struct pf_altqqueue *old_altqs;
560 struct pf_altq *altq;
561 int s, err, error = 0;
562
563 if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
564 return (EBUSY);
565
566 /* swap altqs, keep the old */
567 s = splsoftnet();
568 old_altqs = pf_altqs_active;
569 pf_altqs_active = pf_altqs_inactive;
570 pf_altqs_inactive = old_altqs;
571 ticket_altqs_active = ticket_altqs_inactive;
572
573 /* attach new disciplines */
574 TAILQ_FOREACH(altq, pf_altqs_active, entries) {
575 if (altq->qname[0] == 0) {
576 /* attach the discipline */
577 error = altq_pfattach(altq);
578 if (error == 0 && pf_altq_running)
579 error = pf_enable_altq(altq);
580 if (error != 0) {
581 splx(s);
582 return (error);
583 }
584 }
585 }
586
587 /* purge the old altq list */
588 while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
589 TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
590 if (altq->qname[0] == 0) {
591 /* detach and destroy the discipline */
592 if (pf_altq_running)
593 error = pf_disable_altq(altq);
594 err = altq_pfdetach(altq);
595 if (err != 0 && error == 0)
596 error = err;
597 err = altq_remove(altq);
598 if (err != 0 && error == 0)
599 error = err;
600 } else
601 pf_qid_unref(altq->qid);
602 pool_put(&pf_altq_pl, altq);
603 }
604 splx(s);
605
606 altqs_inactive_open = 0;
607 return (error);
608 }
609
610 int
611 pf_enable_altq(struct pf_altq *altq)
612 {
613 struct ifnet *ifp;
614 struct tb_profile tb;
615 int s, error = 0;
616
617 if ((ifp = ifunit(altq->ifname)) == NULL)
618 return (EINVAL);
619
620 if (ifp->if_snd.altq_type != ALTQT_NONE)
621 error = altq_enable(&ifp->if_snd);
622
623 /* set tokenbucket regulator */
624 if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
625 tb.rate = altq->ifbandwidth;
626 tb.depth = altq->tbrsize;
627 s = splnet();
628 error = tbr_set(&ifp->if_snd, &tb);
629 splx(s);
630 }
631
632 return (error);
633 }
634
635 int
636 pf_disable_altq(struct pf_altq *altq)
637 {
638 struct ifnet *ifp;
639 struct tb_profile tb;
640 int s, error;
641
642 if ((ifp = ifunit(altq->ifname)) == NULL)
643 return (EINVAL);
644
645 /*
646  * if the discipline is no longer referenced here, it has
647  * been overridden by a new one and must not be disabled
648  */
649 if (altq->altq_disc != ifp->if_snd.altq_disc)
650 return (0);
651
652 error = altq_disable(&ifp->if_snd);
653
654 if (error == 0) {
655 /* clear tokenbucket regulator */
656 tb.rate = 0;
657 s = splnet();
658 error = tbr_set(&ifp->if_snd, &tb);
659 splx(s);
660 }
661
662 return (error);
663 }
664 #endif
665
666 int
667 pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
668 {
669 struct pf_ruleset *rs;
670 struct pf_rule *rule;
671
672 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
673 return (EINVAL);
674 rs = pf_find_or_create_ruleset(anchor);
675 if (rs == NULL)
676 return (EINVAL);
677 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
678 pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
679 rs->rules[rs_num].inactive.rcount--;
680 }
681 *ticket = ++rs->rules[rs_num].inactive.ticket;
682 rs->rules[rs_num].inactive.open = 1;
683 return (0);
684 }
685
686 int
687 pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
688 {
689 struct pf_ruleset *rs;
690 struct pf_rule *rule;
691
692 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
693 return (EINVAL);
694 rs = pf_find_ruleset(anchor);
695 if (rs == NULL || !rs->rules[rs_num].inactive.open ||
696 rs->rules[rs_num].inactive.ticket != ticket)
697 return (0);
698 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
699 pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
700 rs->rules[rs_num].inactive.rcount--;
701 }
702 rs->rules[rs_num].inactive.open = 0;
703 return (0);
704 }
705
706 #define PF_MD5_UPD(st, elm) \
707 MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))
708
709 #define PF_MD5_UPD_STR(st, elm) \
710 MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))
711
712 #define PF_MD5_UPD_HTONL(st, elm, stor) do { \
713 (stor) = htonl((st)->elm); \
714 MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
715 } while (0)
716
717 #define PF_MD5_UPD_HTONS(st, elm, stor) do { \
718 (stor) = htons((st)->elm); \
719 MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
720 } while (0)
721
722 void
723 pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
724 {
725 PF_MD5_UPD(pfr, addr.type);
726 switch (pfr->addr.type) {
727 case PF_ADDR_DYNIFTL:
728 PF_MD5_UPD(pfr, addr.v.ifname);
729 PF_MD5_UPD(pfr, addr.iflags);
730 break;
731 case PF_ADDR_TABLE:
732 PF_MD5_UPD(pfr, addr.v.tblname);
733 break;
734 case PF_ADDR_ADDRMASK:
735
736 PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
737 PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
738 break;
739 case PF_ADDR_RTLABEL:
740 PF_MD5_UPD(pfr, addr.v.rtlabelname);
741 break;
742 }
743
744 PF_MD5_UPD(pfr, port[0]);
745 PF_MD5_UPD(pfr, port[1]);
746 PF_MD5_UPD(pfr, neg);
747 PF_MD5_UPD(pfr, port_op);
748 }
749
750 void
751 pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule)
752 {
753 u_int16_t x;
754 u_int32_t y;
755
756 pf_hash_rule_addr(ctx, &rule->src);
757 pf_hash_rule_addr(ctx, &rule->dst);
758 PF_MD5_UPD_STR(rule, label);
759 PF_MD5_UPD_STR(rule, ifname);
760 PF_MD5_UPD_STR(rule, match_tagname);
761 PF_MD5_UPD_HTONS(rule, match_tag, x);
762 PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
763 PF_MD5_UPD_HTONL(rule, prob, y);
764 PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
765 PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
766 PF_MD5_UPD(rule, uid.op);
767 PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
768 PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
769 PF_MD5_UPD(rule, gid.op);
770 PF_MD5_UPD_HTONL(rule, rule_flag, y);
771 PF_MD5_UPD(rule, action);
772 PF_MD5_UPD(rule, direction);
773 PF_MD5_UPD(rule, af);
774 PF_MD5_UPD(rule, quick);
775 PF_MD5_UPD(rule, ifnot);
776 PF_MD5_UPD(rule, match_tag_not);
777 PF_MD5_UPD(rule, natpass);
778 PF_MD5_UPD(rule, keep_state);
779 PF_MD5_UPD(rule, proto);
780 PF_MD5_UPD(rule, type);
781 PF_MD5_UPD(rule, code);
782 PF_MD5_UPD(rule, flags);
783 PF_MD5_UPD(rule, flagset);
784 PF_MD5_UPD(rule, allow_opts);
785 PF_MD5_UPD(rule, rt);
786 PF_MD5_UPD(rule, tos);
787 }
788
789 int
790 pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
791 {
792 struct pf_ruleset *rs;
793 struct pf_rule *rule, **old_array;
794 struct pf_rulequeue *old_rules;
795 int s, error;
796 u_int32_t old_rcount;
797
798 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
799 return (EINVAL);
800 rs = pf_find_ruleset(anchor);
801 if (rs == NULL || !rs->rules[rs_num].inactive.open ||
802 ticket != rs->rules[rs_num].inactive.ticket)
803 return (EBUSY);
804
805 /* calculate checksum for the main ruleset */
806 if (rs == &pf_main_ruleset) {
807 error = pf_setup_pfsync_matching(rs);
808 if (error != 0)
809 return (error);
810 }
811
812 /* swap rules, keep the old */
813 s = splsoftnet();
814 old_rules = rs->rules[rs_num].active.ptr;
815 old_rcount = rs->rules[rs_num].active.rcount;
816 old_array = rs->rules[rs_num].active.ptr_array;
817
818 rs->rules[rs_num].active.ptr =
819 rs->rules[rs_num].inactive.ptr;
820 rs->rules[rs_num].active.ptr_array =
821 rs->rules[rs_num].inactive.ptr_array;
822 rs->rules[rs_num].active.rcount =
823 rs->rules[rs_num].inactive.rcount;
824 rs->rules[rs_num].inactive.ptr = old_rules;
825 rs->rules[rs_num].inactive.ptr_array = old_array;
826 rs->rules[rs_num].inactive.rcount = old_rcount;
827
828 rs->rules[rs_num].active.ticket =
829 rs->rules[rs_num].inactive.ticket;
830 pf_calc_skip_steps(rs->rules[rs_num].active.ptr);
831
832
833 /* purge the old rule list */
834 while ((rule = TAILQ_FIRST(old_rules)) != NULL)
835 pf_rm_rule(old_rules, rule);
836 if (rs->rules[rs_num].inactive.ptr_array)
837 free(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
838 rs->rules[rs_num].inactive.ptr_array = NULL;
839 rs->rules[rs_num].inactive.rcount = 0;
840 rs->rules[rs_num].inactive.open = 0;
841 pf_remove_if_empty_ruleset(rs);
842 splx(s);
843 return (0);
844 }
845
846 void
847 pf_state_export(struct pfsync_state *sp, struct pf_state_key *sk,
848 struct pf_state *s)
849 {
850 int secs = time_second;
851 bzero(sp, sizeof(struct pfsync_state));
852
853 /* copy from state key */
854 sp->lan.addr = sk->lan.addr;
855 sp->lan.port = sk->lan.port;
856 sp->gwy.addr = sk->gwy.addr;
857 sp->gwy.port = sk->gwy.port;
858 sp->ext.addr = sk->ext.addr;
859 sp->ext.port = sk->ext.port;
860 sp->proto = sk->proto;
861 sp->af = sk->af;
862 sp->direction = sk->direction;
863
864 /* copy from state */
865 memcpy(&sp->id, &s->id, sizeof(sp->id));
866 sp->creatorid = s->creatorid;
867 strlcpy(sp->ifname, s->kif->pfik_name, sizeof(sp->ifname));
868 pf_state_peer_to_pfsync(&s->src, &sp->src);
869 pf_state_peer_to_pfsync(&s->dst, &sp->dst);
870
871 sp->rule = s->rule.ptr->nr;
872 sp->nat_rule = (s->nat_rule.ptr == NULL) ? -1 : s->nat_rule.ptr->nr;
873 sp->anchor = (s->anchor.ptr == NULL) ? -1 : s->anchor.ptr->nr;
874
875 pf_state_counter_to_pfsync(s->bytes[0], sp->bytes[0]);
876 pf_state_counter_to_pfsync(s->bytes[1], sp->bytes[1]);
877 pf_state_counter_to_pfsync(s->packets[0], sp->packets[0]);
878 pf_state_counter_to_pfsync(s->packets[1], sp->packets[1]);
879 sp->creation = secs - s->creation;
880 sp->expire = pf_state_expires(s);
881 sp->log = s->log;
882 sp->allow_opts = s->allow_opts;
883 sp->timeout = s->timeout;
884
885 if (s->src_node)
886 sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
887 if (s->nat_src_node)
888 sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;
889
890 if (sp->expire > secs)
891 sp->expire -= secs;
892 else
893 sp->expire = 0;
894
895 }
896
897 void
898 pf_state_import(struct pfsync_state *sp, struct pf_state_key *sk,
899 struct pf_state *s)
900 {
901 /* copy to state key */
902 sk->lan.addr = sp->lan.addr;
903 sk->lan.port = sp->lan.port;
904 sk->gwy.addr = sp->gwy.addr;
905 sk->gwy.port = sp->gwy.port;
906 sk->ext.addr = sp->ext.addr;
907 sk->ext.port = sp->ext.port;
908 sk->proto = sp->proto;
909 sk->af = sp->af;
910 sk->direction = sp->direction;
911
912 /* copy to state */
913 memcpy(&s->id, &sp->id, sizeof(sp->id));
914 s->creatorid = sp->creatorid;
915 strlcpy(sp->ifname, s->kif->pfik_name, sizeof(sp->ifname));
916 pf_state_peer_from_pfsync(&sp->src, &s->src);
917 pf_state_peer_from_pfsync(&sp->dst, &s->dst);
918
919 s->rule.ptr = &pf_default_rule;
920 s->nat_rule.ptr = NULL;
921 s->anchor.ptr = NULL;
922 s->rt_kif = NULL;
923 s->creation = time_second;
924 s->pfsync_time = 0;
925 s->packets[0] = s->packets[1] = 0;
926 s->bytes[0] = s->bytes[1] = 0;
927 }
928
929 int
930 pf_setup_pfsync_matching(struct pf_ruleset *rs)
931 {
932 MD5_CTX ctx;
933 struct pf_rule *rule;
934 int rs_cnt;
935 u_int8_t digest[PF_MD5_DIGEST_LENGTH];
936
937 MD5Init(&ctx);
938 for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
939 /* scrub rules are not part of the ruleset checksum */
940 if (rs_cnt == PF_RULESET_SCRUB)
941 continue;
942
943 if (rs->rules[rs_cnt].inactive.ptr_array)
944 free(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
945 rs->rules[rs_cnt].inactive.ptr_array = NULL;
946
947 if (rs->rules[rs_cnt].inactive.rcount) {
948 rs->rules[rs_cnt].inactive.ptr_array =
949 malloc(sizeof(caddr_t) *
950 rs->rules[rs_cnt].inactive.rcount,
951 M_TEMP, M_NOWAIT);
952
953 if (!rs->rules[rs_cnt].inactive.ptr_array)
954 return (ENOMEM);
955 }
956
957 TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
958 entries) {
959 pf_hash_rule(&ctx, rule);
960 (rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
961 }
962 }
963
964 MD5Final(digest, &ctx);
965 memcpy(pf_status.pf_chksum, digest, sizeof(pf_status.pf_chksum));
966 return (0);
967 }
968
969 int
970 pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
971 {
972 struct pf_pooladdr *pa = NULL;
973 struct pf_pool *pool = NULL;
974 int s;
975 int error = 0;
976
977 /* restrict the set of allowed commands once the securelevel is raised */
978 if (securelevel > 1)
979 switch (cmd) {
980 case DIOCGETRULES:
981 case DIOCGETRULE:
982 case DIOCGETADDRS:
983 case DIOCGETADDR:
984 case DIOCGETSTATE:
985 case DIOCSETSTATUSIF:
986 case DIOCGETSTATUS:
987 case DIOCCLRSTATUS:
988 case DIOCNATLOOK:
989 case DIOCSETDEBUG:
990 case DIOCGETSTATES:
991 case DIOCGETTIMEOUT:
992 case DIOCCLRRULECTRS:
993 case DIOCGETLIMIT:
994 case DIOCGETALTQS:
995 case DIOCGETALTQ:
996 case DIOCGETQSTATS:
997 case DIOCGETRULESETS:
998 case DIOCGETRULESET:
999 case DIOCRGETTABLES:
1000 case DIOCRGETTSTATS:
1001 case DIOCRCLRTSTATS:
1002 case DIOCRCLRADDRS:
1003 case DIOCRADDADDRS:
1004 case DIOCRDELADDRS:
1005 case DIOCRSETADDRS:
1006 case DIOCRGETADDRS:
1007 case DIOCRGETASTATS:
1008 case DIOCRCLRASTATS:
1009 case DIOCRTSTADDRS:
1010 case DIOCOSFPGET:
1011 case DIOCGETSRCNODES:
1012 case DIOCCLRSRCNODES:
1013 case DIOCIGETIFACES:
1014 case DIOCSETIFFLAG:
1015 case DIOCCLRIFFLAG:
1016 break;
1017 case DIOCRCLRTABLES:
1018 case DIOCRADDTABLES:
1019 case DIOCRDELTABLES:
1020 case DIOCRSETTFLAGS:
1021 if (((struct pfioc_table *)addr)->pfrio_flags &
1022 PFR_FLAG_DUMMY)
1023 break;
1024 return (EPERM);
1025 default:
1026 return (EPERM);
1027 }
1028
1029 if (!(flags & FWRITE))
1030 switch (cmd) {
1031 case DIOCGETRULES:
1032 case DIOCGETADDRS:
1033 case DIOCGETADDR:
1034 case DIOCGETSTATE:
1035 case DIOCGETSTATUS:
1036 case DIOCGETSTATES:
1037 case DIOCGETTIMEOUT:
1038 case DIOCGETLIMIT:
1039 case DIOCGETALTQS:
1040 case DIOCGETALTQ:
1041 case DIOCGETQSTATS:
1042 case DIOCGETRULESETS:
1043 case DIOCGETRULESET:
1044 case DIOCNATLOOK:
1045 case DIOCRGETTABLES:
1046 case DIOCRGETTSTATS:
1047 case DIOCRGETADDRS:
1048 case DIOCRGETASTATS:
1049 case DIOCRTSTADDRS:
1050 case DIOCOSFPGET:
1051 case DIOCGETSRCNODES:
1052 case DIOCIGETIFACES:
1053 break;
1054 case DIOCRCLRTABLES:
1055 case DIOCRADDTABLES:
1056 case DIOCRDELTABLES:
1057 case DIOCRCLRTSTATS:
1058 case DIOCRCLRADDRS:
1059 case DIOCRADDADDRS:
1060 case DIOCRDELADDRS:
1061 case DIOCRSETADDRS:
1062 case DIOCRSETTFLAGS:
1063 if (((struct pfioc_table *)addr)->pfrio_flags &
1064 PFR_FLAG_DUMMY) {
1065 flags |= FWRITE;
1066 break;
1067 }
1068 return (EACCES);
1069 case DIOCGETRULE:
1070 if (((struct pfioc_rule *)addr)->action == PF_GET_CLR_CNTR)
1071 return (EACCES);
1072 break;
1073 default:
1074 return (EACCES);
1075 }
1076
1077 if (flags & FWRITE)
1078 rw_enter_write(&pf_consistency_lock);
1079 else
1080 rw_enter_read(&pf_consistency_lock);
1081
1082 s = splsoftnet();
1083 switch (cmd) {
1084
1085 case DIOCSTART:
1086 if (pf_status.running)
1087 error = EEXIST;
1088 else {
1089 pf_status.running = 1;
1090 pf_status.since = time_second;
1091 if (pf_status.stateid == 0) {
1092 pf_status.stateid = time_second;
1093 pf_status.stateid = pf_status.stateid << 32;
1094 }
1095 DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
1096 }
1097 break;
1098
1099 case DIOCSTOP:
1100 if (!pf_status.running)
1101 error = ENOENT;
1102 else {
1103 pf_status.running = 0;
1104 pf_status.since = time_second;
1105 DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
1106 }
1107 break;
1108
1109 case DIOCADDRULE: {
1110 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
1111 struct pf_ruleset *ruleset;
1112 struct pf_rule *rule, *tail;
1113 struct pf_pooladdr *pa;
1114 int rs_num;
1115
1116 pr->anchor[sizeof(pr->anchor) - 1] = 0;
1117 ruleset = pf_find_ruleset(pr->anchor);
1118 if (ruleset == NULL) {
1119 error = EINVAL;
1120 break;
1121 }
1122 rs_num = pf_get_ruleset_number(pr->rule.action);
1123 if (rs_num >= PF_RULESET_MAX) {
1124 error = EINVAL;
1125 break;
1126 }
1127 if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
1128 error = EINVAL;
1129 break;
1130 }
1131 if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) {
1132 error = EBUSY;
1133 break;
1134 }
1135 if (pr->pool_ticket != ticket_pabuf) {
1136 error = EBUSY;
1137 break;
1138 }
1139 rule = pool_get(&pf_rule_pl, PR_NOWAIT);
1140 if (rule == NULL) {
1141 error = ENOMEM;
1142 break;
1143 }
1144 bcopy(&pr->rule, rule, sizeof(struct pf_rule));
1145 rule->cuid = p->p_cred->p_ruid;
1146 rule->cpid = p->p_pid;
1147 rule->anchor = NULL;
1148 rule->kif = NULL;
1149 TAILQ_INIT(&rule->rpool.list);
1150 /* initialize refcounting */
1151 rule->states = 0;
1152 rule->src_nodes = 0;
1153 rule->entries.tqe_prev = NULL;
1154 #ifndef INET
1155 if (rule->af == AF_INET) {
1156 pool_put(&pf_rule_pl, rule);
1157 error = EAFNOSUPPORT;
1158 break;
1159 }
1160 #endif
1161 #ifndef INET6
1162 if (rule->af == AF_INET6) {
1163 pool_put(&pf_rule_pl, rule);
1164 error = EAFNOSUPPORT;
1165 break;
1166 }
1167 #endif
1168 tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
1169 pf_rulequeue);
1170 if (tail)
1171 rule->nr = tail->nr + 1;
1172 else
1173 rule->nr = 0;
1174 if (rule->ifname[0]) {
1175 rule->kif = pfi_kif_get(rule->ifname);
1176 if (rule->kif == NULL) {
1177 pool_put(&pf_rule_pl, rule);
1178 error = EINVAL;
1179 break;
1180 }
1181 pfi_kif_ref(rule->kif, PFI_KIF_REF_RULE);
1182 }
1183
1184 if (rule->rtableid > 0 && !rtable_exists(rule->rtableid))
1185 error = EBUSY;
1186
1187 #ifdef ALTQ
1188 /* set queue IDs */
1189 if (rule->qname[0] != 0) {
1190 if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
1191 error = EBUSY;
1192 else if (rule->pqname[0] != 0) {
1193 if ((rule->pqid =
1194 pf_qname2qid(rule->pqname)) == 0)
1195 error = EBUSY;
1196 } else
1197 rule->pqid = rule->qid;
1198 }
1199 #endif
1200 if (rule->tagname[0])
1201 if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
1202 error = EBUSY;
1203 if (rule->match_tagname[0])
1204 if ((rule->match_tag =
1205 pf_tagname2tag(rule->match_tagname)) == 0)
1206 error = EBUSY;
1207 if (rule->rt && !rule->direction)
1208 error = EINVAL;
1209 #if NPFLOG > 0
1210 if (!rule->log)
1211 rule->logif = 0;
1212 if (rule->logif >= PFLOGIFS_MAX)
1213 error = EINVAL;
1214 #endif
1215 if (pf_rtlabel_add(&rule->src.addr) ||
1216 pf_rtlabel_add(&rule->dst.addr))
1217 error = EBUSY;
1218 if (pfi_dynaddr_setup(&rule->src.addr, rule->af))
1219 error = EINVAL;
1220 if (pfi_dynaddr_setup(&rule->dst.addr, rule->af))
1221 error = EINVAL;
1222 if (pf_tbladdr_setup(ruleset, &rule->src.addr))
1223 error = EINVAL;
1224 if (pf_tbladdr_setup(ruleset, &rule->dst.addr))
1225 error = EINVAL;
1226 if (pf_anchor_setup(rule, ruleset, pr->anchor_call))
1227 error = EINVAL;
1228 TAILQ_FOREACH(pa, &pf_pabuf, entries)
1229 if (pf_tbladdr_setup(ruleset, &pa->addr))
1230 error = EINVAL;
1231
1232 if (rule->overload_tblname[0]) {
1233 if ((rule->overload_tbl = pfr_attach_table(ruleset,
1234 rule->overload_tblname)) == NULL)
1235 error = EINVAL;
1236 else
1237 rule->overload_tbl->pfrkt_flags |=
1238 PFR_TFLAG_ACTIVE;
1239 }
1240
1241 pf_mv_pool(&pf_pabuf, &rule->rpool.list);
1242 if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
1243 (rule->action == PF_BINAT)) && rule->anchor == NULL) ||
1244 (rule->rt > PF_FASTROUTE)) &&
1245 (TAILQ_FIRST(&rule->rpool.list) == NULL))
1246 error = EINVAL;
1247
1248 if (error) {
1249 pf_rm_rule(NULL, rule);
1250 break;
1251 }
1252 rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
1253 rule->evaluations = rule->packets[0] = rule->packets[1] =
1254 rule->bytes[0] = rule->bytes[1] = 0;
1255 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
1256 rule, entries);
1257 ruleset->rules[rs_num].inactive.rcount++;
1258 break;
1259 }
1260
1261 case DIOCGETRULES: {
1262 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
1263 struct pf_ruleset *ruleset;
1264 struct pf_rule *tail;
1265 int rs_num;
1266
1267 pr->anchor[sizeof(pr->anchor) - 1] = 0;
1268 ruleset = pf_find_ruleset(pr->anchor);
1269 if (ruleset == NULL) {
1270 error = EINVAL;
1271 break;
1272 }
1273 rs_num = pf_get_ruleset_number(pr->rule.action);
1274 if (rs_num >= PF_RULESET_MAX) {
1275 error = EINVAL;
1276 break;
1277 }
1278 tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
1279 pf_rulequeue);
1280 if (tail)
1281 pr->nr = tail->nr + 1;
1282 else
1283 pr->nr = 0;
1284 pr->ticket = ruleset->rules[rs_num].active.ticket;
1285 break;
1286 }
1287
1288 case DIOCGETRULE: {
1289 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
1290 struct pf_ruleset *ruleset;
1291 struct pf_rule *rule;
1292 int rs_num, i;
1293
1294 pr->anchor[sizeof(pr->anchor) - 1] = 0;
1295 ruleset = pf_find_ruleset(pr->anchor);
1296 if (ruleset == NULL) {
1297 error = EINVAL;
1298 break;
1299 }
1300 rs_num = pf_get_ruleset_number(pr->rule.action);
1301 if (rs_num >= PF_RULESET_MAX) {
1302 error = EINVAL;
1303 break;
1304 }
1305 if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
1306 error = EBUSY;
1307 break;
1308 }
1309 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
1310 while ((rule != NULL) && (rule->nr != pr->nr))
1311 rule = TAILQ_NEXT(rule, entries);
1312 if (rule == NULL) {
1313 error = EBUSY;
1314 break;
1315 }
1316 bcopy(rule, &pr->rule, sizeof(struct pf_rule));
1317 if (pf_anchor_copyout(ruleset, rule, pr)) {
1318 error = EBUSY;
1319 break;
1320 }
1321 pfi_dynaddr_copyout(&pr->rule.src.addr);
1322 pfi_dynaddr_copyout(&pr->rule.dst.addr);
1323 pf_tbladdr_copyout(&pr->rule.src.addr);
1324 pf_tbladdr_copyout(&pr->rule.dst.addr);
1325 pf_rtlabel_copyout(&pr->rule.src.addr);
1326 pf_rtlabel_copyout(&pr->rule.dst.addr);
1327 for (i = 0; i < PF_SKIP_COUNT; ++i)
1328 if (rule->skip[i].ptr == NULL)
1329 pr->rule.skip[i].nr = -1;
1330 else
1331 pr->rule.skip[i].nr =
1332 rule->skip[i].ptr->nr;
1333
1334 if (pr->action == PF_GET_CLR_CNTR) {
1335 rule->evaluations = 0;
1336 rule->packets[0] = rule->packets[1] = 0;
1337 rule->bytes[0] = rule->bytes[1] = 0;
1338 }
1339 break;
1340 }
1341
1342 case DIOCCHANGERULE: {
1343 struct pfioc_rule *pcr = (struct pfioc_rule *)addr;
1344 struct pf_ruleset *ruleset;
1345 struct pf_rule *oldrule = NULL, *newrule = NULL;
1346 u_int32_t nr = 0;
1347 int rs_num;
1348
1349 if (!(pcr->action == PF_CHANGE_REMOVE ||
1350 pcr->action == PF_CHANGE_GET_TICKET) &&
1351 pcr->pool_ticket != ticket_pabuf) {
1352 error = EBUSY;
1353 break;
1354 }
1355
1356 if (pcr->action < PF_CHANGE_ADD_HEAD ||
1357 pcr->action > PF_CHANGE_GET_TICKET) {
1358 error = EINVAL;
1359 break;
1360 }
1361 ruleset = pf_find_ruleset(pcr->anchor);
1362 if (ruleset == NULL) {
1363 error = EINVAL;
1364 break;
1365 }
1366 rs_num = pf_get_ruleset_number(pcr->rule.action);
1367 if (rs_num >= PF_RULESET_MAX) {
1368 error = EINVAL;
1369 break;
1370 }
1371
1372 if (pcr->action == PF_CHANGE_GET_TICKET) {
1373 pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
1374 break;
1375 } else {
1376 if (pcr->ticket !=
1377 ruleset->rules[rs_num].active.ticket) {
1378 error = EINVAL;
1379 break;
1380 }
1381 if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
1382 error = EINVAL;
1383 break;
1384 }
1385 }
1386
1387 if (pcr->action != PF_CHANGE_REMOVE) {
1388 newrule = pool_get(&pf_rule_pl, PR_NOWAIT);
1389 if (newrule == NULL) {
1390 error = ENOMEM;
1391 break;
1392 }
1393 bcopy(&pcr->rule, newrule, sizeof(struct pf_rule));
1394 newrule->cuid = p->p_cred->p_ruid;
1395 newrule->cpid = p->p_pid;
1396 TAILQ_INIT(&newrule->rpool.list);
1397 /* initialize refcounting */
1398 newrule->states = 0;
1399 newrule->entries.tqe_prev = NULL;
1400 #ifndef INET
1401 if (newrule->af == AF_INET) {
1402 pool_put(&pf_rule_pl, newrule);
1403 error = EAFNOSUPPORT;
1404 break;
1405 }
1406 #endif
1407 #ifndef INET6
1408 if (newrule->af == AF_INET6) {
1409 pool_put(&pf_rule_pl, newrule);
1410 error = EAFNOSUPPORT;
1411 break;
1412 }
1413 #endif
1414 if (newrule->ifname[0]) {
1415 newrule->kif = pfi_kif_get(newrule->ifname);
1416 if (newrule->kif == NULL) {
1417 pool_put(&pf_rule_pl, newrule);
1418 error = EINVAL;
1419 break;
1420 }
1421 pfi_kif_ref(newrule->kif, PFI_KIF_REF_RULE);
1422 } else
1423 newrule->kif = NULL;
1424
1425 if (newrule->rtableid > 0 &&
1426 !rtable_exists(newrule->rtableid))
1427 error = EBUSY;
1428
1429 #ifdef ALTQ
1430 /* set queue IDs */
1431 if (newrule->qname[0] != 0) {
1432 if ((newrule->qid =
1433 pf_qname2qid(newrule->qname)) == 0)
1434 error = EBUSY;
1435 else if (newrule->pqname[0] != 0) {
1436 if ((newrule->pqid =
1437 pf_qname2qid(newrule->pqname)) == 0)
1438 error = EBUSY;
1439 } else
1440 newrule->pqid = newrule->qid;
1441 }
1442 #endif
1443 if (newrule->tagname[0])
1444 if ((newrule->tag =
1445 pf_tagname2tag(newrule->tagname)) == 0)
1446 error = EBUSY;
1447 if (newrule->match_tagname[0])
1448 if ((newrule->match_tag = pf_tagname2tag(
1449 newrule->match_tagname)) == 0)
1450 error = EBUSY;
1451 if (newrule->rt && !newrule->direction)
1452 error = EINVAL;
1453 #if NPFLOG > 0
1454 if (!newrule->log)
1455 newrule->logif = 0;
1456 if (newrule->logif >= PFLOGIFS_MAX)
1457 error = EINVAL;
1458 #endif
1459 if (pf_rtlabel_add(&newrule->src.addr) ||
1460 pf_rtlabel_add(&newrule->dst.addr))
1461 error = EBUSY;
1462 if (pfi_dynaddr_setup(&newrule->src.addr, newrule->af))
1463 error = EINVAL;
1464 if (pfi_dynaddr_setup(&newrule->dst.addr, newrule->af))
1465 error = EINVAL;
1466 if (pf_tbladdr_setup(ruleset, &newrule->src.addr))
1467 error = EINVAL;
1468 if (pf_tbladdr_setup(ruleset, &newrule->dst.addr))
1469 error = EINVAL;
1470 if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call))
1471 error = EINVAL;
1472 TAILQ_FOREACH(pa, &pf_pabuf, entries)
1473 if (pf_tbladdr_setup(ruleset, &pa->addr))
1474 error = EINVAL;
1475
1476 if (newrule->overload_tblname[0]) {
1477 if ((newrule->overload_tbl = pfr_attach_table(
1478 ruleset, newrule->overload_tblname)) ==
1479 NULL)
1480 error = EINVAL;
1481 else
1482 newrule->overload_tbl->pfrkt_flags |=
1483 PFR_TFLAG_ACTIVE;
1484 }
1485
1486 pf_mv_pool(&pf_pabuf, &newrule->rpool.list);
1487 if (((((newrule->action == PF_NAT) ||
1488 (newrule->action == PF_RDR) ||
1489 (newrule->action == PF_BINAT) ||
1490 (newrule->rt > PF_FASTROUTE)) &&
1491 !newrule->anchor)) &&
1492 (TAILQ_FIRST(&newrule->rpool.list) == NULL))
1493 error = EINVAL;
1494
1495 if (error) {
1496 pf_rm_rule(NULL, newrule);
1497 break;
1498 }
1499 newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
1500 newrule->evaluations = 0;
1501 newrule->packets[0] = newrule->packets[1] = 0;
1502 newrule->bytes[0] = newrule->bytes[1] = 0;
1503 }
1504 pf_empty_pool(&pf_pabuf);
1505
1506 if (pcr->action == PF_CHANGE_ADD_HEAD)
1507 oldrule = TAILQ_FIRST(
1508 ruleset->rules[rs_num].active.ptr);
1509 else if (pcr->action == PF_CHANGE_ADD_TAIL)
1510 oldrule = TAILQ_LAST(
1511 ruleset->rules[rs_num].active.ptr, pf_rulequeue);
1512 else {
1513 oldrule = TAILQ_FIRST(
1514 ruleset->rules[rs_num].active.ptr);
1515 while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
1516 oldrule = TAILQ_NEXT(oldrule, entries);
1517 if (oldrule == NULL) {
1518 if (newrule != NULL)
1519 pf_rm_rule(NULL, newrule);
1520 error = EINVAL;
1521 break;
1522 }
1523 }
1524
1525 if (pcr->action == PF_CHANGE_REMOVE) {
1526 pf_rm_rule(ruleset->rules[rs_num].active.ptr, oldrule);
1527 ruleset->rules[rs_num].active.rcount--;
1528 } else {
1529 if (oldrule == NULL)
1530 TAILQ_INSERT_TAIL(
1531 ruleset->rules[rs_num].active.ptr,
1532 newrule, entries);
1533 else if (pcr->action == PF_CHANGE_ADD_HEAD ||
1534 pcr->action == PF_CHANGE_ADD_BEFORE)
1535 TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
1536 else
1537 TAILQ_INSERT_AFTER(
1538 ruleset->rules[rs_num].active.ptr,
1539 oldrule, newrule, entries);
1540 ruleset->rules[rs_num].active.rcount++;
1541 }
1542
1543 nr = 0;
1544 TAILQ_FOREACH(oldrule,
1545 ruleset->rules[rs_num].active.ptr, entries)
1546 oldrule->nr = nr++;
1547
1548 ruleset->rules[rs_num].active.ticket++;
1549
1550 pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
1551 pf_remove_if_empty_ruleset(ruleset);
1552
1553 break;
1554 }
1555
1556 case DIOCCLRSTATES: {
1557 struct pf_state *s, *nexts;
1558 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
1559 int killed = 0;
1560
1561 for (s = RB_MIN(pf_state_tree_id, &tree_id); s; s = nexts) {
1562 nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);
1563
1564 if (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
1565 s->kif->pfik_name)) {
1566 #if NPFSYNC
1567 /* don't send out individual delete messages */
1568 s->sync_flags = PFSTATE_NOSYNC;
1569 #endif
1570 pf_unlink_state(s);
1571 killed++;
1572 }
1573 }
1574 psk->psk_af = killed;
1575 #if NPFSYNC
1576 pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
1577 #endif
1578 break;
1579 }
1580
1581 case DIOCKILLSTATES: {
1582 struct pf_state *s, *nexts;
1583 struct pf_state_key *sk;
1584 struct pf_state_host *src, *dst;
1585 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
1586 int killed = 0;
1587
1588 for (s = RB_MIN(pf_state_tree_id, &tree_id); s;
1589 s = nexts) {
1590 nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);
1591 sk = s->state_key;
1592
1593 if (sk->direction == PF_OUT) {
1594 src = &sk->lan;
1595 dst = &sk->ext;
1596 } else {
1597 src = &sk->ext;
1598 dst = &sk->lan;
1599 }
1600 if ((!psk->psk_af || sk->af == psk->psk_af)
1601 && (!psk->psk_proto || psk->psk_proto ==
1602 sk->proto) &&
1603 PF_MATCHA(psk->psk_src.neg,
1604 &psk->psk_src.addr.v.a.addr,
1605 &psk->psk_src.addr.v.a.mask,
1606 &src->addr, sk->af) &&
1607 PF_MATCHA(psk->psk_dst.neg,
1608 &psk->psk_dst.addr.v.a.addr,
1609 &psk->psk_dst.addr.v.a.mask,
1610 &dst->addr, sk->af) &&
1611 (psk->psk_src.port_op == 0 ||
1612 pf_match_port(psk->psk_src.port_op,
1613 psk->psk_src.port[0], psk->psk_src.port[1],
1614 src->port)) &&
1615 (psk->psk_dst.port_op == 0 ||
1616 pf_match_port(psk->psk_dst.port_op,
1617 psk->psk_dst.port[0], psk->psk_dst.port[1],
1618 dst->port)) &&
1619 (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
1620 s->kif->pfik_name))) {
1621 #if NPFSYNC > 0
1622 /* send immediate delete of state */
1623 pfsync_delete_state(s);
1624 s->sync_flags |= PFSTATE_NOSYNC;
1625 #endif
1626 pf_unlink_state(s);
1627 killed++;
1628 }
1629 }
1630 psk->psk_af = killed;
1631 break;
1632 }
1633
1634 case DIOCADDSTATE: {
1635 struct pfioc_state *ps = (struct pfioc_state *)addr;
1636 struct pfsync_state *sp = (struct pfsync_state *)ps->state;
1637 struct pf_state *s;
1638 struct pf_state_key *sk;
1639 struct pfi_kif *kif;
1640
1641 if (sp->timeout >= PFTM_MAX &&
1642 sp->timeout != PFTM_UNTIL_PACKET) {
1643 error = EINVAL;
1644 break;
1645 }
1646 s = pool_get(&pf_state_pl, PR_NOWAIT);
1647 if (s == NULL) {
1648 error = ENOMEM;
1649 break;
1650 }
1651 bzero(s, sizeof(struct pf_state));
1652 if ((sk = pf_alloc_state_key(s)) == NULL) {
1653 error = ENOMEM;
1654 break;
1655 }
1656 pf_state_import(sp, sk, s);
1657 kif = pfi_kif_get(sp->ifname);
1658 if (kif == NULL) {
1659 pool_put(&pf_state_pl, s);
1660 pool_put(&pf_state_key_pl, sk);
1661 error = ENOENT;
1662 break;
1663 }
1664 if (pf_insert_state(kif, s)) {
1665 pfi_kif_unref(kif, PFI_KIF_REF_NONE);
1666 pool_put(&pf_state_pl, s);
1667 pool_put(&pf_state_key_pl, sk);
1668 error = ENOMEM;
1669 }
1670 break;
1671 }
1672
1673 case DIOCGETSTATE: {
1674 struct pfioc_state *ps = (struct pfioc_state *)addr;
1675 struct pf_state *s;
1676 u_int32_t nr;
1677
1678 nr = 0;
1679 RB_FOREACH(s, pf_state_tree_id, &tree_id) {
1680 if (nr >= ps->nr)
1681 break;
1682 nr++;
1683 }
1684 if (s == NULL) {
1685 error = EBUSY;
1686 break;
1687 }
1688
1689 pf_state_export((struct pfsync_state *)&ps->state,
1690 s->state_key, s);
1691 break;
1692 }
1693
1694 case DIOCGETSTATES: {
1695 struct pfioc_states *ps = (struct pfioc_states *)addr;
1696 struct pf_state *state;
1697 struct pfsync_state *p, *pstore;
1698 u_int32_t nr = 0;
1699
1700 if (ps->ps_len == 0) {
1701 nr = pf_status.states;
1702 ps->ps_len = sizeof(struct pfsync_state) * nr;
1703 break;
1704 }
1705
1706 pstore = malloc(sizeof(*pstore), M_TEMP, M_WAITOK);
1707
1708 p = ps->ps_states;
1709
1710 state = TAILQ_FIRST(&state_list);
1711 while (state) {
1712 if (state->timeout != PFTM_UNLINKED) {
1713 if ((nr+1) * sizeof(*p) > (unsigned)ps->ps_len)
1714 break;
1715
1716 pf_state_export(pstore,
1717 state->state_key, state);
1718 error = copyout(pstore, p, sizeof(*p));
1719 if (error) {
1720 free(pstore, M_TEMP);
1721 goto fail;
1722 }
1723 p++;
1724 nr++;
1725 }
1726 state = TAILQ_NEXT(state, entry_list);
1727 }
1728
1729 ps->ps_len = sizeof(struct pfsync_state) * nr;
1730
1731 free(pstore, M_TEMP);
1732 break;
1733 }
1734
1735 case DIOCGETSTATUS: {
1736 struct pf_status *s = (struct pf_status *)addr;
1737 bcopy(&pf_status, s, sizeof(struct pf_status));
1738 pfi_fill_oldstatus(s);
1739 break;
1740 }
1741
1742 case DIOCSETSTATUSIF: {
1743 struct pfioc_if *pi = (struct pfioc_if *)addr;
1744
1745 if (pi->ifname[0] == 0) {
1746 bzero(pf_status.ifname, IFNAMSIZ);
1747 break;
1748 }
1749 if (ifunit(pi->ifname) == NULL) {
1750 error = EINVAL;
1751 break;
1752 }
1753 strlcpy(pf_status.ifname, pi->ifname, IFNAMSIZ);
1754 break;
1755 }
1756
1757 case DIOCCLRSTATUS: {
1758 bzero(pf_status.counters, sizeof(pf_status.counters));
1759 bzero(pf_status.fcounters, sizeof(pf_status.fcounters));
1760 bzero(pf_status.scounters, sizeof(pf_status.scounters));
1761 pf_status.since = time_second;
1762 if (*pf_status.ifname)
1763 pfi_clr_istats(pf_status.ifname);
1764 break;
1765 }
1766
1767 case DIOCNATLOOK: {
1768 struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr;
1769 struct pf_state_key *sk;
1770 struct pf_state *state;
1771 struct pf_state_key_cmp key;
1772 int m = 0, direction = pnl->direction;
1773
1774 key.af = pnl->af;
1775 key.proto = pnl->proto;
1776
1777 if (!pnl->proto ||
1778 PF_AZERO(&pnl->saddr, pnl->af) ||
1779 PF_AZERO(&pnl->daddr, pnl->af) ||
1780 ((pnl->proto == IPPROTO_TCP ||
1781 pnl->proto == IPPROTO_UDP) &&
1782 (!pnl->dport || !pnl->sport)))
1783 error = EINVAL;
1784 else {
1785 /*
1786  * userland gives us the source and destination of the
1787  * connection; reverse the lookup so we ask for the
1788  * return traffic, which lets us find the entry in the
1789  * state tree
1790  */
1791 if (direction == PF_IN) {
1792 PF_ACPY(&key.ext.addr, &pnl->daddr, pnl->af);
1793 key.ext.port = pnl->dport;
1794 PF_ACPY(&key.gwy.addr, &pnl->saddr, pnl->af);
1795 key.gwy.port = pnl->sport;
1796 state = pf_find_state_all(&key, PF_EXT_GWY, &m);
1797 } else {
1798 PF_ACPY(&key.lan.addr, &pnl->daddr, pnl->af);
1799 key.lan.port = pnl->dport;
1800 PF_ACPY(&key.ext.addr, &pnl->saddr, pnl->af);
1801 key.ext.port = pnl->sport;
1802 state = pf_find_state_all(&key, PF_LAN_EXT, &m);
1803 }
1804 if (m > 1)
1805 error = E2BIG;
1806 else if (state != NULL) {
1807 sk = state->state_key;
1808 if (direction == PF_IN) {
1809 PF_ACPY(&pnl->rsaddr, &sk->lan.addr,
1810 sk->af);
1811 pnl->rsport = sk->lan.port;
1812 PF_ACPY(&pnl->rdaddr, &pnl->daddr,
1813 pnl->af);
1814 pnl->rdport = pnl->dport;
1815 } else {
1816 PF_ACPY(&pnl->rdaddr, &sk->gwy.addr,
1817 sk->af);
1818 pnl->rdport = sk->gwy.port;
1819 PF_ACPY(&pnl->rsaddr, &pnl->saddr,
1820 pnl->af);
1821 pnl->rsport = pnl->sport;
1822 }
1823 } else
1824 error = ENOENT;
1825 }
1826 break;
1827 }
1828
1829 case DIOCSETTIMEOUT: {
1830 struct pfioc_tm *pt = (struct pfioc_tm *)addr;
1831 int old;
1832
1833 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
1834 pt->seconds < 0) {
1835 error = EINVAL;
1836 goto fail;
1837 }
1838 old = pf_default_rule.timeout[pt->timeout];
1839 if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0)
1840 pt->seconds = 1;
1841 pf_default_rule.timeout[pt->timeout] = pt->seconds;
1842 if (pt->timeout == PFTM_INTERVAL && pt->seconds < old)
1843 wakeup(pf_purge_thread);
1844 pt->seconds = old;
1845 break;
1846 }
1847
1848 case DIOCGETTIMEOUT: {
1849 struct pfioc_tm *pt = (struct pfioc_tm *)addr;
1850
1851 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
1852 error = EINVAL;
1853 goto fail;
1854 }
1855 pt->seconds = pf_default_rule.timeout[pt->timeout];
1856 break;
1857 }
1858
1859 case DIOCGETLIMIT: {
1860 struct pfioc_limit *pl = (struct pfioc_limit *)addr;
1861
1862 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
1863 error = EINVAL;
1864 goto fail;
1865 }
1866 pl->limit = pf_pool_limits[pl->index].limit;
1867 break;
1868 }
1869
1870 case DIOCSETLIMIT: {
1871 struct pfioc_limit *pl = (struct pfioc_limit *)addr;
1872 int old_limit;
1873
1874 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
1875 pf_pool_limits[pl->index].pp == NULL) {
1876 error = EINVAL;
1877 goto fail;
1878 }
1879 if (pool_sethardlimit(pf_pool_limits[pl->index].pp,
1880 pl->limit, NULL, 0) != 0) {
1881 error = EBUSY;
1882 goto fail;
1883 }
1884 old_limit = pf_pool_limits[pl->index].limit;
1885 pf_pool_limits[pl->index].limit = pl->limit;
1886 pl->limit = old_limit;
1887 break;
1888 }
1889
1890 case DIOCSETDEBUG: {
1891 u_int32_t *level = (u_int32_t *)addr;
1892
1893 pf_status.debug = *level;
1894 break;
1895 }
1896
1897 case DIOCCLRRULECTRS: {
1898 /* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
1899 struct pf_ruleset *ruleset = &pf_main_ruleset;
1900 struct pf_rule *rule;
1901
1902 TAILQ_FOREACH(rule,
1903 ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
1904 rule->evaluations = 0;
1905 rule->packets[0] = rule->packets[1] = 0;
1906 rule->bytes[0] = rule->bytes[1] = 0;
1907 }
1908 break;
1909 }
1910
1911 #ifdef ALTQ
1912 case DIOCSTARTALTQ: {
1913 struct pf_altq *altq;
1914
1915 /* enable all altq interfaces on active list */
1916 TAILQ_FOREACH(altq, pf_altqs_active, entries) {
1917 if (altq->qname[0] == 0) {
1918 error = pf_enable_altq(altq);
1919 if (error != 0)
1920 break;
1921 }
1922 }
1923 if (error == 0)
1924 pf_altq_running = 1;
1925 DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));
1926 break;
1927 }
1928
1929 case DIOCSTOPALTQ: {
1930 struct pf_altq *altq;
1931
1932 /* disable all altq interfaces on active list */
1933 TAILQ_FOREACH(altq, pf_altqs_active, entries) {
1934 if (altq->qname[0] == 0) {
1935 error = pf_disable_altq(altq);
1936 if (error != 0)
1937 break;
1938 }
1939 }
1940 if (error == 0)
1941 pf_altq_running = 0;
1942 DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
1943 break;
1944 }
1945
1946 case DIOCADDALTQ: {
1947 struct pfioc_altq *pa = (struct pfioc_altq *)addr;
1948 struct pf_altq *altq, *a;
1949
1950 if (pa->ticket != ticket_altqs_inactive) {
1951 error = EBUSY;
1952 break;
1953 }
1954 altq = pool_get(&pf_altq_pl, PR_NOWAIT);
1955 if (altq == NULL) {
1956 error = ENOMEM;
1957 break;
1958 }
1959 bcopy(&pa->altq, altq, sizeof(struct pf_altq));
1960
1961 /*
1962  * if this is for a queue (qname set), look up its id and
1963  * share the discipline already attached to the interface
1964  */
1965 if (altq->qname[0] != 0) {
1966 if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
1967 error = EBUSY;
1968 pool_put(&pf_altq_pl, altq);
1969 break;
1970 }
1971 TAILQ_FOREACH(a, pf_altqs_inactive, entries) {
1972 if (strncmp(a->ifname, altq->ifname,
1973 IFNAMSIZ) == 0 && a->qname[0] == 0) {
1974 altq->altq_disc = a->altq_disc;
1975 break;
1976 }
1977 }
1978 }
1979
1980 error = altq_add(altq);
1981 if (error) {
1982 pool_put(&pf_altq_pl, altq);
1983 break;
1984 }
1985
1986 TAILQ_INSERT_TAIL(pf_altqs_inactive, altq, entries);
1987 bcopy(altq, &pa->altq, sizeof(struct pf_altq));
1988 break;
1989 }
1990
1991 case DIOCGETALTQS: {
1992 struct pfioc_altq *pa = (struct pfioc_altq *)addr;
1993 struct pf_altq *altq;
1994
1995 pa->nr = 0;
1996 TAILQ_FOREACH(altq, pf_altqs_active, entries)
1997 pa->nr++;
1998 pa->ticket = ticket_altqs_active;
1999 break;
2000 }
2001
2002 case DIOCGETALTQ: {
2003 struct pfioc_altq *pa = (struct pfioc_altq *)addr;
2004 struct pf_altq *altq;
2005 u_int32_t nr;
2006
2007 if (pa->ticket != ticket_altqs_active) {
2008 error = EBUSY;
2009 break;
2010 }
2011 nr = 0;
2012 altq = TAILQ_FIRST(pf_altqs_active);
2013 while ((altq != NULL) && (nr < pa->nr)) {
2014 altq = TAILQ_NEXT(altq, entries);
2015 nr++;
2016 }
2017 if (altq == NULL) {
2018 error = EBUSY;
2019 break;
2020 }
2021 bcopy(altq, &pa->altq, sizeof(struct pf_altq));
2022 break;
2023 }
2024
2025 case DIOCCHANGEALTQ:
2026 /* modifying altq entries in place is not supported */
2027 error = ENODEV;
2028 break;
2029
2030 case DIOCGETQSTATS: {
2031 struct pfioc_qstats *pq = (struct pfioc_qstats *)addr;
2032 struct pf_altq *altq;
2033 u_int32_t nr;
2034 int nbytes;
2035
2036 if (pq->ticket != ticket_altqs_active) {
2037 error = EBUSY;
2038 break;
2039 }
2040 nbytes = pq->nbytes;
2041 nr = 0;
2042 altq = TAILQ_FIRST(pf_altqs_active);
2043 while ((altq != NULL) && (nr < pq->nr)) {
2044 altq = TAILQ_NEXT(altq, entries);
2045 nr++;
2046 }
2047 if (altq == NULL) {
2048 error = EBUSY;
2049 break;
2050 }
2051 error = altq_getqstats(altq, pq->buf, &nbytes);
2052 if (error == 0) {
2053 pq->scheduler = altq->scheduler;
2054 pq->nbytes = nbytes;
2055 }
2056 break;
2057 }
2058 #endif
2059
2060 case DIOCBEGINADDRS: {
2061 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2062
2063 pf_empty_pool(&pf_pabuf);
2064 pp->ticket = ++ticket_pabuf;
2065 break;
2066 }
2067
2068 case DIOCADDADDR: {
2069 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2070
2071 if (pp->ticket != ticket_pabuf) {
2072 error = EBUSY;
2073 break;
2074 }
2075 #ifndef INET
2076 if (pp->af == AF_INET) {
2077 error = EAFNOSUPPORT;
2078 break;
2079 }
2080 #endif
2081 #ifndef INET6
2082 if (pp->af == AF_INET6) {
2083 error = EAFNOSUPPORT;
2084 break;
2085 }
2086 #endif
2087 if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
2088 pp->addr.addr.type != PF_ADDR_DYNIFTL &&
2089 pp->addr.addr.type != PF_ADDR_TABLE) {
2090 error = EINVAL;
2091 break;
2092 }
2093 pa = pool_get(&pf_pooladdr_pl, PR_NOWAIT);
2094 if (pa == NULL) {
2095 error = ENOMEM;
2096 break;
2097 }
2098 bcopy(&pp->addr, pa, sizeof(struct pf_pooladdr));
2099 if (pa->ifname[0]) {
2100 pa->kif = pfi_kif_get(pa->ifname);
2101 if (pa->kif == NULL) {
2102 pool_put(&pf_pooladdr_pl, pa);
2103 error = EINVAL;
2104 break;
2105 }
2106 pfi_kif_ref(pa->kif, PFI_KIF_REF_RULE);
2107 }
2108 if (pfi_dynaddr_setup(&pa->addr, pp->af)) {
2109 pfi_dynaddr_remove(&pa->addr);
2110 pfi_kif_unref(pa->kif, PFI_KIF_REF_RULE);
2111 pool_put(&pf_pooladdr_pl, pa);
2112 error = EINVAL;
2113 break;
2114 }
2115 TAILQ_INSERT_TAIL(&pf_pabuf, pa, entries);
2116 break;
2117 }
2118
2119 case DIOCGETADDRS: {
2120 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2121
2122 pp->nr = 0;
2123 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
2124 pp->r_num, 0, 1, 0);
2125 if (pool == NULL) {
2126 error = EBUSY;
2127 break;
2128 }
2129 TAILQ_FOREACH(pa, &pool->list, entries)
2130 pp->nr++;
2131 break;
2132 }
2133
2134 case DIOCGETADDR: {
2135 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2136 u_int32_t nr = 0;
2137
2138 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
2139 pp->r_num, 0, 1, 1);
2140 if (pool == NULL) {
2141 error = EBUSY;
2142 break;
2143 }
2144 pa = TAILQ_FIRST(&pool->list);
2145 while ((pa != NULL) && (nr < pp->nr)) {
2146 pa = TAILQ_NEXT(pa, entries);
2147 nr++;
2148 }
2149 if (pa == NULL) {
2150 error = EBUSY;
2151 break;
2152 }
2153 bcopy(pa, &pp->addr, sizeof(struct pf_pooladdr));
2154 pfi_dynaddr_copyout(&pp->addr.addr);
2155 pf_tbladdr_copyout(&pp->addr.addr);
2156 pf_rtlabel_copyout(&pp->addr.addr);
2157 break;
2158 }
2159
2160 case DIOCCHANGEADDR: {
2161 struct pfioc_pooladdr *pca = (struct pfioc_pooladdr *)addr;
2162 struct pf_pooladdr *oldpa = NULL, *newpa = NULL;
2163 struct pf_ruleset *ruleset;
2164
2165 if (pca->action < PF_CHANGE_ADD_HEAD ||
2166 pca->action > PF_CHANGE_REMOVE) {
2167 error = EINVAL;
2168 break;
2169 }
2170 if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
2171 pca->addr.addr.type != PF_ADDR_DYNIFTL &&
2172 pca->addr.addr.type != PF_ADDR_TABLE) {
2173 error = EINVAL;
2174 break;
2175 }
2176
2177 ruleset = pf_find_ruleset(pca->anchor);
2178 if (ruleset == NULL) {
2179 error = EBUSY;
2180 break;
2181 }
2182 pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action,
2183 pca->r_num, pca->r_last, 1, 1);
2184 if (pool == NULL) {
2185 error = EBUSY;
2186 break;
2187 }
2188 if (pca->action != PF_CHANGE_REMOVE) {
2189 newpa = pool_get(&pf_pooladdr_pl, PR_NOWAIT);
2190 if (newpa == NULL) {
2191 error = ENOMEM;
2192 break;
2193 }
2194 bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
2195 #ifndef INET
2196 if (pca->af == AF_INET) {
2197 pool_put(&pf_pooladdr_pl, newpa);
2198 error = EAFNOSUPPORT;
2199 break;
2200 }
2201 #endif
2202 #ifndef INET6
2203 if (pca->af == AF_INET6) {
2204 pool_put(&pf_pooladdr_pl, newpa);
2205 error = EAFNOSUPPORT;
2206 break;
2207 }
2208 #endif
2209 if (newpa->ifname[0]) {
2210 newpa->kif = pfi_kif_get(newpa->ifname);
2211 if (newpa->kif == NULL) {
2212 pool_put(&pf_pooladdr_pl, newpa);
2213 error = EINVAL;
2214 break;
2215 }
2216 pfi_kif_ref(newpa->kif, PFI_KIF_REF_RULE);
2217 } else
2218 newpa->kif = NULL;
2219 if (pfi_dynaddr_setup(&newpa->addr, pca->af) ||
2220 pf_tbladdr_setup(ruleset, &newpa->addr)) {
2221 pfi_dynaddr_remove(&newpa->addr);
2222 pfi_kif_unref(newpa->kif, PFI_KIF_REF_RULE);
2223 pool_put(&pf_pooladdr_pl, newpa);
2224 error = EINVAL;
2225 break;
2226 }
2227 }
2228
2229 if (pca->action == PF_CHANGE_ADD_HEAD)
2230 oldpa = TAILQ_FIRST(&pool->list);
2231 else if (pca->action == PF_CHANGE_ADD_TAIL)
2232 oldpa = TAILQ_LAST(&pool->list, pf_palist);
2233 else {
2234 int i = 0;
2235
2236 oldpa = TAILQ_FIRST(&pool->list);
2237 while ((oldpa != NULL) && (i < pca->nr)) {
2238 oldpa = TAILQ_NEXT(oldpa, entries);
2239 i++;
2240 }
2241 if (oldpa == NULL) {
if (newpa != NULL) {
/* don't leak the entry staged above */
pfi_dynaddr_remove(&newpa->addr);
pf_tbladdr_remove(&newpa->addr);
pfi_kif_unref(newpa->kif, PFI_KIF_REF_RULE);
pool_put(&pf_pooladdr_pl, newpa);
}
2242 error = EINVAL;
2243 break;
2244 }
2245 }
2246
2247 if (pca->action == PF_CHANGE_REMOVE) {
2248 TAILQ_REMOVE(&pool->list, oldpa, entries);
2249 pfi_dynaddr_remove(&oldpa->addr);
2250 pf_tbladdr_remove(&oldpa->addr);
2251 pfi_kif_unref(oldpa->kif, PFI_KIF_REF_RULE);
2252 pool_put(&pf_pooladdr_pl, oldpa);
2253 } else {
2254 if (oldpa == NULL)
2255 TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
2256 else if (pca->action == PF_CHANGE_ADD_HEAD ||
2257 pca->action == PF_CHANGE_ADD_BEFORE)
2258 TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
2259 else
2260 TAILQ_INSERT_AFTER(&pool->list, oldpa,
2261 newpa, entries);
2262 }
2263
2264 pool->cur = TAILQ_FIRST(&pool->list);
if (pool->cur != NULL) {
/* the pool may be empty after a REMOVE */
2265 PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr,
2266 pca->af);
}
2267 break;
2268 }
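/*
 * DIOCCHANGEADDR edits an active pool in place: ADD_HEAD/ADD_TAIL insert
 * relative to the ends of the list, ADD_BEFORE/ADD_AFTER insert relative to
 * the pca->nr'th entry, and REMOVE deletes that entry.  The round-robin
 * state (pool->cur and pool->counter) is reset to the head of the list
 * afterwards.
 */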
2269
2270 case DIOCGETRULESETS: {
2271 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
2272 struct pf_ruleset *ruleset;
2273 struct pf_anchor *anchor;
2274
2275 pr->path[sizeof(pr->path) - 1] = 0;
2276 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
2277 error = EINVAL;
2278 break;
2279 }
2280 pr->nr = 0;
2281 if (ruleset->anchor == NULL) {
2282 /* the main ruleset: count only the top-level anchors */
2283 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
2284 if (anchor->parent == NULL)
2285 pr->nr++;
2286 } else {
2287 RB_FOREACH(anchor, pf_anchor_node,
2288 &ruleset->anchor->children)
2289 pr->nr++;
2290 }
2291 break;
2292 }
2293
2294 case DIOCGETRULESET: {
2295 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
2296 struct pf_ruleset *ruleset;
2297 struct pf_anchor *anchor;
2298 u_int32_t nr = 0;
2299
2300 pr->path[sizeof(pr->path) - 1] = 0;
2301 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
2302 error = EINVAL;
2303 break;
2304 }
2305 pr->name[0] = 0;
2306 if (ruleset->anchor == NULL) {
2307 /* the main ruleset: return the pr->nr'th top-level anchor */
2308 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
2309 if (anchor->parent == NULL && nr++ == pr->nr) {
2310 strlcpy(pr->name, anchor->name,
2311 sizeof(pr->name));
2312 break;
2313 }
2314 } else {
2315 RB_FOREACH(anchor, pf_anchor_node,
2316 &ruleset->anchor->children)
2317 if (nr++ == pr->nr) {
2318 strlcpy(pr->name, anchor->name,
2319 sizeof(pr->name));
2320 break;
2321 }
2322 }
2323 if (!pr->name[0])
2324 error = EBUSY;
2325 break;
2326 }
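/*
 * Anchor enumeration mirrors the pool-address walk: DIOCGETRULESETS counts
 * the child anchors under pr->path and DIOCGETRULESET returns the name of
 * the pr->nr'th child.  An empty pr->path selects the main ruleset, whose
 * children are the anchors without a parent.
 */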
2327
2328 case DIOCRCLRTABLES: {
2329 struct pfioc_table *io = (struct pfioc_table *)addr;
2330
2331 if (io->pfrio_esize != 0) {
2332 error = ENODEV;
2333 break;
2334 }
2335 error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
2336 io->pfrio_flags | PFR_FLAG_USERIOCTL);
2337 break;
2338 }
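/*
 * This case and the DIOCR* table ioctls below share one convention: userland
 * passes the element size it was compiled with in pfrio_esize, and the
 * kernel rejects the request with ENODEV on a mismatch, guarding against
 * struct layout skew between pfctl and the kernel.  PFR_FLAG_USERIOCTL marks
 * the buffers as userland memory so the pfr_* routines copy them in and out
 * rather than touching them directly.
 */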
2339
2340 case DIOCRADDTABLES: {
2341 struct pfioc_table *io = (struct pfioc_table *)addr;
2342
2343 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2344 error = ENODEV;
2345 break;
2346 }
2347 error = pfr_add_tables(io->pfrio_buffer, io->pfrio_size,
2348 &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2349 break;
2350 }
2351
2352 case DIOCRDELTABLES: {
2353 struct pfioc_table *io = (struct pfioc_table *)addr;
2354
2355 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2356 error = ENODEV;
2357 break;
2358 }
2359 error = pfr_del_tables(io->pfrio_buffer, io->pfrio_size,
2360 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2361 break;
2362 }
2363
2364 case DIOCRGETTABLES: {
2365 struct pfioc_table *io = (struct pfioc_table *)addr;
2366
2367 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2368 error = ENODEV;
2369 break;
2370 }
2371 error = pfr_get_tables(&io->pfrio_table, io->pfrio_buffer,
2372 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2373 break;
2374 }
2375
2376 case DIOCRGETTSTATS: {
2377 struct pfioc_table *io = (struct pfioc_table *)addr;
2378
2379 if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
2380 error = ENODEV;
2381 break;
2382 }
2383 error = pfr_get_tstats(&io->pfrio_table, io->pfrio_buffer,
2384 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2385 break;
2386 }
2387
2388 case DIOCRCLRTSTATS: {
2389 struct pfioc_table *io = (struct pfioc_table *)addr;
2390
2391 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2392 error = ENODEV;
2393 break;
2394 }
2395 error = pfr_clr_tstats(io->pfrio_buffer, io->pfrio_size,
2396 &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2397 break;
2398 }
2399
2400 case DIOCRSETTFLAGS: {
2401 struct pfioc_table *io = (struct pfioc_table *)addr;
2402
2403 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2404 error = ENODEV;
2405 break;
2406 }
2407 error = pfr_set_tflags(io->pfrio_buffer, io->pfrio_size,
2408 io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
2409 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2410 break;
2411 }
2412
2413 case DIOCRCLRADDRS: {
2414 struct pfioc_table *io = (struct pfioc_table *)addr;
2415
2416 if (io->pfrio_esize != 0) {
2417 error = ENODEV;
2418 break;
2419 }
2420 error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
2421 io->pfrio_flags | PFR_FLAG_USERIOCTL);
2422 break;
2423 }
2424
2425 case DIOCRADDADDRS: {
2426 struct pfioc_table *io = (struct pfioc_table *)addr;
2427
2428 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2429 error = ENODEV;
2430 break;
2431 }
2432 error = pfr_add_addrs(&io->pfrio_table, io->pfrio_buffer,
2433 io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
2434 PFR_FLAG_USERIOCTL);
2435 break;
2436 }
2437
2438 case DIOCRDELADDRS: {
2439 struct pfioc_table *io = (struct pfioc_table *)addr;
2440
2441 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2442 error = ENODEV;
2443 break;
2444 }
2445 error = pfr_del_addrs(&io->pfrio_table, io->pfrio_buffer,
2446 io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
2447 PFR_FLAG_USERIOCTL);
2448 break;
2449 }
2450
2451 case DIOCRSETADDRS: {
2452 struct pfioc_table *io = (struct pfioc_table *)addr;
2453
2454 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2455 error = ENODEV;
2456 break;
2457 }
2458 error = pfr_set_addrs(&io->pfrio_table, io->pfrio_buffer,
2459 io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
2460 &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
2461 PFR_FLAG_USERIOCTL, 0);
2462 break;
2463 }
2464
2465 case DIOCRGETADDRS: {
2466 struct pfioc_table *io = (struct pfioc_table *)addr;
2467
2468 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2469 error = ENODEV;
2470 break;
2471 }
2472 error = pfr_get_addrs(&io->pfrio_table, io->pfrio_buffer,
2473 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2474 break;
2475 }
2476
2477 case DIOCRGETASTATS: {
2478 struct pfioc_table *io = (struct pfioc_table *)addr;
2479
2480 if (io->pfrio_esize != sizeof(struct pfr_astats)) {
2481 error = ENODEV;
2482 break;
2483 }
2484 error = pfr_get_astats(&io->pfrio_table, io->pfrio_buffer,
2485 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2486 break;
2487 }
2488
2489 case DIOCRCLRASTATS: {
2490 struct pfioc_table *io = (struct pfioc_table *)addr;
2491
2492 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2493 error = ENODEV;
2494 break;
2495 }
2496 error = pfr_clr_astats(&io->pfrio_table, io->pfrio_buffer,
2497 io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
2498 PFR_FLAG_USERIOCTL);
2499 break;
2500 }
2501
2502 case DIOCRTSTADDRS: {
2503 struct pfioc_table *io = (struct pfioc_table *)addr;
2504
2505 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2506 error = ENODEV;
2507 break;
2508 }
2509 error = pfr_tst_addrs(&io->pfrio_table, io->pfrio_buffer,
2510 io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
2511 PFR_FLAG_USERIOCTL);
2512 break;
2513 }
2514
2515 case DIOCRINADEFINE: {
2516 struct pfioc_table *io = (struct pfioc_table *)addr;
2517
2518 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2519 error = ENODEV;
2520 break;
2521 }
2522 error = pfr_ina_define(&io->pfrio_table, io->pfrio_buffer,
2523 io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
2524 io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2525 break;
2526 }
2527
2528 case DIOCOSFPADD: {
2529 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
2530 error = pf_osfp_add(io);
2531 break;
2532 }
2533
2534 case DIOCOSFPGET: {
2535 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
2536 error = pf_osfp_get(io);
2537 break;
2538 }
2539
2540 case DIOCXBEGIN: {
2541 struct pfioc_trans *io = (struct pfioc_trans *)addr;
2542 struct pfioc_trans_e *ioe;
2543 struct pfr_table *table;
2544 int i;
2545
2546 if (io->esize != sizeof(*ioe)) {
2547 error = ENODEV;
2548 goto fail;
2549 }
2550 ioe = (struct pfioc_trans_e *)malloc(sizeof(*ioe),
2551 M_TEMP, M_WAITOK);
2552 table = (struct pfr_table *)malloc(sizeof(*table),
2553 M_TEMP, M_WAITOK);
2554 for (i = 0; i < io->size; i++) {
2555 if (copyin(io->array+i, ioe, sizeof(*ioe))) {
2556 free(table, M_TEMP);
2557 free(ioe, M_TEMP);
2558 error = EFAULT;
2559 goto fail;
2560 }
2561 switch (ioe->rs_num) {
2562 #ifdef ALTQ
2563 case PF_RULESET_ALTQ:
2564 if (ioe->anchor[0]) {
2565 free(table, M_TEMP);
2566 free(ioe, M_TEMP);
2567 error = EINVAL;
2568 goto fail;
2569 }
2570 if ((error = pf_begin_altq(&ioe->ticket))) {
2571 free(table, M_TEMP);
2572 free(ioe, M_TEMP);
2573 goto fail;
2574 }
2575 break;
2576 #endif
2577 case PF_RULESET_TABLE:
2578 bzero(table, sizeof(*table));
2579 strlcpy(table->pfrt_anchor, ioe->anchor,
2580 sizeof(table->pfrt_anchor));
2581 if ((error = pfr_ina_begin(table,
2582 &ioe->ticket, NULL, 0))) {
2583 free(table, M_TEMP);
2584 free(ioe, M_TEMP);
2585 goto fail;
2586 }
2587 break;
2588 default:
2589 if ((error = pf_begin_rules(&ioe->ticket,
2590 ioe->rs_num, ioe->anchor))) {
2591 free(table, M_TEMP);
2592 free(ioe, M_TEMP);
2593 goto fail;
2594 }
2595 break;
2596 }
2597 if (copyout(ioe, io->array+i, sizeof(io->array[i]))) {
2598 free(table, M_TEMP);
2599 free(ioe, M_TEMP);
2600 error = EFAULT;
2601 goto fail;
2602 }
2603 }
2604 free(table, M_TEMP);
2605 free(ioe, M_TEMP);
2606 break;
2607 }
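/*
 * DIOCXBEGIN, DIOCXROLLBACK and DIOCXCOMMIT implement an all-or-nothing
 * ruleset load.  Userland passes an array of pfioc_trans_e elements, one per
 * ruleset (rules, tables and, with ALTQ, queues); DIOCXBEGIN hands a ticket
 * back for each element and the same tickets must be presented on commit or
 * rollback.  A rough userland sketch (illustrative only, error handling
 * omitted):
 *
 *	struct pfioc_trans io;
 *	struct pfioc_trans_e ioe[1];
 *
 *	memset(&io, 0, sizeof(io));
 *	memset(ioe, 0, sizeof(ioe));
 *	ioe[0].rs_num = PF_RULESET_FILTER;
 *	io.size = 1;
 *	io.esize = sizeof(ioe[0]);
 *	io.array = ioe;
 *	ioctl(dev, DIOCXBEGIN, &io);	(fills ioe[0].ticket)
 *	... load rules using ioe[0].ticket ...
 *	ioctl(dev, DIOCXCOMMIT, &io);	(or DIOCXROLLBACK to abort)
 */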
2608
2609 case DIOCXROLLBACK: {
2610 struct pfioc_trans *io = (struct pfioc_trans *)addr;
2611 struct pfioc_trans_e *ioe;
2612 struct pfr_table *table;
2613 int i;
2614
2615 if (io->esize != sizeof(*ioe)) {
2616 error = ENODEV;
2617 goto fail;
2618 }
2619 ioe = (struct pfioc_trans_e *)malloc(sizeof(*ioe),
2620 M_TEMP, M_WAITOK);
2621 table = (struct pfr_table *)malloc(sizeof(*table),
2622 M_TEMP, M_WAITOK);
2623 for (i = 0; i < io->size; i++) {
2624 if (copyin(io->array+i, ioe, sizeof(*ioe))) {
2625 free(table, M_TEMP);
2626 free(ioe, M_TEMP);
2627 error = EFAULT;
2628 goto fail;
2629 }
2630 switch (ioe->rs_num) {
2631 #ifdef ALTQ
2632 case PF_RULESET_ALTQ:
2633 if (ioe->anchor[0]) {
2634 free(table, M_TEMP);
2635 free(ioe, M_TEMP);
2636 error = EINVAL;
2637 goto fail;
2638 }
2639 if ((error = pf_rollback_altq(ioe->ticket))) {
2640 free(table, M_TEMP);
2641 free(ioe, M_TEMP);
2642 goto fail;
2643 }
2644 break;
2645 #endif
2646 case PF_RULESET_TABLE:
2647 bzero(table, sizeof(*table));
2648 strlcpy(table->pfrt_anchor, ioe->anchor,
2649 sizeof(table->pfrt_anchor));
2650 if ((error = pfr_ina_rollback(table,
2651 ioe->ticket, NULL, 0))) {
2652 free(table, M_TEMP);
2653 free(ioe, M_TEMP);
2654 goto fail;
2655 }
2656 break;
2657 default:
2658 if ((error = pf_rollback_rules(ioe->ticket,
2659 ioe->rs_num, ioe->anchor))) {
2660 free(table, M_TEMP);
2661 free(ioe, M_TEMP);
2662 goto fail;
2663 }
2664 break;
2665 }
2666 }
2667 free(table, M_TEMP);
2668 free(ioe, M_TEMP);
2669 break;
2670 }
2671
2672 case DIOCXCOMMIT: {
2673 struct pfioc_trans *io = (struct pfioc_trans *)addr;
2674 struct pfioc_trans_e *ioe;
2675 struct pfr_table *table;
2676 struct pf_ruleset *rs;
2677 int i;
2678
2679 if (io->esize != sizeof(*ioe)) {
2680 error = ENODEV;
2681 goto fail;
2682 }
2683 ioe = (struct pfioc_trans_e *)malloc(sizeof(*ioe),
2684 M_TEMP, M_WAITOK);
2685 table = (struct pfr_table *)malloc(sizeof(*table),
2686 M_TEMP, M_WAITOK);
2687
2688 for (i = 0; i < io->size; i++) {
2689 if (copyin(io->array+i, ioe, sizeof(*ioe))) {
2690 free(table, M_TEMP);
2691 free(ioe, M_TEMP);
2692 error = EFAULT;
2693 goto fail;
2694 }
2695 switch (ioe->rs_num) {
2696 #ifdef ALTQ
2697 case PF_RULESET_ALTQ:
2698 if (ioe->anchor[0]) {
2699 free(table, M_TEMP);
2700 free(ioe, M_TEMP);
2701 error = EINVAL;
2702 goto fail;
2703 }
2704 if (!altqs_inactive_open || ioe->ticket !=
2705 ticket_altqs_inactive) {
2706 free(table, M_TEMP);
2707 free(ioe, M_TEMP);
2708 error = EBUSY;
2709 goto fail;
2710 }
2711 break;
2712 #endif
2713 case PF_RULESET_TABLE:
2714 rs = pf_find_ruleset(ioe->anchor);
2715 if (rs == NULL || !rs->topen || ioe->ticket !=
2716 rs->tticket) {
2717 free(table, M_TEMP);
2718 free(ioe, M_TEMP);
2719 error = EBUSY;
2720 goto fail;
2721 }
2722 break;
2723 default:
2724 if (ioe->rs_num < 0 || ioe->rs_num >=
2725 PF_RULESET_MAX) {
2726 free(table, M_TEMP);
2727 free(ioe, M_TEMP);
2728 error = EINVAL;
2729 goto fail;
2730 }
2731 rs = pf_find_ruleset(ioe->anchor);
2732 if (rs == NULL ||
2733 !rs->rules[ioe->rs_num].inactive.open ||
2734 rs->rules[ioe->rs_num].inactive.ticket !=
2735 ioe->ticket) {
2736 free(table, M_TEMP);
2737 free(ioe, M_TEMP);
2738 error = EBUSY;
2739 goto fail;
2740 }
2741 break;
2742 }
2743 }
2744
2745 for (i = 0; i < io->size; i++) {
2746 if (copyin(io->array+i, ioe, sizeof(*ioe))) {
2747 free(table, M_TEMP);
2748 free(ioe, M_TEMP);
2749 error = EFAULT;
2750 goto fail;
2751 }
2752 switch (ioe->rs_num) {
2753 #ifdef ALTQ
2754 case PF_RULESET_ALTQ:
2755 if ((error = pf_commit_altq(ioe->ticket))) {
2756 free(table, M_TEMP);
2757 free(ioe, M_TEMP);
2758 goto fail;
2759 }
2760 break;
2761 #endif
2762 case PF_RULESET_TABLE:
2763 bzero(table, sizeof(*table));
2764 strlcpy(table->pfrt_anchor, ioe->anchor,
2765 sizeof(table->pfrt_anchor));
2766 if ((error = pfr_ina_commit(table, ioe->ticket,
2767 NULL, NULL, 0))) {
2768 free(table, M_TEMP);
2769 free(ioe, M_TEMP);
2770 goto fail;
2771 }
2772 break;
2773 default:
2774 if ((error = pf_commit_rules(ioe->ticket,
2775 ioe->rs_num, ioe->anchor))) {
2776 free(table, M_TEMP);
2777 free(ioe, M_TEMP);
2778 goto fail;
2779 }
2780 break;
2781 }
2782 }
2783 free(table, M_TEMP);
2784 free(ioe, M_TEMP);
2785 break;
2786 }
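/*
 * Note the two passes above: the first loop only validates that every
 * element still holds the ticket of an open inactive ruleset, table
 * transaction or ALTQ load, and the second loop performs the actual commits.
 * Checking everything before committing anything keeps a partially stale
 * transaction from being applied halfway.
 */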
2787
2788 case DIOCGETSRCNODES: {
2789 struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr;
2790 struct pf_src_node *n, *p, *pstore;
2791 u_int32_t nr = 0;
2792 int space = psn->psn_len;
2793
2794 if (space == 0) {
2795 RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
2796 nr++;
2797 psn->psn_len = sizeof(struct pf_src_node) * nr;
2798 break;
2799 }
2800
2801 pstore = malloc(sizeof(*pstore), M_TEMP, M_WAITOK);
2802
2803 p = psn->psn_src_nodes;
2804 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
2805 int secs = time_second, diff;
2806
2807 if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
2808 break;
2809
2810 bcopy(n, pstore, sizeof(*pstore));
2811 if (n->rule.ptr != NULL)
2812 pstore->rule.nr = n->rule.ptr->nr;
2813 pstore->creation = secs - pstore->creation;
2814 if (pstore->expire > secs)
2815 pstore->expire -= secs;
2816 else
2817 pstore->expire = 0;
2818
2819 /* scale the connection rate counter down by the elapsed time */
2820 diff = secs - n->conn_rate.last;
2821 if (diff >= n->conn_rate.seconds)
2822 pstore->conn_rate.count = 0;
2823 else
2824 pstore->conn_rate.count -=
2825 n->conn_rate.count * diff /
2826 n->conn_rate.seconds;
2827
2828 error = copyout(pstore, p, sizeof(*p));
2829 if (error) {
2830 free(pstore, M_TEMP);
2831 goto fail;
2832 }
2833 p++;
2834 nr++;
2835 }
2836 psn->psn_len = sizeof(struct pf_src_node) * nr;
2837
2838 free(pstore, M_TEMP);
2839 break;
2840 }
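/*
 * DIOCGETSRCNODES uses the common two-call sizing idiom: a request with
 * psn_len == 0 only reports the space required, and a second call with a
 * buffer copies out as many source nodes as fit.  Creation and expiry times
 * are converted to values relative to the current time before the copyout.
 */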
2841
2842 case DIOCCLRSRCNODES: {
2843 struct pf_src_node *n;
2844 struct pf_state *state;
2845
2846 RB_FOREACH(state, pf_state_tree_id, &tree_id) {
2847 state->src_node = NULL;
2848 state->nat_src_node = NULL;
2849 }
2850 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
2851 n->expire = 1;
2852 n->states = 0;
2853 }
2854 pf_purge_expired_src_nodes(1);
2855 pf_status.src_nodes = 0;
2856 break;
2857 }
2858
2859 case DIOCKILLSRCNODES: {
2860 struct pf_src_node *sn;
2861 struct pf_state *s;
2862 struct pfioc_src_node_kill *psnk =
2863 (struct pfioc_src_node_kill *)addr;
2864 int killed = 0;
2865
2866 RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
2867 if (PF_MATCHA(psnk->psnk_src.neg,
2868 &psnk->psnk_src.addr.v.a.addr,
2869 &psnk->psnk_src.addr.v.a.mask,
2870 &sn->addr, sn->af) &&
2871 PF_MATCHA(psnk->psnk_dst.neg,
2872 &psnk->psnk_dst.addr.v.a.addr,
2873 &psnk->psnk_dst.addr.v.a.mask,
2874 &sn->raddr, sn->af)) {
2875
2876 if (sn->states != 0) {
2877 RB_FOREACH(s, pf_state_tree_id,
2878 &tree_id) {
2879 if (s->src_node == sn)
2880 s->src_node = NULL;
2881 if (s->nat_src_node == sn)
2882 s->nat_src_node = NULL;
2883 }
2884 sn->states = 0;
2885 }
2886 sn->expire = 1;
2887 killed++;
2888 }
2889 }
2890
2891 if (killed > 0)
2892 pf_purge_expired_src_nodes(1);
2893
2894 psnk->psnk_af = killed;
2895 break;
2896 }
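/*
 * Matching source nodes are not freed directly: their states are detached,
 * expire is forced to 1 and pf_purge_expired_src_nodes() does the actual
 * removal.  The number of nodes killed is returned to userland in psnk_af,
 * which doubles as the result field of this ioctl.
 */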
2897
2898 case DIOCSETHOSTID: {
2899 u_int32_t *hostid = (u_int32_t *)addr;
2900
2901 if (*hostid == 0)
2902 pf_status.hostid = arc4random();
2903 else
2904 pf_status.hostid = *hostid;
2905 break;
2906 }
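/*
 * A hostid of 0 asks the kernel to pick a random one.  The hostid is stamped
 * on exported states as the creator id, so pfsync peers can tell which host
 * a state update originated from.
 */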
2907
2908 case DIOCOSFPFLUSH:
2909 pf_osfp_flush();
2910 break;
2911
2912 case DIOCIGETIFACES: {
2913 struct pfioc_iface *io = (struct pfioc_iface *)addr;
2914
2915 if (io->pfiio_esize != sizeof(struct pfi_kif)) {
2916 error = ENODEV;
2917 break;
2918 }
2919 error = pfi_get_ifaces(io->pfiio_name, io->pfiio_buffer,
2920 &io->pfiio_size);
2921 break;
2922 }
2923
2924 case DIOCSETIFFLAG: {
2925 struct pfioc_iface *io = (struct pfioc_iface *)addr;
2926
2927 error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
2928 break;
2929 }
2930
2931 case DIOCCLRIFFLAG: {
2932 struct pfioc_iface *io = (struct pfioc_iface *)addr;
2933
2934 error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
2935 break;
2936 }
2937
2938 default:
2939 error = ENODEV;
2940 break;
2941 }
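/*
 * Common exit path: drop the spl level raised at the top of pfioctl() and
 * release the consistency lock in the mode (write or read) it was taken in,
 * which follows from how the caller opened the device (FWRITE or not).
 */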
2942 fail:
2943 splx(s);
2944 if (flags & FWRITE)
2945 rw_exit_write(&pf_consistency_lock);
2946 else
2947 rw_exit_read(&pf_consistency_lock);
2948 return (error);
2949 }