altq              514 net/pf_ioctl.c 	struct pf_altq	*altq;
altq              518 net/pf_ioctl.c 	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
altq              519 net/pf_ioctl.c 		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
altq              520 net/pf_ioctl.c 		if (altq->qname[0] == 0) {
altq              522 net/pf_ioctl.c 			error = altq_remove(altq);
altq              524 net/pf_ioctl.c 			pf_qid_unref(altq->qid);
altq              525 net/pf_ioctl.c 		pool_put(&pf_altq_pl, altq);
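The first loop (pf_ioctl.c 514-525) drains the inactive ALTQ list: interface-level entries (empty qname) have their kernel discipline destroyed with altq_remove(), child queues only drop their qid reference, and every entry goes back to the pf_altq pool. This is the purge that runs when a new ALTQ transaction is opened. A sketch of how the fragments likely fit together, assuming the function is pf_begin_altq() and that the ticket bookkeeping (ticket_altqs_inactive, altqs_inactive_open) exists as shown; neither name appears in the listing:

int
pf_begin_altq(u_int32_t *ticket)
{
	struct pf_altq	*altq;
	int		 error = 0;

	/* purge whatever is left on the inactive list */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0) {
			/* interface entry: detach and destroy the discipline */
			error = altq_remove(altq);
		} else
			/* child queue: only drop the qid reference */
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}
	if (error)
		return (error);
	*ticket = ++ticket_altqs_inactive;	/* assumed ticket counter */
	altqs_inactive_open = 1;		/* assumed transaction flag */
	return (0);
}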
altq              537 net/pf_ioctl.c 	struct pf_altq	*altq;
altq              543 net/pf_ioctl.c 	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
altq              544 net/pf_ioctl.c 		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
altq              545 net/pf_ioctl.c 		if (altq->qname[0] == 0) {
altq              547 net/pf_ioctl.c 			error = altq_remove(altq);
altq              549 net/pf_ioctl.c 			pf_qid_unref(altq->qid);
altq              550 net/pf_ioctl.c 		pool_put(&pf_altq_pl, altq);
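The second copy of the same purge loop (537-550) is the rollback path: if the transaction is abandoned, the half-built inactive list is torn down the same way. A minimal sketch, assuming the function is pf_rollback_altq() and that it first checks the caller's ticket against the assumed ticket_altqs_inactive counter:

int
pf_rollback_altq(u_int32_t ticket)
{
	struct pf_altq	*altq;
	int		 error = 0;

	if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
		return (0);		/* nothing to roll back */
	/* same purge loop as in the begin path above */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0)
			error = altq_remove(altq);
		else
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}
	altqs_inactive_open = 0;
	return (error);
}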
altq              560 net/pf_ioctl.c 	struct pf_altq		*altq;
altq              574 net/pf_ioctl.c 	TAILQ_FOREACH(altq, pf_altqs_active, entries) {
altq              575 net/pf_ioctl.c 		if (altq->qname[0] == 0) {
altq              577 net/pf_ioctl.c 			error = altq_pfattach(altq);
altq              579 net/pf_ioctl.c 				error = pf_enable_altq(altq);
altq              588 net/pf_ioctl.c 	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
altq              589 net/pf_ioctl.c 		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
altq              590 net/pf_ioctl.c 		if (altq->qname[0] == 0) {
altq              593 net/pf_ioctl.c 				error = pf_disable_altq(altq);
altq              594 net/pf_ioctl.c 			err = altq_pfdetach(altq);
altq              597 net/pf_ioctl.c 			err = altq_remove(altq);
altq              601 net/pf_ioctl.c 			pf_qid_unref(altq->qid);
altq              602 net/pf_ioctl.c 		pool_put(&pf_altq_pl, altq);
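The fragments from 560 to 602 are the commit step: the freshly loaded list becomes active, interface-level entries get their discipline attached with altq_pfattach() (and enabled if ALTQ is already running), and the previous active list, now sitting on the inactive pointer, is disabled, detached and freed. A sketch of the probable shape, assuming the usual active/inactive list swap, the ticket variables and a pf_altq_running flag, none of which appear in the listing:

int
pf_commit_altq(u_int32_t ticket)
{
	struct pf_altqqueue	*old_altqs;
	struct pf_altq		*altq;
	int			 s, err, error = 0;

	if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
		return (EBUSY);

	/* swap the lists: inactive becomes active, old active is purged below */
	s = splsoftnet();
	old_altqs = pf_altqs_active;
	pf_altqs_active = pf_altqs_inactive;
	pf_altqs_inactive = old_altqs;
	ticket_altqs_active = ticket_altqs_inactive;

	/* attach the new disciplines */
	TAILQ_FOREACH(altq, pf_altqs_active, entries) {
		if (altq->qname[0] == 0) {
			error = altq_pfattach(altq);
			if (error == 0 && pf_altq_running)
				error = pf_enable_altq(altq);
			if (error != 0) {
				splx(s);
				return (error);
			}
		}
	}

	/* purge the old list, remembering the first error but not stopping */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0) {
			if (pf_altq_running)
				error = pf_disable_altq(altq);
			err = altq_pfdetach(altq);
			if (err != 0 && error == 0)
				error = err;
			err = altq_remove(altq);
			if (err != 0 && error == 0)
				error = err;
		} else
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}
	splx(s);

	altqs_inactive_open = 0;
	return (error);
}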
altq              611 net/pf_ioctl.c pf_enable_altq(struct pf_altq *altq)
altq              617 net/pf_ioctl.c 	if ((ifp = ifunit(altq->ifname)) == NULL)
altq              625 net/pf_ioctl.c 		tb.rate = altq->ifbandwidth;
altq              626 net/pf_ioctl.c 		tb.depth = altq->tbrsize;
altq              636 net/pf_ioctl.c pf_disable_altq(struct pf_altq *altq)
altq              642 net/pf_ioctl.c 	if ((ifp = ifunit(altq->ifname)) == NULL)
altq              649 net/pf_ioctl.c 	if (altq->altq_disc != ifp->if_snd.altq_disc)
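pf_enable_altq() and pf_disable_altq() (611 onward) translate a pf_altq entry into operations on the interface's send queue: look the interface up by name with ifunit(), enable or disable ALTQ on if_snd, and program or clear the token-bucket regulator from ifbandwidth/tbrsize. A sketch of both, assuming the altq(9) helpers altq_enable(), altq_disable() and tbr_set() and the spl protection used elsewhere in this file:

int
pf_enable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 s, error = 0;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	if (ifp->if_snd.altq_type != ALTQT_NONE)
		error = altq_enable(&ifp->if_snd);

	/* set the token-bucket regulator */
	if (error == 0 && ALTQ_IS_ENABLED(&ifp->if_snd)) {
		tb.rate = altq->ifbandwidth;
		tb.depth = altq->tbrsize;
		s = splimp();
		error = tbr_set(&ifp->if_snd, &tb);
		splx(s);
	}
	return (error);
}

int
pf_disable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 s, error;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	/*
	 * If the discipline on the interface is no longer the one this
	 * entry installed, it was already replaced; nothing to do.
	 */
	if (altq->altq_disc != ifp->if_snd.altq_disc)
		return (0);

	error = altq_disable(&ifp->if_snd);
	if (error == 0) {
		/* clear the token-bucket regulator */
		tb.rate = 0;
		s = splimp();
		error = tbr_set(&ifp->if_snd, &tb);
		splx(s);
	}
	return (error);
}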
altq             1913 net/pf_ioctl.c 		struct pf_altq		*altq;
altq             1916 net/pf_ioctl.c 		TAILQ_FOREACH(altq, pf_altqs_active, entries) {
altq             1917 net/pf_ioctl.c 			if (altq->qname[0] == 0) {
altq             1918 net/pf_ioctl.c 				error = pf_enable_altq(altq);
altq             1930 net/pf_ioctl.c 		struct pf_altq		*altq;
altq             1933 net/pf_ioctl.c 		TAILQ_FOREACH(altq, pf_altqs_active, entries) {
altq             1934 net/pf_ioctl.c 			if (altq->qname[0] == 0) {
altq             1935 net/pf_ioctl.c 				error = pf_disable_altq(altq);
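The two TAILQ_FOREACH loops around 1916 and 1933 are the start/stop handlers: they walk the active list and call pf_enable_altq()/pf_disable_altq() on every interface-level entry. A sketch, assuming these sit in the DIOCSTARTALTQ and DIOCSTOPALTQ cases of the pf ioctl switch and that a pf_altq_running flag (not in the listing) records the result:

	case DIOCSTARTALTQ: {
		struct pf_altq	*altq;

		/* enable ALTQ on every interface that has a discipline */
		TAILQ_FOREACH(altq, pf_altqs_active, entries) {
			if (altq->qname[0] == 0) {
				error = pf_enable_altq(altq);
				if (error != 0)
					break;
			}
		}
		if (error == 0)
			pf_altq_running = 1;	/* assumed global flag */
		break;
	}

	case DIOCSTOPALTQ: {
		struct pf_altq	*altq;

		/* disable ALTQ on every interface-level entry */
		TAILQ_FOREACH(altq, pf_altqs_active, entries) {
			if (altq->qname[0] == 0) {
				error = pf_disable_altq(altq);
				if (error != 0)
					break;
			}
		}
		if (error == 0)
			pf_altq_running = 0;
		break;
	}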
altq             1948 net/pf_ioctl.c 		struct pf_altq		*altq, *a;
altq             1954 net/pf_ioctl.c 		altq = pool_get(&pf_altq_pl, PR_NOWAIT);
altq             1955 net/pf_ioctl.c 		if (altq == NULL) {
altq             1959 net/pf_ioctl.c 		bcopy(&pa->altq, altq, sizeof(struct pf_altq));
altq             1965 net/pf_ioctl.c 		if (altq->qname[0] != 0) {
altq             1966 net/pf_ioctl.c 			if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
altq             1968 net/pf_ioctl.c 				pool_put(&pf_altq_pl, altq);
altq             1972 net/pf_ioctl.c 				if (strncmp(a->ifname, altq->ifname,
altq             1974 net/pf_ioctl.c 					altq->altq_disc = a->altq_disc;
altq             1980 net/pf_ioctl.c 		error = altq_add(altq);
altq             1982 net/pf_ioctl.c 			pool_put(&pf_altq_pl, altq);
altq             1986 net/pf_ioctl.c 		TAILQ_INSERT_TAIL(pf_altqs_inactive, altq, entries);
altq             1987 net/pf_ioctl.c 		bcopy(altq, &pa->altq, sizeof(struct pf_altq));
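The block from 1948 to 1987 adds one queue definition during a transaction: allocate a pf_altq from the pool, copy in the user's pa->altq, resolve the queue name to a qid for child queues and inherit the parent discipline pointer from the interface entry on the same ifname, then altq_add() it and append it to the inactive list. A sketch, assuming this is the DIOCADDALTQ case and that the ticket check and error codes follow the pattern of the other handlers:

	case DIOCADDALTQ: {
		struct pfioc_altq	*pa = (struct pfioc_altq *)addr;
		struct pf_altq		*altq, *a;

		if (pa->ticket != ticket_altqs_inactive) {	/* assumed check */
			error = EBUSY;
			break;
		}
		altq = pool_get(&pf_altq_pl, PR_NOWAIT);
		if (altq == NULL) {
			error = ENOMEM;
			break;
		}
		bcopy(&pa->altq, altq, sizeof(struct pf_altq));

		/*
		 * Child queue: resolve its qid and borrow the discipline
		 * pointer from the interface entry on the same ifname.
		 */
		if (altq->qname[0] != 0) {
			if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
				error = EBUSY;
				pool_put(&pf_altq_pl, altq);
				break;
			}
			TAILQ_FOREACH(a, pf_altqs_inactive, entries) {
				if (strncmp(a->ifname, altq->ifname,
				    IFNAMSIZ) == 0 && a->qname[0] == 0) {
					altq->altq_disc = a->altq_disc;
					break;
				}
			}
		}

		error = altq_add(altq);
		if (error) {
			pool_put(&pf_altq_pl, altq);
			break;
		}

		TAILQ_INSERT_TAIL(pf_altqs_inactive, altq, entries);
		bcopy(altq, &pa->altq, sizeof(struct pf_altq));
		break;
	}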
altq             1993 net/pf_ioctl.c 		struct pf_altq		*altq;
altq             1996 net/pf_ioctl.c 		TAILQ_FOREACH(altq, pf_altqs_active, entries)
altq             2004 net/pf_ioctl.c 		struct pf_altq		*altq;
altq             2012 net/pf_ioctl.c 		altq = TAILQ_FIRST(pf_altqs_active);
altq             2013 net/pf_ioctl.c 		while ((altq != NULL) && (nr < pa->nr)) {
altq             2014 net/pf_ioctl.c 			altq = TAILQ_NEXT(altq, entries);
altq             2017 net/pf_ioctl.c 		if (altq == NULL) {
altq             2021 net/pf_ioctl.c 		bcopy(altq, &pa->altq, sizeof(struct pf_altq));
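The counting loop at 1996 and the indexed walk at 2012-2021 are how userland (pfctl) enumerates the active queues: one ioctl returns the number of entries plus a ticket, the next copies out the nr'th entry. A sketch, assuming these are the DIOCGETALTQS and DIOCGETALTQ cases and using the same assumed ticket variable and spl protection as above:

	case DIOCGETALTQS: {
		struct pfioc_altq	*pa = (struct pfioc_altq *)addr;
		struct pf_altq		*altq;

		pa->nr = 0;
		s = splsoftnet();
		TAILQ_FOREACH(altq, pf_altqs_active, entries)
			pa->nr++;
		pa->ticket = ticket_altqs_active;	/* assumed */
		splx(s);
		break;
	}

	case DIOCGETALTQ: {
		struct pfioc_altq	*pa = (struct pfioc_altq *)addr;
		struct pf_altq		*altq;
		u_int32_t		 nr;

		if (pa->ticket != ticket_altqs_active) {
			error = EBUSY;
			break;
		}
		nr = 0;
		s = splsoftnet();
		altq = TAILQ_FIRST(pf_altqs_active);
		while ((altq != NULL) && (nr < pa->nr)) {
			altq = TAILQ_NEXT(altq, entries);
			nr++;
		}
		if (altq == NULL) {
			error = EBUSY;
			splx(s);
			break;
		}
		bcopy(altq, &pa->altq, sizeof(struct pf_altq));
		splx(s);
		break;
	}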
altq             2032 net/pf_ioctl.c 		struct pf_altq		*altq;
altq             2042 net/pf_ioctl.c 		altq = TAILQ_FIRST(pf_altqs_active);
altq             2043 net/pf_ioctl.c 		while ((altq != NULL) && (nr < pq->nr)) {
altq             2044 net/pf_ioctl.c 			altq = TAILQ_NEXT(altq, entries);
altq             2047 net/pf_ioctl.c 		if (altq == NULL) {
altq             2051 net/pf_ioctl.c 		error = altq_getqstats(altq, pq->buf, &nbytes);
altq             2053 net/pf_ioctl.c 			pq->scheduler = altq->scheduler;
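The last pf_ioctl.c block (2032-2053) fetches per-queue statistics: walk to the nr'th active entry, have the discipline fill the caller's buffer via altq_getqstats(), and report the scheduler type back. A sketch, assuming this is the DIOCGETQSTATS case and a struct pfioc_qstats with ticket, nr, buf, nbytes and scheduler fields as used here:

	case DIOCGETQSTATS: {
		struct pfioc_qstats	*pq = (struct pfioc_qstats *)addr;
		struct pf_altq		*altq;
		u_int32_t		 nr;
		int			 nbytes;

		if (pq->ticket != ticket_altqs_active) {
			error = EBUSY;
			break;
		}
		nbytes = pq->nbytes;
		nr = 0;
		s = splsoftnet();
		altq = TAILQ_FIRST(pf_altqs_active);
		while ((altq != NULL) && (nr < pq->nr)) {
			altq = TAILQ_NEXT(altq, entries);
			nr++;
		}
		if (altq == NULL) {
			error = EBUSY;
			splx(s);
			break;
		}
		error = altq_getqstats(altq, pq->buf, &nbytes);
		splx(s);
		if (error == 0) {
			pq->scheduler = altq->scheduler;
			pq->nbytes = nbytes;
		}
		break;
	}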
altq             1394 net/pfvar.h    	struct pf_altq	 altq;
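The single pfvar.h hit is the struct pf_altq embedded in the ioctl argument used by the handlers above. A sketch of how that wrapper is commonly laid out; only the fields actually touched in the listed code (ticket, nr, altq) are shown, and the exact contents vary by version:

struct pfioc_altq {
	u_int32_t	 ticket;	/* transaction / ruleset ticket */
	u_int32_t	 nr;		/* index for DIOCGETALTQ */
	struct pf_altq	 altq;		/* the queue definition itself */
};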