q 68 altq/altq_classq.h #define qtype(q) (q)->qtype_ /* Get queue type */
q 69 altq/altq_classq.h #define qlimit(q) (q)->qlim_ /* Max packets to be queued */
q 70 altq/altq_classq.h #define qlen(q) (q)->qlen_ /* Current queue length. */
q 71 altq/altq_classq.h #define qtail(q) (q)->tail_ /* Tail of the queue */
q 72 altq/altq_classq.h #define qhead(q) ((q)->tail_ ? (q)->tail_->m_nextpkt : NULL)
q 74 altq/altq_classq.h #define qempty(q) ((q)->qlen_ == 0) /* Is the queue empty? */
q 75 altq/altq_classq.h #define q_is_red(q) ((q)->qtype_ == Q_RED) /* Is the queue a red queue */
q 76 altq/altq_classq.h #define q_is_rio(q) ((q)->qtype_ == Q_RIO) /* Is the queue a rio queue */
q 77 altq/altq_classq.h #define q_is_red_or_rio(q) ((q)->qtype_ == Q_RED || (q)->qtype_ == Q_RIO)
q 93 altq/altq_classq.h _addq(class_queue_t *q, struct mbuf *m)
q 97 altq/altq_classq.h if ((m0 = qtail(q)) != NULL)
q 102 altq/altq_classq.h qtail(q) = m;
q 103 altq/altq_classq.h qlen(q)++;
q 107 altq/altq_classq.h _getq(class_queue_t *q)
q 111 altq/altq_classq.h if ((m = qtail(q)) == NULL)
q 116 altq/altq_classq.h qtail(q) = NULL;
q 117 altq/altq_classq.h qlen(q)--;
q 124 altq/altq_classq.h _getq_tail(class_queue_t *q)
q 128 altq/altq_classq.h if ((m = m0 = qtail(q)) == NULL)
q 136 altq/altq_classq.h qtail(q) = NULL;
q 138 altq/altq_classq.h qtail(q) = prev;
q 139 altq/altq_classq.h qlen(q)--;
q 146 altq/altq_classq.h _getq_random(class_queue_t *q)
q 151 altq/altq_classq.h if ((m = qtail(q)) == NULL)
q 154 altq/altq_classq.h qtail(q) = NULL;
q 158 altq/altq_classq.h n = random() % qlen(q) + 1;
q 164 altq/altq_classq.h if (m == qtail(q))
q 165 altq/altq_classq.h qtail(q) = prev;
q 167 altq/altq_classq.h qlen(q)--;
q 173 altq/altq_classq.h _removeq(class_queue_t *q, struct mbuf *m)
q 177 altq/altq_classq.h m0 = qtail(q);
q 184 altq/altq_classq.h qtail(q) = NULL;
q 185 altq/altq_classq.h else if (qtail(q) == m)
q 186 altq/altq_classq.h qtail(q) = prev;
q 187 altq/altq_classq.h qlen(q)--;
q 191 altq/altq_classq.h _flushq(class_queue_t *q)
q 195 altq/altq_classq.h while ((m = _getq(q)) != NULL)
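
The altq/altq_classq.h entries above describe a circular, tail-only mbuf queue: qhead() is derived from qtail() because the tail's m_nextpkt points back at the head, and _addq()/_getq() maintain that invariant. A minimal sketch of the same discipline follows; struct node, cq_add() and cq_get() are illustrative stand-ins for struct mbuf and the kernel routines, not the kernel code itself.

/*
 * Illustrative sketch only, not kernel code: a tail-pointer circular
 * singly-linked FIFO, the discipline the qtail()/qhead() macros and
 * _addq()/_getq() above rely on.  "struct node" stands in for
 * struct mbuf and its m_nextpkt link.
 */
#include <stddef.h>

struct node { struct node *next; };
struct cqueue { struct node *tail; int len; };

static void
cq_add(struct cqueue *q, struct node *n)
{
    if (q->tail != NULL) {
        n->next = q->tail->next;    /* new node points at the head */
        q->tail->next = n;
    } else
        n->next = n;                /* sole element points at itself */
    q->tail = n;                    /* newest element is the tail */
    q->len++;
}

static struct node *
cq_get(struct cqueue *q)
{
    struct node *head;

    if (q->tail == NULL)
        return (NULL);
    head = q->tail->next;           /* the head hangs off the tail */
    if (head == q->tail)
        q->tail = NULL;             /* queue is now empty */
    else
        q->tail->next = head->next; /* unlink the old head */
    head->next = NULL;
    q->len--;
    return (head);
}
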
q 262 altq/altq_red.c red_addq(red_t *rp, class_queue_t *q, struct mbuf *m,
q 299 altq/altq_red.c avg += (qlen(q) << FP_SHIFT) - (avg >> rp->red_wshift);
q 310 altq/altq_red.c if (avg >= rp->red_thmin_s && qlen(q) > 1) {
q 341 altq/altq_red.c if (droptype == DTYPE_NODROP && qlen(q) >= qlimit(q))
q 347 altq/altq_red.c _addq(q, m);
q 351 altq/altq_red.c _addq(q, m);
q 362 altq/altq_red.c m = _getq_random(q);
q 498 altq/altq_red.c red_getq(rp, q)
q 500 altq/altq_red.c class_queue_t *q;
q 504 altq/altq_red.c if ((m = _getq(q)) == NULL) {
q 307 altq/altq_rio.c rio_addq(rio_t *rp, class_queue_t *q, struct mbuf *m,
q 381 altq/altq_rio.c if (droptype == DTYPE_NODROP && qlen(q) >= qlimit(q))
q 411 altq/altq_rio.c _addq(q, m);
q 420 altq/altq_rio.c rio_getq(rio_t *rp, class_queue_t *q)
q 425 altq/altq_rio.c if ((m = _getq(q)) == NULL)
q 1667 altq/altq_rmclass.c _addq(class_queue_t *q, mbuf_t *m)
q 1671 altq/altq_rmclass.c if ((m0 = qtail(q)) != NULL)
q 1676 altq/altq_rmclass.c qtail(q) = m;
q 1677 altq/altq_rmclass.c qlen(q)++;
q 1681 altq/altq_rmclass.c _getq(class_queue_t *q)
q 1685 altq/altq_rmclass.c if ((m = qtail(q)) == NULL)
q 1690 altq/altq_rmclass.c ASSERT(qlen(q) == 1);
q 1691 altq/altq_rmclass.c qtail(q) = NULL;
q 1693 altq/altq_rmclass.c qlen(q)--;
q 1700 altq/altq_rmclass.c _getq_tail(class_queue_t *q)
q 1704 altq/altq_rmclass.c if ((m = m0 = qtail(q)) == NULL)
q 1712 altq/altq_rmclass.c ASSERT(qlen(q) == 1);
q 1713 altq/altq_rmclass.c qtail(q) = NULL;
q 1715 altq/altq_rmclass.c qtail(q) = prev;
q 1716 altq/altq_rmclass.c qlen(q)--;
q 1723 altq/altq_rmclass.c _getq_random(class_queue_t *q)
q 1728 altq/altq_rmclass.c if ((m = qtail(q)) == NULL)
q 1731 altq/altq_rmclass.c ASSERT(qlen(q) == 1);
q 1732 altq/altq_rmclass.c qtail(q) = NULL;
q 1736 altq/altq_rmclass.c n = random() % qlen(q) + 1;
q 1742 altq/altq_rmclass.c if (m == qtail(q))
q 1743 altq/altq_rmclass.c qtail(q) = prev;
q 1745 altq/altq_rmclass.c qlen(q)--;
q 1751 altq/altq_rmclass.c _removeq(class_queue_t *q, mbuf_t *m)
q 1755 altq/altq_rmclass.c m0 = qtail(q);
q 1762 altq/altq_rmclass.c qtail(q) = NULL;
q 1763 altq/altq_rmclass.c else if (qtail(q) == m)
q 1764 altq/altq_rmclass.c qtail(q) = prev;
q 1765 altq/altq_rmclass.c qlen(q)--;
q 1769 altq/altq_rmclass.c _flushq(class_queue_t *q)
q 1773 altq/altq_rmclass.c while ((m = _getq(q)) != NULL)
q 1775 altq/altq_rmclass.c ASSERT(qlen(q) == 0);
q 380 arch/i386/i386/bios.c bootarg_t *q;
q 386 arch/i386/i386/bios.c for(q = bootargp; q->ba_type != BOOTARG_END; q = q->ba_next) {
q 387 arch/i386/i386/bios.c q->ba_next = (bootarg_t *)((caddr_t)q + q->ba_size);
q 388 arch/i386/i386/bios.c switch (q->ba_type) {
q 390 arch/i386/i386/bios.c bios_memmap = (bios_memmap_t *)q->ba_arg;
q 396 arch/i386/i386/bios.c bios_diskinfo = (bios_diskinfo_t *)q->ba_arg;
q 404 arch/i386/i386/bios.c printf(" apminfo %p", q->ba_arg);
q 406 arch/i386/i386/bios.c apm = (bios_apminfo_t *)q->ba_arg;
q 410 arch/i386/i386/bios.c bios_cksumlen = *(u_int32_t *)q->ba_arg;
q 417 arch/i386/i386/bios.c bios_pciinfo = (bios_pciinfo_t *)q->ba_arg;
q 424 arch/i386/i386/bios.c if (q->ba_size >= sizeof(bios_consdev_t))
q 426 arch/i386/i386/bios.c bios_consdev_t *cdp = (bios_consdev_t*)q->ba_arg;
q 442 arch/i386/i386/bios.c bios_smpinfo = q->ba_arg;
q 449 arch/i386/i386/bios.c bios_bootmac = (bios_bootmac_t *)q->ba_arg;
q 455 arch/i386/i386/bios.c printf(" unsupported arg (%d) %p", q->ba_type,
q 456 arch/i386/i386/bios.c q->ba_arg);
q 559 arch/i386/i386/ioapic.c struct intrhand *q;
q 593 arch/i386/i386/ioapic.c for (q = sc->sc_pins[p].ip_handler; q != NULL;
q 594 arch/i386/i386/ioapic.c q = q->ih_next) {
q 595 arch/i386/i386/ioapic.c if (q->ih_level > maxlevel)
q 596 arch/i386/i386/ioapic.c maxlevel = q->ih_level;
q 597 arch/i386/i386/ioapic.c if (q->ih_level < minlevel)
q 598 arch/i386/i386/ioapic.c minlevel = q->ih_level;
q 633 arch/i386/i386/ioapic.c struct intrhand **p, *q, *ih;
q 679 arch/i386/i386/ioapic.c for (p = &pin->ip_handler; (q = *p) != NULL; p = &q->ih_next) {
q 680 arch/i386/i386/ioapic.c if (q->ih_level > maxlevel)
q 681 arch/i386/i386/ioapic.c maxlevel = q->ih_level;
q 682 arch/i386/i386/ioapic.c if (q->ih_level < minlevel)
q 683 arch/i386/i386/ioapic.c minlevel = q->ih_level;
q 747 arch/i386/i386/ioapic.c struct intrhand **p, *q;
q 762 arch/i386/i386/ioapic.c for (p = &pin->ip_handler; (q = *p) != NULL && q != ih;
q 763 arch/i386/i386/ioapic.c p = &q->ih_next) {
q 764 arch/i386/i386/ioapic.c if (q->ih_level > maxlevel)
q 765 arch/i386/i386/ioapic.c maxlevel = q->ih_level;
q 766 arch/i386/i386/ioapic.c if (q->ih_level < minlevel)
q 767 arch/i386/i386/ioapic.c minlevel = q->ih_level;
q 770 arch/i386/i386/ioapic.c if (q)
q 771 arch/i386/i386/ioapic.c *p = q->ih_next;
q 774 arch/i386/i386/ioapic.c for (; q != NULL; q = q->ih_next) {
q 775 arch/i386/i386/ioapic.c if (q->ih_level > maxlevel)
q 776 arch/i386/i386/ioapic.c maxlevel = q->ih_level;
q 777 arch/i386/i386/ioapic.c if (q->ih_level < minlevel)
q 778 arch/i386/i386/ioapic.c minlevel = q->ih_level;
q 274 arch/i386/isa/isa_machdep.c struct intrhand *q;
q 280 arch/i386/isa/isa_machdep.c for (q = intrhand[irq]; q; q = q->ih_next)
q 281 arch/i386/isa/isa_machdep.c levels |= 1 << IPL(q->ih_level);
q 321 arch/i386/isa/isa_machdep.c for (q = intrhand[irq]; q; q = q->ih_next) {
q 322 arch/i386/isa/isa_machdep.c irqs |= IMASK(q->ih_level);
q 324 arch/i386/isa/isa_machdep.c q->ih_level < minlevel)
q 325 arch/i386/isa/isa_machdep.c minlevel = q->ih_level;
q 326 arch/i386/isa/isa_machdep.c if (q->ih_level > maxlevel)
q 327 arch/i386/isa/isa_machdep.c maxlevel = q->ih_level;
q 375 arch/i386/isa/isa_machdep.c struct intrhand **p, *q;
q 417 arch/i386/isa/isa_machdep.c for (p = &intrhand[i], tmp = 0; (q = *p) != NULL;
q 418 arch/i386/isa/isa_machdep.c p = &q->ih_next, tmp++)
q 477 arch/i386/isa/isa_machdep.c struct intrhand **p, *q, *ih;
q 552 arch/i386/isa/isa_machdep.c for (p = &intrhand[irq]; (q = *p) != NULL; p = &q->ih_next)
q 588 arch/i386/isa/isa_machdep.c struct intrhand **p, *q;
q 604 arch/i386/isa/isa_machdep.c for (p = &intrhand[irq]; (q = *p) != NULL && q != ih; p = &q->ih_next)
q 606 arch/i386/isa/isa_machdep.c if (q)
q 607 arch/i386/isa/isa_machdep.c *p = q->ih_next;
q 193 arch/i386/isa/pccom.c #define divrnd(n, q) (((n)*2/(q)+1)/2) /* divide and round off */
q 189 arch/i386/stand/libsa/bioscons.c #define divrnd(n, q) (((n)*2/(q)+1)/2) /* divide and round off */
q 48 arch/i386/stand/libsa/memprobe.c register char *q = (char *)0x000000;
q 52 arch/i386/stand/libsa/memprobe.c if (*p != *q)
q 57 arch/i386/stand/libsa/memprobe.c st = (*p != *q);
q 1037 compat/svr4/svr4_misc.c svr4_sys_waitsys(q, v, retval)
q 1038 compat/svr4/svr4_misc.c struct proc *q;
q 1052 compat/svr4/svr4_misc.c SCARG(uap, id) = -q->p_pgid;
q 1068 compat/svr4/svr4_misc.c LIST_FOREACH(p, &q->p_children, p_sibling) {
q 1102 compat/svr4/svr4_misc.c scheduler_wait_hook(q, p);
q 1104 compat/svr4/svr4_misc.c ruadd(&q->p_stats->p_cru, p->p_ru);
q 1131 compat/svr4/svr4_misc.c if ((error = tsleep((caddr_t)q, PWAIT | PCATCH, "svr4_wait", 0)) != 0)
q 105 ddb/db_hangman.c char *p, *q;
q 126 ddb/db_hangman.c q = db_qualify(dfa.sym, stab->name);
q 130 ddb/db_hangman.c while (*q++ != ':');
q 133 ddb/db_hangman.c for ((*lenp) = 0, p = q; *p; p++)
q 137 ddb/db_hangman.c return (q);
q 228 dev/ata/wd.c char buf[41], c, *p, *q;
q 249 dev/ata/wd.c for (blank = 0, p = wd->sc_params.atap_model, q = buf, i = 0;
q 256 dev/ata/wd.c *q++ = ' ';
q 259 dev/ata/wd.c *q++ = c;
q 263 dev/ata/wd.c *q++ = '\0';
q 123 dev/auconv.c u_char *q = p;
q 126 dev/auconv.c q += cc * 2;
q 128 dev/auconv.c q -= 2;
q 129 dev/auconv.c q[1] = *--p;
q 130 dev/auconv.c q[0] = 0;
q 137 dev/auconv.c u_char *q = p;
q 140 dev/auconv.c q += cc * 2;
q 142 dev/auconv.c q -= 2;
q 143 dev/auconv.c q[0] = *--p;
q 144 dev/auconv.c q[1] = 0;
q 151 dev/auconv.c u_char *q = p;
q 154 dev/auconv.c *q++ = p[1];
q 162 dev/auconv.c u_char *q = p;
q 165 dev/auconv.c *q++ = p[0];
q 173 dev/auconv.c u_char *q = p;
q 176 dev/auconv.c q += cc * 2;
q 178 dev/auconv.c q -= 2;
q 179 dev/auconv.c q[1] = (*--p) ^ 0x80;
q 180 dev/auconv.c q[0] = 0;
q 187 dev/auconv.c u_char *q = p;
q 190 dev/auconv.c q += cc * 2;
q 192 dev/auconv.c q -= 2;
q 193 dev/auconv.c q[0] = (*--p) ^ 0x80;
q 194 dev/auconv.c q[1] = 0;
q 201 dev/auconv.c u_char *q = p;
q 204 dev/auconv.c *q++ = p[1] ^ 0x80;
q 212 dev/auconv.c u_char *q = p;
q 215 dev/auconv.c *q++ = p[0] ^ 0x80;
q 226 dev/auconv.c u_char *q = p;
q 229 dev/auconv.c q += cc * 2;
q 231 dev/auconv.c q -= 4;
q 232 dev/auconv.c q[1] = q[3] = *--p;
q 233 dev/auconv.c q[0] = q[2] = *--p;
q 243 dev/auconv.c u_char *q = p;
q 246 dev/auconv.c q += cc * 2;
q 248 dev/auconv.c q -= 4;
q 249 dev/auconv.c q[0] = q[2] = *--p;
q 250 dev/auconv.c q[1] = q[3] = *--p;
q 260 dev/auconv.c u_char *q = p;
q 263 dev/auconv.c q += cc * 4;
q 265 dev/auconv.c q -= 4;
q 266 dev/auconv.c q[1] = q[3] = *--p;
q 267 dev/auconv.c q[0] = q[2] = 0;
q 277 dev/auconv.c u_char *q = p;
q 280 dev/auconv.c q += cc * 4;
q 282 dev/auconv.c q -= 4;
q 283 dev/auconv.c q[0] = q[2] = *--p;
q 284 dev/auconv.c q[1] = q[3] = 0;
q 294 dev/auconv.c u_char *q = p;
q 297 dev/auconv.c q += cc * 4;
q 299 dev/auconv.c q -= 4;
q 300 dev/auconv.c q[1] = q[3] = (*--p) ^ 0x80;
q 301 dev/auconv.c q[0] = q[2] = 0;
q 311 dev/auconv.c u_char *q = p;
q 314 dev/auconv.c q += cc * 4;
q 316 dev/auconv.c q -= 4;
q 317 dev/auconv.c q[0] = q[2] = (*--p) ^ 0x80;
q 318 dev/auconv.c q[1] = q[3] = 0;
q 328 dev/auconv.c u_char *q = p;
q 331 dev/auconv.c q += cc * 2;
q 333 dev/auconv.c q -= 4;
q 334 dev/auconv.c q[1] = q[3] = (*--p) ^ 0x80;
q 335 dev/auconv.c q[0] = q[2] = *--p;
q 345 dev/auconv.c u_char *q = p;
q 348 dev/auconv.c q += cc * 2;
q 350 dev/auconv.c q -= 4;
q 351 dev/auconv.c q[0] = q[2] = (*--p) ^ 0x80;
q 352 dev/auconv.c q[1] = q[3] = *--p;
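
The dev/auconv.c entries above all use the same in-place widening trick: when 8-bit samples are expanded to 16-bit in the same buffer, q starts past the end of the (larger) output and both pointers walk backwards so no unread source byte is overwritten. A hedged sketch of one such conversion, assuming the caller's buffer already has room for the expanded data (the function name and byte order are assumptions, not an auconv.c routine):

/*
 * Hedged sketch of the in-place widening used throughout auconv.c:
 * expanding cc 8-bit samples to 16 bits in the same buffer must run
 * from the end backwards so no unread source byte is overwritten.
 */
static void
expand_ulinear8_to_16be(unsigned char *p, int cc)
{
    unsigned char *q = p;

    p += cc;        /* one past the last 8-bit sample */
    q += cc * 2;    /* one past the last 16-bit sample */
    while (--cc >= 0) {
        q -= 2;
        q[0] = *--p;    /* high byte: the original sample */
        q[1] = 0;       /* low byte: zero */
    }
}
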
q 1915 dev/audio.c u_char *s, *e, *p, *q;
q 1923 dev/audio.c q = p+cc;
q 1926 dev/audio.c s <= q && q <= e)) {
q 1928 dev/audio.c sc->sc_sil_count = max(sc->sc_sil_count, q-s);
q 154 dev/ic/com.c #define divrnd(n, q) (((n)*2/(q)+1)/2) /* divide and round off */
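
divrnd(), quoted above from pccom.c, bioscons.c and com.c, divides and rounds to the nearest integer rather than truncating, which matters when computing serial-line divisors. A small worked example (values are illustrative):

/*
 * Worked example for divrnd(): divide and round to nearest.
 */
#define divrnd(n, q)    (((n)*2/(q)+1)/2)

/* divrnd(10, 4) == ((20/4)+1)/2 == 3   -- 2.5  rounds up     */
/* divrnd(9, 4)  == ((18/4)+1)/2 == 2   -- 2.25 rounds down   */
/* 10 / 4        == 2 with plain truncating integer division  */
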
q 160 dev/ic/ispvar.h #define ISP_QUEUE_ENTRY(q, idx) ((q) + ((idx) * QENTRY_LEN))
q 778 dev/ic/midway.c bzero(&sc->txslot[lcv].q, sizeof(sc->txslot[lcv].q));
q 805 dev/ic/midway.c bzero(&sc->rxslot[lcv].q, sizeof(sc->rxslot[lcv].q));
q 1223 dev/ic/midway.c if (sc->rxslot[slot].indma.ifq_head || sc->rxslot[slot].q.ifq_head)
q 1256 dev/ic/midway.c sc->rxslot[slot].q.ifq_head ||
q 1311 dev/ic/midway.c IF_DEQUEUE(&sc->rxslot[slot].q, m);
q 1338 dev/ic/midway.c IF_DEQUEUE(&sc->txslot[lcv].q, m);
q 1665 dev/ic/midway.c IF_ENQUEUE(&sc->txslot[txchan].q, m);
q 1786 dev/ic/midway.c launch.t = sc->txslot[chan].q.ifq_head; /* peek at head of queue */
q 1902 dev/ic/midway.c IF_DEQUEUE(&sc->txslot[chan].q, tmp);
q 1967 dev/ic/midway.c IF_DEQUEUE(&sc->txslot[chan].q, tmp);
q 2452 dev/ic/midway.c if ((kick & mask) && sc->txslot[lcv].q.ifq_head) {
q 2490 dev/ic/midway.c sc->rxslot[slot].q.ifq_head == NULL &&
q 2749 dev/ic/midway.c m = sc->rxslot[slot].q.ifq_head;
q 2762 dev/ic/midway.c IF_DEQUEUE(&sc->rxslot[slot].q, m);
q 2810 dev/ic/midway.c IF_ENQUEUE(&sc->rxslot[slot].q, m);
q 139 dev/ic/midwayvar.h struct ifqueue q; /* mbufs waiting for dma now */
q 161 dev/ic/midwayvar.h struct ifqueue q; /* mbufs waiting for dma now */
q 72 dev/ic/pckbc.c #define CMD_IN_QUEUE(q) (TAILQ_FIRST(&(q)->cmdqueue) != NULL)
q 414 dev/ic/pckbc.c pckbc_init_slotdata(q)
q 415 dev/ic/pckbc.c struct pckbc_slotdata *q;
q 418 dev/ic/pckbc.c TAILQ_INIT(&q->cmdqueue);
q 419 dev/ic/pckbc.c TAILQ_INIT(&q->freequeue);
q 422 dev/ic/pckbc.c TAILQ_INSERT_TAIL(&q->freequeue, &(q->cmds[i]), next);
q 424 dev/ic/pckbc.c q->polling = 0;
q 444 dev/ic/pckbc.c struct pckbc_slotdata *q = t->t_slotdata[slot];
q 449 dev/ic/pckbc.c if (c != -1 && q && CMD_IN_QUEUE(q)) {
q 670 dev/ic/pckbc.c pckbc_cleanqueue(q)
q 671 dev/ic/pckbc.c struct pckbc_slotdata *q;
q 678 dev/ic/pckbc.c while ((cmd = TAILQ_FIRST(&q->cmdqueue))) {
q 679 dev/ic/pckbc.c TAILQ_REMOVE(&q->cmdqueue, cmd, next);
q 686 dev/ic/pckbc.c TAILQ_INSERT_TAIL(&q->freequeue, cmd, next);
q 729 dev/ic/pckbc.c struct pckbc_slotdata *q = t->t_slotdata[slot];
q 730 dev/ic/pckbc.c struct pckbc_devcmd *cmd = TAILQ_FIRST(&q->cmdqueue);
q 732 dev/ic/pckbc.c if (q->polling) {
q 738 dev/ic/pckbc.c TAILQ_REMOVE(&q->cmdqueue, cmd, next);
q 743 dev/ic/pckbc.c TAILQ_INSERT_TAIL(&q->freequeue, cmd, next);
q 745 dev/ic/pckbc.c cmd = TAILQ_FIRST(&q->cmdqueue);
q 768 dev/ic/pckbc.c struct pckbc_slotdata *q = t->t_slotdata[slot];
q 769 dev/ic/pckbc.c struct pckbc_devcmd *cmd = TAILQ_FIRST(&q->cmdqueue);
q 805 dev/ic/pckbc.c TAILQ_REMOVE(&q->cmdqueue, cmd, next);
q 810 dev/ic/pckbc.c TAILQ_INSERT_TAIL(&q->freequeue, cmd, next);
q 812 dev/ic/pckbc.c if (!CMD_IN_QUEUE(q))
q 831 dev/ic/pckbc.c struct pckbc_slotdata *q = t->t_slotdata[slot];
q 838 dev/ic/pckbc.c nc = TAILQ_FIRST(&q->freequeue);
q 840 dev/ic/pckbc.c TAILQ_REMOVE(&q->freequeue, nc, next);
q 854 dev/ic/pckbc.c if (q->polling && sync) {
q 860 dev/ic/pckbc.c pckbc_cleanqueue(q);
q 863 dev/ic/pckbc.c isactive = CMD_IN_QUEUE(q);
q 864 dev/ic/pckbc.c TAILQ_INSERT_TAIL(&q->cmdqueue, nc, next);
q 868 dev/ic/pckbc.c if (q->polling)
q 872 dev/ic/pckbc.c TAILQ_REMOVE(&q->cmdqueue, nc, next);
q 882 dev/ic/pckbc.c TAILQ_INSERT_TAIL(&q->freequeue, nc, next);
q 943 dev/ic/pckbc.c struct pckbc_slotdata *q;
q 959 dev/ic/pckbc.c q = t->t_slotdata[slot];
q 961 dev/ic/pckbc.c if (!q) {
q 969 dev/ic/pckbc.c if (q->polling)
q 975 dev/ic/pckbc.c if (CMD_IN_QUEUE(q) && pckbc_cmdresponse(t, slot, data))
q 253 dev/ic/pdq.c pdq_databuf_queue_t *q)
q 257 dev/ic/pdq.c PDQ_OS_DATABUF_DEQUEUE(q, pdu);
q 319 dev/ic/pdqvar.h #define PDQ_OS_DATABUF_ENQUEUE(q, b) do { \
q 321 dev/ic/pdqvar.h if ((q)->q_tail == NULL) \
q 322 dev/ic/pdqvar.h (q)->q_head = (b); \
q 324 dev/ic/pdqvar.h PDQ_OS_DATABUF_NEXTPKT_SET(((PDQ_OS_DATABUF_T *)(q)->q_tail), b); \
q 325 dev/ic/pdqvar.h (q)->q_tail = (b); \
q 328 dev/ic/pdqvar.h #define PDQ_OS_DATABUF_DEQUEUE(q, b) do { \
q 329 dev/ic/pdqvar.h if (((b) = (PDQ_OS_DATABUF_T *) (q)->q_head) != NULL) { \
q 330 dev/ic/pdqvar.h if (((q)->q_head = PDQ_OS_DATABUF_NEXTPKT(b)) == NULL) \
q 331 dev/ic/pdqvar.h (q)->q_tail = NULL; \
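
The pdqvar.h macros above implement a plain NULL-terminated head/tail FIFO of NEXTPKT-linked buffers. The same logic written out as functions, purely as a sketch: struct buf is a hypothetical stand-in for PDQ_OS_DATABUF_T, and clearing b->nextpkt on enqueue is an assumption of this sketch.

/*
 * Sketch only: the PDQ_OS_DATABUF_ENQUEUE/DEQUEUE macros as plain
 * functions over a hypothetical NEXTPKT-linked buffer type.
 */
#include <stddef.h>

struct buf { struct buf *nextpkt; };
struct bufq { struct buf *q_head, *q_tail; };

static void
bufq_enqueue(struct bufq *q, struct buf *b)
{
    b->nextpkt = NULL;
    if (q->q_tail == NULL)
        q->q_head = b;              /* queue was empty */
    else
        q->q_tail->nextpkt = b;     /* append after the old tail */
    q->q_tail = b;
}

static struct buf *
bufq_dequeue(struct bufq *q)
{
    struct buf *b = q->q_head;

    if (b != NULL && (q->q_head = b->nextpkt) == NULL)
        q->q_tail = NULL;           /* removed the last element */
    return (b);
}
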
q 1395 dev/isa/mcd.c union mcd_qchninfo q;
q 1412 dev/isa/mcd.c if (mcd_getqchan(sc, &q, CD_TRACK_INFO) != 0)
q 1414 dev/isa/mcd.c if (q.toc.trk_no != 0x00 || q.toc.idx_no == 0x00)
q 1416 dev/isa/mcd.c idx = bcd2bin(q.toc.idx_no);
q 1419 dev/isa/mcd.c sc->toc[idx] = q;
q 1534 dev/isa/mcd.c mcd_getqchan(sc, q, qchn)
q 1536 dev/isa/mcd.c union mcd_qchninfo *q;
q 1563 dev/isa/mcd.c *q = mbx.res.data.qchninfo;
q 1573 dev/isa/mcd.c union mcd_qchninfo q;
q 1592 dev/isa/mcd.c if ((error = mcd_getqchan(sc, &q, ch->data_format)) != 0)
q 1607 dev/isa/mcd.c data.what.position.track_number = bcd2bin(q.current.trk_no);
q 1608 dev/isa/mcd.c data.what.position.index_number = bcd2bin(q.current.idx_no);
q 1613 dev/isa/mcd.c bcd2bin(q.current.relative_pos[0]);
q 1615 dev/isa/mcd.c bcd2bin(q.current.relative_pos[1]);
q 1617 dev/isa/mcd.c bcd2bin(q.current.relative_pos[2]);
q 1620 dev/isa/mcd.c bcd2bin(q.current.absolute_pos[0]);
q 1622 dev/isa/mcd.c bcd2bin(q.current.absolute_pos[1]);
q 1624 dev/isa/mcd.c bcd2bin(q.current.absolute_pos[2]);
q 1627 dev/isa/mcd.c lba = msf2hsg(q.current.relative_pos, 1);
q 1639 dev/isa/mcd.c lba = msf2hsg(q.current.absolute_pos, 0);
q 1756 dev/isa/mcd.c union mcd_qchninfo q;
q 1767 dev/isa/mcd.c if ((error = mcd_getqchan(sc, &q, CD_CURRENT_POSITION)) != 0)
q 1771 dev/isa/mcd.c sc->lastpb.data.seek.start_msf[0] = q.current.absolute_pos[0];
q 1772 dev/isa/mcd.c sc->lastpb.data.seek.start_msf[1] = q.current.absolute_pos[1];
q 1773 dev/isa/mcd.c sc->lastpb.data.seek.start_msf[2] = q.current.absolute_pos[2];
q 1913 dev/microcode/aic7xxx/aicasm_gram.y const char *q, prefix[] = " * ";
q 1926 dev/microcode/aic7xxx/aicasm_gram.y for (p = &versions[oldlen + strlen(prefix)], q = verstring; *q; q++) {
q 1927 dev/microcode/aic7xxx/aicasm_gram.y if (*q == '$')
q 1929 dev/microcode/aic7xxx/aicasm_gram.y *p++ = *q;
q 274 dev/mulaw.c u_char *q = p;
q 277 dev/mulaw.c q += cc << 1;
q 280 dev/mulaw.c q -= 2;
q 281 dev/mulaw.c q[1] = mulawtolin16[*p][0];
q 282 dev/mulaw.c q[0] = mulawtolin16[*p][1];
q 289 dev/mulaw.c u_char *q = p;
q 292 dev/mulaw.c q += cc << 1;
q 295 dev/mulaw.c q -= 2;
q 296 dev/mulaw.c q[0] = mulawtolin16[*p][0];
q 297 dev/mulaw.c q[1] = mulawtolin16[*p][1];
q 304 dev/mulaw.c u_char *q = p;
q 307 dev/mulaw.c q += cc << 1;
q 310 dev/mulaw.c q -= 2;
q 311 dev/mulaw.c q[1] = mulawtolin16[*p][0] ^ 0x80;
q 312 dev/mulaw.c q[0] = mulawtolin16[*p][1];
q 319 dev/mulaw.c u_char *q = p;
q 322 dev/mulaw.c q += cc * 4;
q 325 dev/mulaw.c q -= 4;
q 326 dev/mulaw.c q[1] = q[3] = mulawtolin16[*p][0] ^ 0x80;
q 327 dev/mulaw.c q[0] = q[2] = mulawtolin16[*p][1];
q 334 dev/mulaw.c u_char *q = p;
q 337 dev/mulaw.c q += cc << 1;
q 340 dev/mulaw.c q -= 2;
q 341 dev/mulaw.c q[0] = mulawtolin16[*p][0] ^ 0x80;
q 342 dev/mulaw.c q[1] = mulawtolin16[*p][1];
q 349 dev/mulaw.c u_char *q = p;
q 352 dev/mulaw.c q += cc * 4;
q 355 dev/mulaw.c q -= 4;
q 356 dev/mulaw.c q[0] = q[2] = mulawtolin16[*p][0] ^ 0x80;
q 357 dev/mulaw.c q[1] = q[3] = mulawtolin16[*p][1];
q 364 dev/mulaw.c u_char *q = p + 1; /* q points to the higher byte. */
q 367 dev/mulaw.c *p++ = lintomulaw[*q ^ 0x80];
q 368 dev/mulaw.c q += 2;
q 413 dev/mulaw.c u_char *q = p;
q 416 dev/mulaw.c q += cc << 1;
q 419 dev/mulaw.c q -= 2;
q 420 dev/mulaw.c q[1] = alawtolin16[*p][0];
q 421 dev/mulaw.c q[0] = alawtolin16[*p][1];
q 428 dev/mulaw.c u_char *q = p;
q 431 dev/mulaw.c q += cc << 1;
q 434 dev/mulaw.c q -= 2;
q 435 dev/mulaw.c q[0] = alawtolin16[*p][0];
q 436 dev/mulaw.c q[1] = alawtolin16[*p][1];
q 443 dev/mulaw.c u_char *q = p;
q 446 dev/mulaw.c q += cc << 1;
q 449 dev/mulaw.c q -= 2;
q 450 dev/mulaw.c q[1] = alawtolin16[*p][0] ^ 0x80;
q 451 dev/mulaw.c q[0] = alawtolin16[*p][1];
q 458 dev/mulaw.c u_char *q = p;
q 461 dev/mulaw.c q += cc << 1;
q 464 dev/mulaw.c q -= 2;
q 465 dev/mulaw.c q[0] = alawtolin16[*p][0] ^ 0x80;
q 466 dev/mulaw.c q[1] = alawtolin16[*p][1];
q 491 dev/mulaw.c u_char *q = p;
q 494 dev/mulaw.c *p = lintoalaw[q[1] ^ 0x80];
q 496 dev/mulaw.c q += 2;
q 504 dev/mulaw.c u_char *q = p;
q 507 dev/mulaw.c *p = lintoalaw[q[0] ^ 0x80];
q 509 dev/mulaw.c q += 2;
q 519 dev/mulaw.c u_char *q = p;
q 522 dev/mulaw.c q += cc * 4;
q 525 dev/mulaw.c q -= 4;
q 526 dev/mulaw.c q[1] = q[3] = mulawtolin16[*p][0];
q 527 dev/mulaw.c q[0] = q[2] = mulawtolin16[*p][1];
q 537 dev/mulaw.c u_char *q = p;
q 540 dev/mulaw.c q += cc * 4;
q 543 dev/mulaw.c q -= 4;
q 544 dev/mulaw.c q[0] = q[2] = mulawtolin16[*p][0];
q 545 dev/mulaw.c q[1] = q[3] = mulawtolin16[*p][1];
q 555 dev/mulaw.c u_char *q = p;
q 558 dev/mulaw.c q += cc * 4;
q 561 dev/mulaw.c q -= 4;
q 562 dev/mulaw.c q[1] = q[3] = alawtolin16[*p][0] ^ 0x80;
q 563 dev/mulaw.c q[0] = q[2] = alawtolin16[*p][1];
q 573 dev/mulaw.c u_char *q = p;
q 576 dev/mulaw.c q += cc * 4;
q 579 dev/mulaw.c q -= 4;
q 580 dev/mulaw.c q[0] = q[2] = alawtolin16[*p][0] ^ 0x80;
q 581 dev/mulaw.c q[1] = q[3] = alawtolin16[*p][1];
q 1205 dev/pci/auich.c struct auich_dmalist *q, *qe;
q 1207 dev/pci/auich.c q = sc->dmap_pcmo;
q 1210 dev/pci/auich.c while (q != qe) {
q 1212 dev/pci/auich.c q->base = sc->pcmo_p;
q 1213 dev/pci/auich.c q->len = (sc->pcmo_blksize /
q 1217 dev/pci/auich.c qe, q, sc->pcmo_blksize /
q 1224 dev/pci/auich.c if (++q == &sc->dmalist_pcmo[AUICH_DMALIST_MAX])
q 1225 dev/pci/auich.c q = sc->dmalist_pcmo;
q 1228 dev/pci/auich.c sc->dmap_pcmo = q;
q 1260 dev/pci/auich.c struct auich_dmalist *q, *qe;
q 1262 dev/pci/auich.c q = sc->dmap_pcmi;
q 1265 dev/pci/auich.c while (q != qe) {
q 1267 dev/pci/auich.c q->base = sc->pcmi_p;
q 1268 dev/pci/auich.c q->len = (sc->pcmi_blksize /
q 1272 dev/pci/auich.c qe, q, sc->pcmi_blksize /
q 1279 dev/pci/auich.c if (++q == &sc->dmalist_pcmi[AUICH_DMALIST_MAX])
q 1280 dev/pci/auich.c q = sc->dmalist_pcmi;
q 1283 dev/pci/auich.c sc->dmap_pcmi = q;
q 1329 dev/pci/auich.c struct auich_dmalist *q;
q 1353 dev/pci/auich.c q = sc->dmap_pcmo = sc->dmalist_pcmo;
q 1354 dev/pci/auich.c q->base = sc->pcmo_start;
q 1355 dev/pci/auich.c q->len = (blksize / sc->sc_sample_size) | AUICH_DMAF_IOC;
q 1356 dev/pci/auich.c if (++q == &sc->dmalist_pcmo[AUICH_DMALIST_MAX])
q 1357 dev/pci/auich.c q = sc->dmalist_pcmo;
q 1358 dev/pci/auich.c sc->dmap_pcmo = q;
q 1380 dev/pci/auich.c struct auich_dmalist *q;
q 1404 dev/pci/auich.c q = sc->dmap_pcmi = sc->dmalist_pcmi;
q 1405 dev/pci/auich.c q->base = sc->pcmi_start;
q 1406 dev/pci/auich.c q->len = (blksize / sc->sc_sample_size) | AUICH_DMAF_IOC;
q 1407 dev/pci/auich.c if (++q == &sc->dmalist_pcmi[AUICH_DMALIST_MAX])
q 1408 dev/pci/auich.c q = sc->dmalist_pcmi;
q 1409 dev/pci/auich.c sc->dmap_pcmi = q;
q 1418 dev/pci/cs4280.c caddr_t q;
q 1422 dev/pci/cs4280.c q = malloc(size, pool, flags);
q 1423 dev/pci/cs4280.c if (!q)
q 1427 dev/pci/cs4280.c free(q,pool);
q 1436 dev/pci/cs4280.c free(q, pool);
q 1443 dev/pci/cs4280.c p->dum = q; /* return to audio driver */
q 443 dev/pci/if_bnx.c u_char *p, *q;
q 523 dev/pci/if_bnx.c q = p + sizeof(*hdr);
q 525 dev/pci/if_bnx.c bnx_COM_b06FwText = (u_int32_t *)q;
q 526 dev/pci/if_bnx.c q += bnx_COM_b06FwTextLen;
q 528 dev/pci/if_bnx.c bnx_COM_b06FwData = (u_int32_t *)q;
q 529 dev/pci/if_bnx.c q += bnx_COM_b06FwDataLen;
q 531 dev/pci/if_bnx.c bnx_COM_b06FwRodata = (u_int32_t *)q;
q 532 dev/pci/if_bnx.c q += bnx_COM_b06FwRodataLen;
q 534 dev/pci/if_bnx.c bnx_COM_b06FwBss = (u_int32_t *)q;
q 535 dev/pci/if_bnx.c q += bnx_COM_b06FwBssLen;
q 537 dev/pci/if_bnx.c bnx_COM_b06FwSbss = (u_int32_t *)q;
q 538 dev/pci/if_bnx.c q += bnx_COM_b06FwSbssLen;
q 541 dev/pci/if_bnx.c bnx_RXP_b06FwText = (u_int32_t *)q;
q 542 dev/pci/if_bnx.c q += bnx_RXP_b06FwTextLen;
q 544 dev/pci/if_bnx.c bnx_RXP_b06FwData = (u_int32_t *)q;
q 545 dev/pci/if_bnx.c q += bnx_RXP_b06FwDataLen;
q 547 dev/pci/if_bnx.c bnx_RXP_b06FwRodata = (u_int32_t *)q;
q 548 dev/pci/if_bnx.c q += bnx_RXP_b06FwRodataLen;
q 550 dev/pci/if_bnx.c bnx_RXP_b06FwBss = (u_int32_t *)q;
q 551 dev/pci/if_bnx.c q += bnx_RXP_b06FwBssLen;
q 553 dev/pci/if_bnx.c bnx_RXP_b06FwSbss = (u_int32_t *)q;
q 554 dev/pci/if_bnx.c q += bnx_RXP_b06FwSbssLen;
q 557 dev/pci/if_bnx.c bnx_TPAT_b06FwText = (u_int32_t *)q;
q 558 dev/pci/if_bnx.c q += bnx_TPAT_b06FwTextLen;
q 560 dev/pci/if_bnx.c bnx_TPAT_b06FwData = (u_int32_t *)q;
q 561 dev/pci/if_bnx.c q += bnx_TPAT_b06FwDataLen;
q 563 dev/pci/if_bnx.c bnx_TPAT_b06FwRodata = (u_int32_t *)q;
q 564 dev/pci/if_bnx.c q += bnx_TPAT_b06FwRodataLen;
q 566 dev/pci/if_bnx.c bnx_TPAT_b06FwBss = (u_int32_t *)q;
q 567 dev/pci/if_bnx.c q += bnx_TPAT_b06FwBssLen;
q 569 dev/pci/if_bnx.c bnx_TPAT_b06FwSbss = (u_int32_t *)q;
q 570 dev/pci/if_bnx.c q += bnx_TPAT_b06FwSbssLen;
q 573 dev/pci/if_bnx.c bnx_TXP_b06FwText = (u_int32_t *)q;
q 574 dev/pci/if_bnx.c q += bnx_TXP_b06FwTextLen;
q 576 dev/pci/if_bnx.c bnx_TXP_b06FwData = (u_int32_t *)q;
q 577 dev/pci/if_bnx.c q += bnx_TXP_b06FwDataLen;
q 579 dev/pci/if_bnx.c bnx_TXP_b06FwRodata = (u_int32_t *)q;
q 580 dev/pci/if_bnx.c q += bnx_TXP_b06FwRodataLen;
q 582 dev/pci/if_bnx.c bnx_TXP_b06FwBss = (u_int32_t *)q;
q 583 dev/pci/if_bnx.c q += bnx_TXP_b06FwBssLen;
q 585 dev/pci/if_bnx.c bnx_TXP_b06FwSbss = (u_int32_t *)q;
q 586 dev/pci/if_bnx.c q += bnx_TXP_b06FwSbssLen;
q 589 dev/pci/if_bnx.c bnx_rv2p_proc1 = (u_int32_t *)q;
q 590 dev/pci/if_bnx.c q += bnx_rv2p_proc1len;
q 592 dev/pci/if_bnx.c bnx_rv2p_proc2 = (u_int32_t *)q;
q 593 dev/pci/if_bnx.c q += bnx_rv2p_proc2len;
q 596 dev/pci/if_bnx.c if (q - p != size) {
q 425 dev/pci/if_xgereg.h #define MC_QUEUE(q,s) ((uint64_t)(s)<<(56-(q*8)))
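
MC_QUEUE() above packs one 8-bit per-queue value into a 64-bit register, with queue 0 occupying the most significant byte. Restated with a couple of worked values (the extra parentheses around (q) and the sample values are added here for clarity; they are illustrative only):

/*
 * Worked example for MC_QUEUE(): one byte per queue in a 64-bit
 * register, queue 0 in the most significant byte.
 */
#include <stdint.h>

#define MC_QUEUE(q, s)  ((uint64_t)(s) << (56 - (q) * 8))

/* MC_QUEUE(0, 0x3) == 0x0300000000000000ULL   -- byte 7 (MSB) */
/* MC_QUEUE(7, 0x3) == 0x0000000000000003ULL   -- byte 0 (LSB) */
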
q 831 dev/pci/ises.c struct ises_q *q;
q 851 dev/pci/ises.c q = SIMPLEQ_FIRST(&sc->sc_queue);
q 861 dev/pci/ises.c if (sc->sc_cursession != q->q_sesn) {
q 864 dev/pci/ises.c if (ises_bchu_switch_session (sc, &q->q_session, q->q_sesn))
q 865 dev/pci/ises.c sc->sc_cursession = q->q_sesn;
q 871 dev/pci/ises.c DPRINTF(("%s:ises_feed: feed to chip (q = %p)\n", dv, q));
q 876 dev/pci/ises.c SIMPLEQ_INSERT_TAIL(&sc->sc_qchip, q, q_next);
q 880 dev/pci/ises.c if (q->q_crp->crp_flags & CRYPTO_F_IMBUF)
q 882 dev/pci/ises.c q->q_src.mbuf, BUS_DMA_NOWAIT);
q 883 dev/pci/ises.c else if (q->q_crp->crp_flags & CRYPTO_F_IOV)
q 884 dev/pci/ises.c bus_dmamap_load_uio(sc->sc_dmat, sc->sc_dmamap, q->q_src.uio,
q 1140 dev/pci/ises.c struct ises_q *q;
q 1174 dev/pci/ises.c q = (struct ises_q *)malloc(sizeof(struct ises_q), M_DEVBUF, M_NOWAIT);
q 1175 dev/pci/ises.c if (q == NULL)
q 1177 dev/pci/ises.c bzero(q, sizeof(struct ises_q));
q 1179 dev/pci/ises.c q->q_sesn = ISES_SESSION(crp->crp_sid);
q 1180 dev/pci/ises.c ses = &sc->sc_sessions[q->q_sesn];
q 1182 dev/pci/ises.c DPRINTF(("%s:ises_process: session %d selected\n", dv, q->q_sesn));
q 1184 dev/pci/ises.c q->q_sc = sc;
q 1185 dev/pci/ises.c q->q_crp = crp;
q 1188 dev/pci/ises.c q->q_src.mbuf = (struct mbuf *)crp->crp_buf;
q 1189 dev/pci/ises.c q->q_dst.mbuf = (struct mbuf *)crp->crp_buf;
q 1191 dev/pci/ises.c q->q_src.uio = (struct uio *)crp->crp_buf;
q 1192 dev/pci/ises.c q->q_dst.uio = (struct uio *)crp->crp_buf;
q 1233 dev/pci/ises.c q->q_session.omr |= ISES_SELR_BCHU_DIS;
q 1240 dev/pci/ises.c q->q_session.omr |= ISES_SOMR_BOMR_3DES;
q 1242 dev/pci/ises.c q->q_session.omr |= ISES_SOMR_BOMR_DES;
q 1245 dev/pci/ises.c q->q_session.omr |= ISES_SOMR_FMR_CBC;
q 1249 dev/pci/ises.c q->q_session.omr |= ISES_SOMR_EDR;
q 1252 dev/pci/ises.c bcopy(enccrd->crd_iv, q->q_session.sccr, 8);
q 1254 dev/pci/ises.c q->q_session.sccr[0] = ses->sccr[0];
q 1255 dev/pci/ises.c q->q_session.sccr[1] = ses->sccr[1];
q 1260 dev/pci/ises.c m_copyback(q->q_src.mbuf,
q 1262 dev/pci/ises.c (caddr_t)q->q_session.sccr);
q 1264 dev/pci/ises.c cuio_copyback(q->q_src.uio,
q 1266 dev/pci/ises.c (caddr_t)q->q_session.sccr);
q 1271 dev/pci/ises.c q->q_session.omr &= ~ISES_SOMR_EDR;
q 1274 dev/pci/ises.c bcopy(enccrd->crd_iv, q->q_session.sccr, 8);
q 1276 dev/pci/ises.c m_copydata(q->q_src.mbuf, enccrd->crd_inject,
q 1277 dev/pci/ises.c 8, (caddr_t)q->q_session.sccr);
q 1279 dev/pci/ises.c cuio_copydata(q->q_src.uio,
q 1281 dev/pci/ises.c (caddr_t)q->q_session.sccr);
q 1285 dev/pci/ises.c q->q_session.kr[0] = ses->kr[0];
q 1286 dev/pci/ises.c q->q_session.kr[1] = ses->kr[1];
q 1287 dev/pci/ises.c q->q_session.kr[2] = ses->kr[2];
q 1288 dev/pci/ises.c q->q_session.kr[3] = ses->kr[3];
q 1289 dev/pci/ises.c q->q_session.kr[4] = ses->kr[4];
q 1290 dev/pci/ises.c q->q_session.kr[5] = ses->kr[5];
q 1292 dev/pci/ises.c SWAP32(q->q_session.sccr[0]);
q 1293 dev/pci/ises.c SWAP32(q->q_session.sccr[1]);
q 1302 dev/pci/ises.c q->q_session.omr |= ISES_HOMR_HFR_MD5;
q 1305 dev/pci/ises.c q->q_session.omr |= ISES_HOMR_HFR_SHA1;
q 1309 dev/pci/ises.c q->q_session.omr |= ISES_HOMR_HFR_RMD160;
q 1313 dev/pci/ises.c q->q_session.cvr[0] = ses->cvr[0];
q 1314 dev/pci/ises.c q->q_session.cvr[1] = ses->cvr[1];
q 1315 dev/pci/ises.c q->q_session.cvr[2] = ses->cvr[2];
q 1316 dev/pci/ises.c q->q_session.cvr[3] = ses->cvr[3];
q 1317 dev/pci/ises.c q->q_session.cvr[4] = ses->cvr[4];
q 1343 dev/pci/ises.c q->q_offset = coffset >> 2;
q 1348 dev/pci/ises.c q->q_src_l = mbuf2pages(q->q_src.mbuf, &q->q_src_npa,
q 1349 dev/pci/ises.c q->q_src_packp, q->q_src_packl, 1, &nicealign);
q 1351 dev/pci/ises.c q->q_src_l = iov2pages(q->q_src.uio, &q->q_src_npa,
q 1352 dev/pci/ises.c q->q_src_packp, q->q_src_packl, 1, &nicealign);
q 1357 dev/pci/ises.c if (q->q_src_l == 0)
q 1359 dev/pci/ises.c else if (q->q_src_l > 0xfffc) {
q 1375 dev/pci/ises.c totlen = q->q_dst_l = q->q_src_l;
q 1376 dev/pci/ises.c if (q->q_src.mbuf->m_flags & M_PKTHDR) {
q 1378 dev/pci/ises.c M_DUP_PKTHDR(m, q->q_src.mbuf);
q 1415 dev/pci/ises.c q->q_dst.mbuf = top;
q 1417 dev/pci/ises.c ubsec_mcopy(q->q_src.mbuf, q->q_dst.mbuf, cpskip, cpoffset);
q 1420 dev/pci/ises.c q->q_dst.mbuf = q->q_src.mbuf;
q 1424 dev/pci/ises.c q->q_dst_l = mbuf2pages(q->q_dst.mbuf, &q->q_dst_npa,
q 1425 dev/pci/ises.c &q->q_dst_packp, &q->q_dst_packl, 1, NULL);
q 1434 dev/pci/ises.c SIMPLEQ_INSERT_TAIL(&sc->sc_queue, q, q_next);
q 1446 dev/pci/ises.c err, q));
q 1451 dev/pci/ises.c if (q) {
q 1452 dev/pci/ises.c if (q->q_src.mbuf != q->q_dst.mbuf)
q 1453 dev/pci/ises.c m_freem(q->q_dst.mbuf);
q 1454 dev/pci/ises.c free(q, M_DEVBUF);
q 1462 dev/pci/ises.c ises_callback(struct ises_q *q)
q 1464 dev/pci/ises.c struct cryptop *crp = (struct cryptop *)q->q_crp;
q 1466 dev/pci/ises.c struct ises_softc *sc = q->q_sc;
q 1470 dev/pci/ises.c (q->q_src.mbuf != q->q_dst.mbuf)) {
q 1471 dev/pci/ises.c m_freem(q->q_src.mbuf);
q 1472 dev/pci/ises.c crp->crp_buf = (caddr_t)q->q_dst.mbuf;
q 1475 dev/pci/ises.c if (q->q_session.omr & ISES_SOMR_EDR) {
q 1477 dev/pci/ises.c sccr = (u_int8_t *)&sc->sc_sessions[q->q_sesn].sccr;
q 1498 dev/pci/ises.c crd->crd_inject, 12, (u_int8_t *)q->q_macbuf);
q 1500 dev/pci/ises.c bcopy((u_int8_t *)q->q_macbuf, crp->crp_mac, 12);
q 1505 dev/pci/ises.c free(q, M_DEVBUF);
q 172 dev/pci/lofn.c struct lofn_q *q;
q 201 dev/pci/lofn.c q = sc->sc_current;
q 203 dev/pci/lofn.c q->q_finish(sc, q);
q 204 dev/pci/lofn.c free(q, M_DEVBUF);
q 288 dev/pci/lofn.c struct lofn_q *q;
q 299 dev/pci/lofn.c q = (struct lofn_q *)malloc(sizeof(*q), M_DEVBUF, M_NOWAIT);
q 300 dev/pci/lofn.c if (q == NULL) {
q 308 dev/pci/lofn.c q->q_start = lofn_modexp_start;
q 309 dev/pci/lofn.c q->q_finish = lofn_modexp_finish;
q 310 dev/pci/lofn.c q->q_krp = krp;
q 312 dev/pci/lofn.c SIMPLEQ_INSERT_TAIL(&sc->sc_queue, q, q_next);
q 321 dev/pci/lofn.c free(q, M_DEVBUF);
q 327 dev/pci/lofn.c lofn_modexp_start(sc, q)
q 329 dev/pci/lofn.c struct lofn_q *q;
q 331 dev/pci/lofn.c struct cryptkop *krp = q->q_krp;
q 462 dev/pci/lofn.c lofn_modexp_finish(sc, q)
q 464 dev/pci/lofn.c struct lofn_q *q;
q 466 dev/pci/lofn.c struct cryptkop *krp = q->q_krp;
q 520 dev/pci/lofn.c struct lofn_q *q;
q 535 dev/pci/lofn.c q = SIMPLEQ_FIRST(&sc->sc_queue);
q 536 dev/pci/lofn.c if (q->q_start(sc, q) == 0) {
q 537 dev/pci/lofn.c sc->sc_current = q;
q 544 dev/pci/lofn.c free(q, M_DEVBUF);
q 876 dev/pci/noct.c struct noct_workq *q;
q 888 dev/pci/noct.c q = SIMPLEQ_FIRST(&sc->sc_outq);
q 892 dev/pci/noct.c crp = q->q_crp;
q 906 dev/pci/noct.c bus_dmamap_sync(sc->sc_dmat, q->q_dmamap,
q 907 dev/pci/noct.c 0, q->q_dmamap->dm_mapsize,
q 914 dev/pci/noct.c q->q_macbuf);
q 916 dev/pci/noct.c bcopy(q->q_macbuf, crp->crp_mac, len);
q 924 dev/pci/noct.c q->q_buf);
q 928 dev/pci/noct.c q->q_buf);
q 931 dev/pci/noct.c bus_dmamap_unload(sc->sc_dmat, q->q_dmamap);
q 932 dev/pci/noct.c bus_dmamap_destroy(sc->sc_dmat, q->q_dmamap);
q 933 dev/pci/noct.c bus_dmamem_unmap(sc->sc_dmat, q->q_buf, crd->crd_len);
q 934 dev/pci/noct.c bus_dmamem_free(sc->sc_dmat, &q->q_dmaseg, rseg);
q 936 dev/pci/noct.c free(q, M_DEVBUF);
q 945 dev/pci/noct.c q = SIMPLEQ_FIRST(&sc->sc_inq);
q 949 dev/pci/noct.c noct_ea_start(sc, q);
q 957 dev/pci/noct.c noct_ea_start(sc, q)
q 959 dev/pci/noct.c struct noct_workq *q;
q 965 dev/pci/noct.c crp = q->q_crp;
q 977 dev/pci/noct.c noct_ea_start_hash(sc, q, crp, crd);
q 981 dev/pci/noct.c noct_ea_start_des(sc, q, crp, crd);
q 992 dev/pci/noct.c free(q, M_DEVBUF);
q 999 dev/pci/noct.c noct_ea_start_hash(sc, q, crp, crd)
q 1001 dev/pci/noct.c struct noct_workq *q;
q 1015 dev/pci/noct.c &q->q_dmaseg, 1, &rseg, BUS_DMA_WAITOK | BUS_DMA_STREAMING)) != 0)
q 1018 dev/pci/noct.c if ((err = bus_dmamem_map(sc->sc_dmat, &q->q_dmaseg, rseg,
q 1019 dev/pci/noct.c crd->crd_len, (caddr_t *)&q->q_buf, BUS_DMA_WAITOK)) != 0)
q 1023 dev/pci/noct.c crd->crd_len, 0, BUS_DMA_WAITOK, &q->q_dmamap)) != 0)
q 1026 dev/pci/noct.c if ((err = bus_dmamap_load_raw(sc->sc_dmat, q->q_dmamap, &q->q_dmaseg,
q 1032 dev/pci/noct.c crd->crd_skip, crd->crd_len, q->q_buf);
q 1035 dev/pci/noct.c crd->crd_skip, crd->crd_len, q->q_buf);
q 1041 dev/pci/noct.c bus_dmamap_sync(sc->sc_dmat, q->q_dmamap, 0, q->q_dmamap->dm_mapsize,
q 1066 dev/pci/noct.c adr = q->q_dmamap->dm_segs[0].ds_addr;
q 1086 dev/pci/noct.c SIMPLEQ_INSERT_TAIL(&sc->sc_chipq, q, q_next);
q 1092 dev/pci/noct.c bus_dmamap_unload(sc->sc_dmat, q->q_dmamap);
q 1094 dev/pci/noct.c bus_dmamap_destroy(sc->sc_dmat, q->q_dmamap);
q 1096 dev/pci/noct.c bus_dmamem_unmap(sc->sc_dmat, q->q_buf, crd->crd_len);
q 1098 dev/pci/noct.c bus_dmamem_free(sc->sc_dmat, &q->q_dmaseg, rseg);
q 1101 dev/pci/noct.c free(q, M_DEVBUF);
q 1108 dev/pci/noct.c noct_ea_start_des(sc, q, crp, crd)
q 1110 dev/pci/noct.c struct noct_workq *q;
q 1164 dev/pci/noct.c &q->q_dmaseg, 1, &rseg, BUS_DMA_WAITOK | BUS_DMA_STREAMING)) != 0)
q 1167 dev/pci/noct.c if ((err = bus_dmamem_map(sc->sc_dmat, &q->q_dmaseg, rseg,
q 1168 dev/pci/noct.c crd->crd_len, (caddr_t *)&q->q_buf, BUS_DMA_WAITOK)) != 0)
q 1172 dev/pci/noct.c crd->crd_len, 0, BUS_DMA_WAITOK, &q->q_dmamap)) != 0)
q 1175 dev/pci/noct.c if ((err = bus_dmamap_load_raw(sc->sc_dmat, q->q_dmamap, &q->q_dmaseg,
q 1181 dev/pci/noct.c crd->crd_skip, crd->crd_len, q->q_buf);
q 1184 dev/pci/noct.c crd->crd_skip, crd->crd_len, q->q_buf);
q 1190 dev/pci/noct.c bus_dmamap_sync(sc->sc_dmat, q->q_dmamap, 0, q->q_dmamap->dm_mapsize,
q 1214 dev/pci/noct.c adr = q->q_dmamap->dm_segs[0].ds_addr;
q 1247 dev/pci/noct.c SIMPLEQ_INSERT_TAIL(&sc->sc_chipq, q, q_next);
q 1253 dev/pci/noct.c bus_dmamap_unload(sc->sc_dmat, q->q_dmamap);
q 1255 dev/pci/noct.c bus_dmamap_destroy(sc->sc_dmat, q->q_dmamap);
q 1257 dev/pci/noct.c bus_dmamem_unmap(sc->sc_dmat, q->q_buf, crd->crd_len);
q 1259 dev/pci/noct.c bus_dmamem_free(sc->sc_dmat, &q->q_dmaseg, rseg);
q 1262 dev/pci/noct.c free(q, M_DEVBUF);
q 1272 dev/pci/noct.c struct noct_workq *q;
q 1287 dev/pci/noct.c q = SIMPLEQ_FIRST(&sc->sc_chipq);
q 1289 dev/pci/noct.c SIMPLEQ_INSERT_TAIL(&sc->sc_outq, q, q_next);
q 1296 dev/pci/noct.c q->q_macbuf, 20);
q 1860 dev/pci/noct.c struct noct_workq *q = NULL;
q 1871 dev/pci/noct.c q = (struct noct_workq *)malloc(sizeof(struct noct_workq),
q 1873 dev/pci/noct.c if (q == NULL) {
q 1877 dev/pci/noct.c q->q_crp = crp;
q 1880 dev/pci/noct.c SIMPLEQ_INSERT_TAIL(&sc->sc_inq, q, q_next);
q 1886 dev/pci/noct.c if (q != NULL)
q 1887 dev/pci/noct.c free(q, M_DEVBUF);
q 195 dev/pci/nofn.c struct nofn_pk_q *q;
q 201 dev/pci/nofn.c q = sc->sc_pk_current;
q 203 dev/pci/nofn.c q->q_finish(sc, q);
q 204 dev/pci/nofn.c free(q, M_DEVBUF);
q 384 dev/pci/nofn.c struct nofn_pk_q *q;
q 401 dev/pci/nofn.c q = SIMPLEQ_FIRST(&sc->sc_pk_queue);
q 402 dev/pci/nofn.c if (q->q_start(sc, q) == 0) {
q 403 dev/pci/nofn.c sc->sc_pk_current = q;
q 413 dev/pci/nofn.c free(q, M_DEVBUF);
q 423 dev/pci/nofn.c struct nofn_pk_q *q;
q 434 dev/pci/nofn.c q = (struct nofn_pk_q *)malloc(sizeof(*q), M_DEVBUF, M_NOWAIT);
q 435 dev/pci/nofn.c if (q == NULL) {
q 443 dev/pci/nofn.c q->q_start = nofn_modexp_start;
q 444 dev/pci/nofn.c q->q_finish = nofn_modexp_finish;
q 445 dev/pci/nofn.c q->q_krp = krp;
q 447 dev/pci/nofn.c SIMPLEQ_INSERT_TAIL(&sc->sc_pk_queue, q, q_next);
q 456 dev/pci/nofn.c free(q, M_DEVBUF);
q 517 dev/pci/nofn.c nofn_modexp_start(sc, q)
q 519 dev/pci/nofn.c struct nofn_pk_q *q;
q 521 dev/pci/nofn.c struct cryptkop *krp = q->q_krp;
q 652 dev/pci/nofn.c nofn_modexp_finish(sc, q)
q 654 dev/pci/nofn.c struct nofn_pk_q *q;
q 656 dev/pci/nofn.c struct cryptkop *krp = q->q_krp;
q 1801 dev/pci/safe.c struct safe_pkq *q;
q 1814 dev/pci/safe.c q = (struct safe_pkq *)malloc(sizeof(*q), M_DEVBUF, M_NOWAIT);
q 1815 dev/pci/safe.c if (q == NULL) {
q 1819 dev/pci/safe.c q->pkq_krp = krp;
q 1822 dev/pci/safe.c SIMPLEQ_INSERT_TAIL(&sc->sc_pkq, q, pkq_next);
q 1968 dev/pci/safe.c struct safe_pkq *q = SIMPLEQ_FIRST(&sc->sc_pkq);
q 1970 dev/pci/safe.c sc->sc_pkq_cur = q;
q 1973 dev/pci/safe.c crypto_kdone(q->pkq_krp);
q 1974 dev/pci/safe.c free(q, M_DEVBUF);
q 1988 dev/pci/safe.c struct safe_pkq *q;
q 2002 dev/pci/safe.c q = sc->sc_pkq_cur;
q 2003 dev/pci/safe.c res = &q->pkq_krp->krp_param[q->pkq_krp->krp_iparams];
q 2016 dev/pci/safe.c crypto_kdone(q->pkq_krp);
q 2017 dev/pci/safe.c free(q, M_DEVBUF);
q 230 dev/pci/ubsec.c struct ubsec_q *q;
q 232 dev/pci/ubsec.c q = (struct ubsec_q *)malloc(sizeof(struct ubsec_q),
q 234 dev/pci/ubsec.c if (q == NULL) {
q 242 dev/pci/ubsec.c free(q, M_DEVBUF);
q 247 dev/pci/ubsec.c q->q_dma = dmap;
q 248 dev/pci/ubsec.c sc->sc_queuea[i] = q;
q 250 dev/pci/ubsec.c SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next);
q 335 dev/pci/ubsec.c struct ubsec_q *q;
q 352 dev/pci/ubsec.c q = SIMPLEQ_FIRST(&sc->sc_qchip);
q 353 dev/pci/ubsec.c dmap = q->q_dma;
q 360 dev/pci/ubsec.c npkts = q->q_nstacked_mcrs;
q 367 dev/pci/ubsec.c if(q->q_stacked_mcr[i])
q 368 dev/pci/ubsec.c ubsec_callback(sc, q->q_stacked_mcr[i]);
q 372 dev/pci/ubsec.c ubsec_callback(sc, q);
q 445 dev/pci/ubsec.c struct ubsec_q *q, *q2;
q 474 dev/pci/ubsec.c q = SIMPLEQ_FIRST(&sc->sc_queue);
q 478 dev/pci/ubsec.c bus_dmamap_sync(sc->sc_dmat, q->q_src_map,
q 479 dev/pci/ubsec.c 0, q->q_src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
q 480 dev/pci/ubsec.c if (q->q_dst_map != NULL)
q 481 dev/pci/ubsec.c bus_dmamap_sync(sc->sc_dmat, q->q_dst_map,
q 482 dev/pci/ubsec.c 0, q->q_dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
q 484 dev/pci/ubsec.c q->q_nstacked_mcrs = npkts - 1; /* Number of packets stacked */
q 486 dev/pci/ubsec.c for (i = 0; i < q->q_nstacked_mcrs; i++) {
q 498 dev/pci/ubsec.c bcopy(v, &q->q_dma->d_dma->d_mcradd[i], sizeof(struct ubsec_mcr_add));
q 499 dev/pci/ubsec.c q->q_stacked_mcr[i] = q2;
q 501 dev/pci/ubsec.c q->q_dma->d_dma->d_mcr.mcr_pkts = htole16(npkts);
q 502 dev/pci/ubsec.c SIMPLEQ_INSERT_TAIL(&sc->sc_qchip, q, q_next);
q 503 dev/pci/ubsec.c bus_dmamap_sync(sc->sc_dmat, q->q_dma->d_alloc.dma_map,
q 504 dev/pci/ubsec.c 0, q->q_dma->d_alloc.dma_map->dm_mapsize,
q 506 dev/pci/ubsec.c WRITE_REG(sc, BS_MCR1, q->q_dma->d_alloc.dma_paddr +
q 521 dev/pci/ubsec.c q = SIMPLEQ_FIRST(&sc->sc_queue);
q 523 dev/pci/ubsec.c bus_dmamap_sync(sc->sc_dmat, q->q_src_map,
q 524 dev/pci/ubsec.c 0, q->q_src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
q 525 dev/pci/ubsec.c if (q->q_dst_map != NULL)
q 526 dev/pci/ubsec.c bus_dmamap_sync(sc->sc_dmat, q->q_dst_map,
q 527 dev/pci/ubsec.c 0, q->q_dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
q 528 dev/pci/ubsec.c bus_dmamap_sync(sc->sc_dmat, q->q_dma->d_alloc.dma_map,
q 529 dev/pci/ubsec.c 0, q->q_dma->d_alloc.dma_map->dm_mapsize,
q 532 dev/pci/ubsec.c WRITE_REG(sc, BS_MCR1, q->q_dma->d_alloc.dma_paddr +
q 535 dev/pci/ubsec.c printf("feed: q->chip %p %08x\n", q,
q 536 dev/pci/ubsec.c (u_int32_t)q->q_dma->d_alloc.dma_paddr);
q 540 dev/pci/ubsec.c SIMPLEQ_INSERT_TAIL(&sc->sc_qchip, q, q_next);
q 714 dev/pci/ubsec.c struct ubsec_q *q = NULL;
q 746 dev/pci/ubsec.c q = SIMPLEQ_FIRST(&sc->sc_freequeue);
q 750 dev/pci/ubsec.c dmap = q->q_dma; /* Save dma pointer */
q 751 dev/pci/ubsec.c bzero(q, sizeof(struct ubsec_q));
q 754 dev/pci/ubsec.c q->q_sesn = UBSEC_SESSION(crp->crp_sid);
q 755 dev/pci/ubsec.c q->q_dma = dmap;
q 756 dev/pci/ubsec.c ses = &sc->sc_sessions[q->q_sesn];
q 759 dev/pci/ubsec.c q->q_src_m = (struct mbuf *)crp->crp_buf;
q 760 dev/pci/ubsec.c q->q_dst_m = (struct mbuf *)crp->crp_buf;
q 762 dev/pci/ubsec.c q->q_src_io = (struct uio *)crp->crp_buf;
q 763 dev/pci/ubsec.c q->q_dst_io = (struct uio *)crp->crp_buf;
q 773 dev/pci/ubsec.c q->q_crp = crp;
q 824 dev/pci/ubsec.c q->q_flags |= UBSEC_QFLAGS_COPYOUTIV;
q 835 dev/pci/ubsec.c m_copyback(q->q_src_m,
q 839 dev/pci/ubsec.c cuio_copyback(q->q_src_io,
q 849 dev/pci/ubsec.c m_copydata(q->q_src_m, enccrd->crd_inject,
q 852 dev/pci/ubsec.c cuio_copydata(q->q_src_io,
q 921 dev/pci/ubsec.c 0xfff0, 0, BUS_DMA_NOWAIT, &q->q_src_map) != 0) {
q 926 dev/pci/ubsec.c if (bus_dmamap_load_mbuf(sc->sc_dmat, q->q_src_map,
q 927 dev/pci/ubsec.c q->q_src_m, BUS_DMA_NOWAIT) != 0) {
q 928 dev/pci/ubsec.c bus_dmamap_destroy(sc->sc_dmat, q->q_src_map);
q 929 dev/pci/ubsec.c q->q_src_map = NULL;
q 934 dev/pci/ubsec.c if (bus_dmamap_load_uio(sc->sc_dmat, q->q_src_map,
q 935 dev/pci/ubsec.c q->q_src_io, BUS_DMA_NOWAIT) != 0) {
q 936 dev/pci/ubsec.c bus_dmamap_destroy(sc->sc_dmat, q->q_src_map);
q 937 dev/pci/ubsec.c q->q_src_map = NULL;
q 942 dev/pci/ubsec.c nicealign = ubsec_dmamap_aligned(q->q_src_map);
q 949 dev/pci/ubsec.c for (i = j = 0; i < q->q_src_map->dm_nsegs; i++) {
q 951 dev/pci/ubsec.c bus_size_t packl = q->q_src_map->dm_segs[i].ds_len;
q 952 dev/pci/ubsec.c bus_addr_t packp = q->q_src_map->dm_segs[i].ds_addr;
q 986 dev/pci/ubsec.c if ((i + 1) == q->q_src_map->dm_nsegs)
q 1014 dev/pci/ubsec.c &q->q_dst_map) != 0) {
q 1018 dev/pci/ubsec.c if (bus_dmamap_load_uio(sc->sc_dmat, q->q_dst_map,
q 1019 dev/pci/ubsec.c q->q_dst_io, BUS_DMA_NOWAIT) != 0) {
q 1020 dev/pci/ubsec.c bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map);
q 1021 dev/pci/ubsec.c q->q_dst_map = NULL;
q 1026 dev/pci/ubsec.c q->q_dst_m = q->q_src_m;
q 1027 dev/pci/ubsec.c q->q_dst_map = q->q_src_map;
q 1032 dev/pci/ubsec.c totlen = q->q_src_map->dm_mapsize;
q 1033 dev/pci/ubsec.c if (q->q_src_m->m_flags & M_PKTHDR) {
q 1045 dev/pci/ubsec.c M_DUP_PKTHDR(m, q->q_src_m);
q 1075 dev/pci/ubsec.c q->q_dst_m = top;
q 1076 dev/pci/ubsec.c ubsec_mcopy(q->q_src_m, q->q_dst_m,
q 1080 dev/pci/ubsec.c &q->q_dst_map) != 0) {
q 1085 dev/pci/ubsec.c q->q_dst_map, q->q_dst_m,
q 1088 dev/pci/ubsec.c q->q_dst_map);
q 1089 dev/pci/ubsec.c q->q_dst_map = NULL;
q 1102 dev/pci/ubsec.c for (i = j = 0; i < q->q_dst_map->dm_nsegs; i++) {
q 1104 dev/pci/ubsec.c bus_size_t packl = q->q_dst_map->dm_segs[i].ds_len;
q 1105 dev/pci/ubsec.c bus_addr_t packp = q->q_dst_map->dm_segs[i].ds_addr;
q 1139 dev/pci/ubsec.c if ((i + 1) == q->q_dst_map->dm_nsegs) {
q 1180 dev/pci/ubsec.c SIMPLEQ_INSERT_TAIL(&sc->sc_queue, q, q_next);
q 1189 dev/pci/ubsec.c if (q != NULL) {
q 1190 dev/pci/ubsec.c if ((q->q_dst_m != NULL) && (q->q_src_m != q->q_dst_m))
q 1191 dev/pci/ubsec.c m_freem(q->q_dst_m);
q 1193 dev/pci/ubsec.c if (q->q_dst_map != NULL && q->q_dst_map != q->q_src_map) {
q 1194 dev/pci/ubsec.c bus_dmamap_unload(sc->sc_dmat, q->q_dst_map);
q 1195 dev/pci/ubsec.c bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map);
q 1197 dev/pci/ubsec.c if (q->q_src_map != NULL) {
q 1198 dev/pci/ubsec.c bus_dmamap_unload(sc->sc_dmat, q->q_src_map);
q 1199 dev/pci/ubsec.c bus_dmamap_destroy(sc->sc_dmat, q->q_src_map);
q 1203 dev/pci/ubsec.c SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next);
q 1217 dev/pci/ubsec.c ubsec_callback(struct ubsec_softc *sc, struct ubsec_q *q)
q 1219 dev/pci/ubsec.c struct cryptop *crp = (struct cryptop *)q->q_crp;
q 1221 dev/pci/ubsec.c struct ubsec_dma *dmap = q->q_dma;
q 1229 dev/pci/ubsec.c if (q->q_dst_map != NULL && q->q_dst_map != q->q_src_map) {
q 1230 dev/pci/ubsec.c bus_dmamap_sync(sc->sc_dmat, q->q_dst_map,
q 1231 dev/pci/ubsec.c 0, q->q_dst_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
q 1232 dev/pci/ubsec.c bus_dmamap_unload(sc->sc_dmat, q->q_dst_map);
q 1233 dev/pci/ubsec.c bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map);
q 1235 dev/pci/ubsec.c bus_dmamap_sync(sc->sc_dmat, q->q_src_map,
q 1236 dev/pci/ubsec.c 0, q->q_src_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
q 1237 dev/pci/ubsec.c bus_dmamap_unload(sc->sc_dmat, q->q_src_map);
q 1238 dev/pci/ubsec.c bus_dmamap_destroy(sc->sc_dmat, q->q_src_map);
q 1240 dev/pci/ubsec.c if ((crp->crp_flags & CRYPTO_F_IMBUF) && (q->q_src_m != q->q_dst_m)) {
q 1241 dev/pci/ubsec.c m_freem(q->q_src_m);
q 1242 dev/pci/ubsec.c crp->crp_buf = (caddr_t)q->q_dst_m;
q 1246 dev/pci/ubsec.c if (q->q_flags & UBSEC_QFLAGS_COPYOUTIV) {
q 1254 dev/pci/ubsec.c (caddr_t)sc->sc_sessions[q->q_sesn].ses_iv);
q 1258 dev/pci/ubsec.c (caddr_t)sc->sc_sessions[q->q_sesn].ses_iv);
q 1277 dev/pci/ubsec.c SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next);
q 1324 dev/pci/ubsec.c struct ubsec_q2 *q;
q 1329 dev/pci/ubsec.c q = SIMPLEQ_FIRST(&sc->sc_queue2);
q 1331 dev/pci/ubsec.c bus_dmamap_sync(sc->sc_dmat, q->q_mcr.dma_map, 0,
q 1332 dev/pci/ubsec.c q->q_mcr.dma_map->dm_mapsize,
q 1334 dev/pci/ubsec.c bus_dmamap_sync(sc->sc_dmat, q->q_ctx.dma_map, 0,
q 1335 dev/pci/ubsec.c q->q_ctx.dma_map->dm_mapsize,
q 1338 dev/pci/ubsec.c WRITE_REG(sc, BS_MCR2, q->q_mcr.dma_paddr);
q 1341 dev/pci/ubsec.c SIMPLEQ_INSERT_TAIL(&sc->sc_qchip2, q, q_next);
q 1349 dev/pci/ubsec.c ubsec_callback2(struct ubsec_softc *sc, struct ubsec_q2 *q)
q 1354 dev/pci/ubsec.c ctx = (struct ubsec_ctx_keyop *)q->q_ctx.dma_vaddr;
q 1355 dev/pci/ubsec.c bus_dmamap_sync(sc->sc_dmat, q->q_ctx.dma_map, 0,
q 1356 dev/pci/ubsec.c q->q_ctx.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
q 1358 dev/pci/ubsec.c switch (q->q_type) {
q 1362 dev/pci/ubsec.c struct ubsec_q2_rng *rng = (struct ubsec_q2_rng *)q;
q 1377 dev/pci/ubsec.c struct ubsec_q2_modexp *me = (struct ubsec_q2_modexp *)q;
q 1422 dev/pci/ubsec.c struct ubsec_q2_rsapriv *rp = (struct ubsec_q2_rsapriv *)q;
q 1624 dev/pci/ubsec.c struct ubsec_q *q;
q 1627 dev/pci/ubsec.c q = SIMPLEQ_FIRST(&sc->sc_qchip);
q 1629 dev/pci/ubsec.c ubsec_free_q(sc, q);
q 1638 dev/pci/ubsec.c ubsec_free_q(struct ubsec_softc *sc, struct ubsec_q *q)
q 1645 dev/pci/ubsec.c npkts = q->q_nstacked_mcrs;
q 1648 dev/pci/ubsec.c if(q->q_stacked_mcr[i]) {
q 1649 dev/pci/ubsec.c q2 = q->q_stacked_mcr[i];
q 1668 dev/pci/ubsec.c if ((q->q_dst_m != NULL) && (q->q_src_m != q->q_dst_m))
q 1669 dev/pci/ubsec.c m_freem(q->q_dst_m);
q 1671 dev/pci/ubsec.c crp = (struct cryptop *)q->q_crp;
q 1673 dev/pci/ubsec.c SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next);
q 1724 dev/pci/ubsec.c ubsec_kfree(struct ubsec_softc *sc, struct ubsec_q2 *q)
q 1726 dev/pci/ubsec.c switch (q->q_type) {
q 1728 dev/pci/ubsec.c struct ubsec_q2_modexp *me = (struct ubsec_q2_modexp *)q;
q 1740 dev/pci/ubsec.c struct ubsec_q2_rsapriv *rp = (struct ubsec_q2_rsapriv *)q;
q 1751 dev/pci/ubsec.c q->q_type);
q 1768 dev/pci/ubsec.c struct ubsec_q2 *q;
q 1770 dev/pci/ubsec.c q = SIMPLEQ_FIRST(&sc->sc_q2free);
q 1772 dev/pci/ubsec.c ubsec_kfree(sc, q);
q 479 dev/raidframe/rf_engine.c RF_DagNode_t *q = NULL, *qh = NULL, *next;
q 556 dev/raidframe/rf_engine.c if (q) {
q 557 dev/raidframe/rf_engine.c q->next = s;
q 558 dev/raidframe/rf_engine.c q = s;
q 560 dev/raidframe/rf_engine.c qh = q = s;
q 569 dev/raidframe/rf_engine.c if (q) {
q 574 dev/raidframe/rf_engine.c q->next = raidPtr->node_queue;
q 652 dev/raidframe/rf_engine.c if (q) {
q 653 dev/raidframe/rf_engine.c q->next = a;
q 654 dev/raidframe/rf_engine.c q = a;
q 656 dev/raidframe/rf_engine.c qh = q = a;
q 663 dev/raidframe/rf_engine.c if (q) {
q 668 dev/raidframe/rf_engine.c q->next = raidPtr->node_queue;
q 58 dev/raidframe/rf_fifo.c RF_FifoHeader_t *q;
q 60 dev/raidframe/rf_fifo.c RF_CallocAndAdd(q, 1, sizeof(RF_FifoHeader_t), (RF_FifoHeader_t *),
q 62 dev/raidframe/rf_fifo.c q->hq_count = q->lq_count = 0;
q 63 dev/raidframe/rf_fifo.c return ((void *) q);
q 69 dev/raidframe/rf_fifo.c RF_FifoHeader_t *q = (RF_FifoHeader_t *) q_in;
q 76 dev/raidframe/rf_fifo.c if (!q->hq_tail) {
q 77 dev/raidframe/rf_fifo.c RF_ASSERT(q->hq_count == 0 && q->hq_head == NULL);
q 78 dev/raidframe/rf_fifo.c q->hq_head = q->hq_tail = elem;
q 80 dev/raidframe/rf_fifo.c RF_ASSERT(q->hq_count != 0 && q->hq_head != NULL);
q 81 dev/raidframe/rf_fifo.c q->hq_tail->next = elem;
q 82 dev/raidframe/rf_fifo.c q->hq_tail = elem;
q 84 dev/raidframe/rf_fifo.c q->hq_count++;
q 91 dev/raidframe/rf_fifo.c if (!q->lq_tail) {
q 92 dev/raidframe/rf_fifo.c RF_ASSERT(q->lq_count == 0 && q->lq_head == NULL);
q 93 dev/raidframe/rf_fifo.c q->lq_head = q->lq_tail = elem;
q 95 dev/raidframe/rf_fifo.c RF_ASSERT(q->lq_count != 0 && q->lq_head != NULL);
q 96 dev/raidframe/rf_fifo.c q->lq_tail->next = elem;
q 97 dev/raidframe/rf_fifo.c q->lq_tail = elem;
q 99 dev/raidframe/rf_fifo.c q->lq_count++;
q 101 dev/raidframe/rf_fifo.c if ((q->hq_count + q->lq_count) != elem->queue->queueLength) {
q 103 dev/raidframe/rf_fifo.c q->hq_count, q->lq_count, (int) elem->queue->queueLength);
q 110 dev/raidframe/rf_fifo.c RF_ASSERT((q->hq_count + q->lq_count) == elem->queue->queueLength);
q 116 dev/raidframe/rf_fifo.c RF_FifoHeader_t *q = (RF_FifoHeader_t *) q_in;
q 119 dev/raidframe/rf_fifo.c RF_ASSERT(q);
q 120 dev/raidframe/rf_fifo.c if (q->hq_head) {
q 121 dev/raidframe/rf_fifo.c RF_ASSERT(q->hq_count != 0 && q->hq_tail != NULL);
q 122 dev/raidframe/rf_fifo.c nd = q->hq_head;
q 123 dev/raidframe/rf_fifo.c q->hq_head = q->hq_head->next;
q 124 dev/raidframe/rf_fifo.c if (!q->hq_head)
q 125 dev/raidframe/rf_fifo.c q->hq_tail = NULL;
q 127 dev/raidframe/rf_fifo.c q->hq_count--;
q 129 dev/raidframe/rf_fifo.c if (q->lq_head) {
q 130 dev/raidframe/rf_fifo.c RF_ASSERT(q->lq_count != 0 && q->lq_tail != NULL);
q 131 dev/raidframe/rf_fifo.c nd = q->lq_head;
q 132 dev/raidframe/rf_fifo.c q->lq_head = q->lq_head->next;
q 133 dev/raidframe/rf_fifo.c if (!q->lq_head)
q 134 dev/raidframe/rf_fifo.c q->lq_tail = NULL;
q 136 dev/raidframe/rf_fifo.c q->lq_count--;
q 142 dev/raidframe/rf_fifo.c RF_ASSERT(q->hq_count == 0 && q->lq_count == 0 &&
q 143 dev/raidframe/rf_fifo.c q->hq_tail == NULL && q->lq_tail == NULL);
q 157 dev/raidframe/rf_fifo.c RF_FifoHeader_t *q = (RF_FifoHeader_t *) q_in;
q 159 dev/raidframe/rf_fifo.c RF_ASSERT(q);
q 160 dev/raidframe/rf_fifo.c if (q->hq_head)
q 161 dev/raidframe/rf_fifo.c headElement = q->hq_head;
q 163 dev/raidframe/rf_fifo.c if (q->lq_head)
q 164 dev/raidframe/rf_fifo.c headElement = q->lq_head;
q 181 dev/raidframe/rf_fifo.c RF_FifoHeader_t *q = (RF_FifoHeader_t *) q_in;
q 183 dev/raidframe/rf_fifo.c RF_DiskQueueData_t *lp = q->lq_head, *pt = NULL;
q 200 dev/raidframe/rf_fifo.c q->lq_head = lp->next;
q 202 dev/raidframe/rf_fifo.c if (!q->lq_head)
q 204 dev/raidframe/rf_fifo.c q->lq_tail = NULL;
q 206 dev/raidframe/rf_fifo.c if (lp == q->lq_tail)
q 208 dev/raidframe/rf_fifo.c q->lq_tail = pt;
q 211 dev/raidframe/rf_fifo.c q->lq_count--;
q 213 dev/raidframe/rf_fifo.c if (q->hq_tail) {
q 214 dev/raidframe/rf_fifo.c q->hq_tail->next = lp;
q 215 dev/raidframe/rf_fifo.c q->hq_tail = lp;
q 219 dev/raidframe/rf_fifo.c q->hq_head = q->hq_tail = lp;
q 221 dev/raidframe/rf_fifo.c q->hq_count++;
q 228 dev/raidframe/rf_fifo.c lp = (pt) ? pt->next : q->lq_head;
q 76 dev/raidframe/rf_geniq.c unsigned int m, p, q;
q 142 dev/raidframe/rf_geniq.c for (q = 0; q < 32; q++) {
q 157 dev/raidframe/rf_geniq.c q)
q 163 dev/raidframe/rf_geniq.c i, j, p, q);
q 762 dev/raidframe/rf_pq.c unsigned int *q = &(rf_qfor[28 - coeff][0]);
q 778 dev/raidframe/rf_pq.c a1 = q[a1];
q 779 dev/raidframe/rf_pq.c a2 = q[a2];
q 783 dev/raidframe/rf_pq.c a1 = q[a1];
q 784 dev/raidframe/rf_pq.c a2 = q[a2];
q 788 dev/raidframe/rf_pq.c a1 = q[a1];
q 789 dev/raidframe/rf_pq.c a2 = q[a2];
q 794 dev/raidframe/rf_pq.c a1 = q[a1];
q 795 dev/raidframe/rf_pq.c a2 = q[a2];
q 799 dev/raidframe/rf_pq.c a1 = q[a1];
q 800 dev/raidframe/rf_pq.c a2 = q[a2];
q 804 dev/raidframe/rf_pq.c a1 = q[a1];
q 805 dev/raidframe/rf_pq.c a2 = q[a2];
q 832 dev/raidframe/rf_pq.c unsigned int *q = &(rf_qfor[28 - coeff][0]);
q 836 dev/raidframe/rf_pq.c q = NULL; /* XXX For now */
q 856 dev/raidframe/rf_pq.c a1 = q[a1];
q 857 dev/raidframe/rf_pq.c a2 = q[a2];
q 861 dev/raidframe/rf_pq.c a1 = q[a1];
q 862 dev/raidframe/rf_pq.c a2 = q[a2];
q 866 dev/raidframe/rf_pq.c a1 = q[a1];
q 867 dev/raidframe/rf_pq.c a2 = q[a2];
q 871 dev/raidframe/rf_pq.c a1 = q[a1];
q 872 dev/raidframe/rf_pq.c a2 = q[a2];
q 877 dev/raidframe/rf_pq.c a1 = q[a1];
q 878 dev/raidframe/rf_pq.c a2 = q[a2];
q 882 dev/raidframe/rf_pq.c a1 = q[a1];
q 883 dev/raidframe/rf_pq.c a2 = q[a2];
q 887 dev/raidframe/rf_pq.c a1 = q[a1];
q 888 dev/raidframe/rf_pq.c a2 = q[a2];
q 915 dev/raidframe/rf_pq.c unsigned long p, q, a, a0, a1;
q 922 dev/raidframe/rf_pq.c q = *qbuf++;
q 924 dev/raidframe/rf_pq.c a1 = EXTRACT(q, 0);
q 930 dev/raidframe/rf_pq.c a1 = EXTRACT(q, i); \
q 979 dev/raidframe/rf_pq.c unsigned int *q = &(rf_qfor[3 + coeff][0]);
q 988 dev/raidframe/rf_pq.c a1 = r ^ q[a1];
q 989 dev/raidframe/rf_pq.c a2 = r ^ q[a2];
q 996 dev/raidframe/rf_pq.c a1 = r ^ q[a1]; \
q 997 dev/raidframe/rf_pq.c a2 = r ^ q[a2]; \
q 292 dev/raidframe/rf_reconmap.c RF_ReconMapListElem_t *p, *q;
q 303 dev/raidframe/rf_reconmap.c q = p;
q 305 dev/raidframe/rf_reconmap.c RF_Free(q, sizeof(*q));
q 758 dev/raidframe/rf_stripelocks.c RF_LockReqDesc_t *q;
q 774 dev/raidframe/rf_stripelocks.c for (did = 1, j = 0, q = p->granted; q;
q 775 dev/raidframe/rf_stripelocks.c j++, q = q->next) {
q 776 dev/raidframe/rf_stripelocks.c printf(" %c(%ld-%ld", q->type,
q 777 dev/raidframe/rf_stripelocks.c (long) q->start, (long) q->stop);
q 778 dev/raidframe/rf_stripelocks.c if (q->start2 != -1)
q 780 dev/raidframe/rf_stripelocks.c (long) q->start2,
q 781 dev/raidframe/rf_stripelocks.c (long) q->stop2);
q 797 dev/raidframe/rf_stripelocks.c for (did = 1, j = 0, q = p->waitersH; q;
q 798 dev/raidframe/rf_stripelocks.c j++, q = q->next) {
q 799 dev/raidframe/rf_stripelocks.c printf("%c(%ld-%ld", q->type,
q 800 dev/raidframe/rf_stripelocks.c (long) q->start, (long) q->stop);
q 801 dev/raidframe/rf_stripelocks.c if (q->start2 != -1)
q 803 dev/raidframe/rf_stripelocks.c (long) q->start2,
q 804 dev/raidframe/rf_stripelocks.c (long) q->stop2);
q 417 dev/sdmmc/sbt.c sbt_start(struct hci_unit *unit, struct ifqueue *q, int xmit)
q 426 dev/sdmmc/sbt.c if (sc->sc_dying || IF_IS_EMPTY(q))
q 429 dev/sdmmc/sbt.c IF_DEQUEUE(q, m);
q 285 dev/sequencer.c struct sequencer_queue *q = &sc->outq;
q 290 dev/sequencer.c DPRINTFN(4, ("seq_startoutput: %p, len=%d\n", sc, SEQ_QLEN(q)));
q 291 dev/sequencer.c while(!SEQ_QEMPTY(q) && !sc->timeout) {
q 292 dev/sequencer.c SEQ_QGET(q, cmd);
q 323 dev/sequencer.c struct sequencer_queue *q = &sc->inq;
q 328 dev/sequencer.c if (SEQ_QFULL(q))
q 330 dev/sequencer.c SEQ_QPUT(q, *cmd);
q 374 dev/sequencer.c struct sequencer_queue *q = &sc->inq;
q 387 dev/sequencer.c while (SEQ_QEMPTY(q)) {
q 397 dev/sequencer.c while (uio->uio_resid >= sizeof ev && !error && !SEQ_QEMPTY(q)) {
q 398 dev/sequencer.c SEQ_QGET(q, ev);
q 409 dev/sequencer.c struct sequencer_queue *q = &sc->outq;
q 432 dev/sequencer.c while (SEQ_QFULL(q)) {
q 434 dev/sequencer.c if (SEQ_QFULL(q)) {
q 442 dev/sequencer.c SEQ_QPUT(q, cmdbuf);
q 57 dev/sequencervar.h #define SEQ_QINIT(q) ((q)->in = (q)->out = (q)->count = 0)
q 58 dev/sequencervar.h #define SEQ_QEMPTY(q) ((q)->count == 0)
q 59 dev/sequencervar.h #define SEQ_QFULL(q) ((q)->count >= SEQ_MAXQ)
q 60 dev/sequencervar.h #define SEQ_QPUT(q, e) ((q)->buf[(q)->in++] = (e), (q)->in %= SEQ_MAXQ, (q)->count++)
q 61 dev/sequencervar.h #define SEQ_QGET(q, e) ((e) = (q)->buf[(q)->out++], (q)->out %= SEQ_MAXQ, (q)->count--)
q 62 dev/sequencervar.h #define SEQ_QLEN(q) ((q)->count)
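
The SEQ_Q* macros above implement a fixed-size ring buffer: in is the write index, out the read index, and count the fill level, all wrapped modulo SEQ_MAXQ. A sketch of the same structure as functions; the SEQ_MAXQ value, the int element type and the ring_put()/ring_get() names are assumptions of this sketch, not driver code.

/*
 * Sketch of the ring buffer the SEQ_Q* macros describe.
 */
#define SEQ_MAXQ    256

struct ring {
    int buf[SEQ_MAXQ];
    int in, out, count;
};

static int
ring_put(struct ring *q, int e)
{
    if (q->count >= SEQ_MAXQ)       /* SEQ_QFULL() */
        return (-1);
    q->buf[q->in++] = e;            /* SEQ_QPUT() */
    q->in %= SEQ_MAXQ;
    q->count++;
    return (0);
}

static int
ring_get(struct ring *q, int *e)
{
    if (q->count == 0)              /* SEQ_QEMPTY() */
        return (-1);
    *e = q->buf[q->out++];          /* SEQ_QGET() */
    q->out %= SEQ_MAXQ;
    q->count--;
    return (0);
}
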
q 472 dev/systrace.c struct proc *q = strp->proc;
q 475 dev/systrace.c psignal(q, SIGKILL);
q 99 dev/usb/ehci.c #define bitmask_snprintf(q,f,b,l) snprintf((b), (l), "%b", (q), (f))
q 78 dev/usb/ohci.c #define bitmask_snprintf(q,f,b,l) snprintf((b), (l), "%b", (q), (f))
q 87 dev/usb/ugen.c struct clist q;
q 347 dev/usb/ugen.c if (clalloc(&sce->q, UGEN_IBSIZE, 0) == -1)
q 356 dev/usb/ugen.c clfree(&sce->q);
q 464 dev/usb/ugen.c ndflush(&sce->q, sce->q.c_cc);
q 465 dev/usb/ugen.c clfree(&sce->q);
q 478 dev/usb/ugen.c clfree(&sce->q);
q 521 dev/usb/ugen.c while (sce->q.c_cc == 0) {
q 545 dev/usb/ugen.c while (sce->q.c_cc > 0 && uio->uio_resid > 0 && !error) {
q 546 dev/usb/ugen.c n = min(sce->q.c_cc, uio->uio_resid);
q 551 dev/usb/ugen.c q_to_b(&sce->q, buffer, n);
q 836 dev/usb/ugen.c (void)b_to_q(ibuf, count, &sce->q);
q 1325 dev/usb/ugen.c if (sce->q.c_cc > 0)
q 1376 dev/usb/ugen.c kn->kn_data = sce->q.c_cc;
q 86 dev/usb/uhci.c #define bitmask_snprintf(q,f,b,l) snprintf((b), (l), "%b", (q), (f))
q 668 dev/usb/ulpt.c char *p, *q;
q 676 dev/usb/ulpt.c q = strchr(p, ';');
q 677 dev/usb/ulpt.c if (q)
q 678 dev/usb/ulpt.c printf("%.*s", (int)(q - p + 1), p);
q 657 dev/usb/umidi.c struct umidi_endpoint *p, *q, *lowest, *endep, tmpep;
q 714 dev/usb/umidi.c for (q=p+1; q<endep; q++) {
q 716 dev/usb/umidi.c UE_GET_DIR(q->addr)==UE_DIR_OUT) ||
q 718 dev/usb/umidi.c UE_GET_DIR(q->addr)) &&
q 720 dev/usb/umidi.c UE_GET_ADDR(q->addr))))
q 721 dev/usb/umidi.c lowest = q;
q 383 dev/usb/umidi_quirks.c struct umq_data *q;
q 397 dev/usb/umidi_quirks.c for (q=p->quirks; q->type; q++)
q 398 dev/usb/umidi_quirks.c p->type_mask |= 1<<(q->type-1);
q 414 dev/usb/umidi_quirks.c umidi_print_quirk(struct umidi_quirk *q)
q 417 dev/usb/umidi_quirks.c if (q) {
q 419 dev/usb/umidi_quirks.c for (qd=q->quirks; qd->type; qd++)
q 428 dev/usb/umidi_quirks.c umidi_get_quirk_data_from_type(struct umidi_quirk *q, u_int32_t type)
q 431 dev/usb/umidi_quirks.c if (q) {
q 432 dev/usb/umidi_quirks.c for (qd=q->quirks; qd->type; qd++)
q 62 dev/usb/umidi_quirks.h #define UMQ_ISTYPE(q, type) \
q 63 dev/usb/umidi_quirks.h ((q)->sc_quirk && ((q)->sc_quirk->type_mask & (1<<((type)-1))))
q 208 dev/usb/usb_subr.c char *q, *e;
q 212 dev/usb/usb_subr.c q = e = p;
q 213 dev/usb/usb_subr.c while (*q == ' ') /* skip leading spaces */
q 214 dev/usb/usb_subr.c q++;
q 215 dev/usb/usb_subr.c while ((*p = *q++)) /* copy string */
q 99 dev/wscons/wsevent.c if (ev->q != NULL) {
q 106 dev/wscons/wsevent.c ev->q = malloc((u_long)WSEVENT_QSIZE * sizeof(struct wscons_event),
q 108 dev/wscons/wsevent.c bzero((caddr_t)ev->q, WSEVENT_QSIZE * sizeof(struct wscons_event));
q 117 dev/wscons/wsevent.c if (ev->q == NULL) {
q 123 dev/wscons/wsevent.c free(ev->q, M_DEVBUF);
q 124 dev/wscons/wsevent.c ev->q = NULL;
q 167 dev/wscons/wsevent.c error = uiomove((caddr_t)&ev->q[ev->get],
q 180 dev/wscons/wsevent.c error = uiomove((caddr_t)&ev->q[0],
q 90 dev/wscons/wseventvar.h struct wscons_event *q; /* circular buffer (queue) of events */
q 687 dev/wscons/wskbd.c if (evar->q == NULL) {
q 694 dev/wscons/wskbd.c ev = &evar->q[put];
q 308 dev/wscons/wsmouse.c if (evar->q == NULL) {
q 340 dev/wscons/wsmouse.c ev = &evar->q[put];
q 353 dev/wscons/wsmouse.c ev = &evar->q[0]; \
q 405 dev/wscons/wsmux.c ev = &evar->q[put];
q 116 kern/kern_exit.c struct proc *q, *nq;
q 145 kern/kern_exit.c q = TAILQ_FIRST(&p->p_p->ps_threads);
q 146 kern/kern_exit.c for (; q != NULL; q = nq) {
q 147 kern/kern_exit.c nq = TAILQ_NEXT(q, p_thr_link);
q 148 kern/kern_exit.c atomic_setbits_int(&q->p_flag, P_IGNEXITRV);
q 149 kern/kern_exit.c q->p_xstat = rv;
q 150 kern/kern_exit.c psignal(q, SIGKILL);
q 254 kern/kern_exit.c q = LIST_FIRST(&p->p_children);
q 255 kern/kern_exit.c if (q) /* only need this if any child is S_ZOMB */
q 257 kern/kern_exit.c for (; q != 0; q = nq) {
q 258 kern/kern_exit.c nq = LIST_NEXT(q, p_sibling);
q 259 kern/kern_exit.c proc_reparent(q, initproc);
q 264 kern/kern_exit.c if (q->p_flag & P_TRACED) {
q 265 kern/kern_exit.c atomic_clearbits_int(&q->p_flag, P_TRACED);
q 266 kern/kern_exit.c psignal(q, SIGKILL);
q 446 kern/kern_exit.c sys_wait4(struct proc *q, void *v, register_t *retval)
q 459 kern/kern_exit.c SCARG(uap, pid) = -q->p_pgid;
q 465 kern/kern_exit.c LIST_FOREACH(p, &q->p_children, p_sibling) {
q 510 kern/kern_exit.c scheduler_wait_hook(q, p);
q 512 kern/kern_exit.c ruadd(&q->p_stats->p_cru, p->p_ru);
q 550 kern/kern_exit.c if ((error = tsleep(q, PWAIT | PCATCH, "wait", 0)) != 0)
q 93 kern/kern_sig.c cansignal(struct proc *p, struct pcred *pc, struct proc *q, int signum)
q 98 kern/kern_sig.c if (p == q)
q 103 kern/kern_sig.c if (q->p_flag & P_THREAD) {
q 104 kern/kern_sig.c return (p->p_p == q->p_p);
q 108 kern/kern_sig.c if (signum == SIGCONT && q->p_session == p->p_session)
q 115 kern/kern_sig.c if (q->p_flag & P_SUGID) {
q 129 kern/kern_sig.c if (pc->p_ruid == q->p_cred->p_ruid ||
q 130 kern/kern_sig.c pc->pc_ucred->cr_uid == q->p_cred->p_ruid ||
q 131 kern/kern_sig.c pc->p_ruid == q->p_ucred->cr_uid ||
q 132 kern/kern_sig.c pc->pc_ucred->cr_uid == q->p_ucred->cr_uid)
q 142 kern/kern_sig.c if (pc->p_ruid == q->p_cred->p_ruid ||
q 143 kern/kern_sig.c pc->p_ruid == q->p_cred->p_svuid ||
q 144 kern/kern_sig.c pc->pc_ucred->cr_uid == q->p_cred->p_ruid ||
q 145 kern/kern_sig.c pc->pc_ucred->cr_uid == q->p_cred->p_svuid ||
q 146 kern/kern_sig.c pc->p_ruid == q->p_ucred->cr_uid ||
q 147 kern/kern_sig.c pc->pc_ucred->cr_uid == q->p_ucred->cr_uid)
q 782 kern/kern_sig.c struct proc *q;
q 796 kern/kern_sig.c TAILQ_FOREACH(q, &p->p_p->ps_threads, p_thr_link) {
q 797 kern/kern_sig.c if (q == p)
q 799 kern/kern_sig.c if (q->p_sigdivert & (1 << signum)) {
q 800 kern/kern_sig.c q->p_sigdivert = 0;
q 801 kern/kern_sig.c psignal(q, signum);
q 850 kern/kern_sig.c TAILQ_FOREACH(q, &p->p_p->ps_threads, p_thr_link) {
q 851 kern/kern_sig.c if (q != p)
q 852 kern/kern_sig.c psignal(q, signum);
q 861 kern/kern_sig.c TAILQ_FOREACH(q, &p->p_p->ps_threads, p_thr_link) {
q 862 kern/kern_sig.c if (q != p)
q 863 kern/kern_sig.c psignal(q, signum);
q 320 kern/kern_synch.c struct proc *p, **q;
q 326 kern/kern_synch.c for (q = &qp->sq_head; (p = *q) != NULL; ) {
q 336 kern/kern_synch.c *q = p->p_forw;
q 338 kern/kern_synch.c qp->sq_tailp = q;
q 366 kern/kern_synch.c q = &p->p_forw;
q 424 kern/kern_synch.c struct proc *q;
q 427 kern/kern_synch.c TAILQ_FOREACH(q, &p->p_p->ps_threads, p_thr_link) {
q 428 kern/kern_synch.c if (q->p_thrslpid == ident) {
q 429 kern/kern_synch.c wakeup(&q->p_thrslpid);
q 430 kern/kern_synch.c q->p_thrslpid = 0;
q 217 kern/tty.c #define FLUSHQ(q) { \
q 218 kern/tty.c if ((q)->c_cc) \
q 219 kern/tty.c ndflush(q, (q)->c_cc); \
q 209 kern/uipc_socket2.c soqinsque(struct socket *head, struct socket *so, int q)
q 218 kern/uipc_socket2.c if (q == 0) {
q 229 kern/uipc_socket2.c soqremque(struct socket *so, int q)
q 234 kern/uipc_socket2.c if (q == 0) {
q 144 kern/vfs_bio.c int i = 0, q;
q 155 kern/vfs_bio.c q = (i + 1 - PAGE_SHIFT); /* XXX 4096 is queue 1 */
q 156 kern/vfs_bio.c if (q >= BQUEUES)
q 157 kern/vfs_bio.c panic("queue %d > BQUEUES %d", q, BQUEUES);
q 158 kern/vfs_bio.c if (q == 0)
q 160 kern/vfs_bio.c return(q);
q 993 kern/vfs_bio.c int q, gotsome = 0;
q 995 kern/vfs_bio.c for (q = 1; q < BQUEUES; q++) {
q 997 kern/vfs_bio.c while (bqpages[q] > bqpagelow
q 998 kern/vfs_bio.c && (bp = TAILQ_FIRST(&bufqueues[q]))
q 50 lib/libkern/adddi3.c aa.q = a;
q 51 lib/libkern/adddi3.c bb.q = b;
q 54 lib/libkern/adddi3.c return (sum.q);
q 48 lib/libkern/anddi3.c aa.q = a;
q 49 lib/libkern/anddi3.c bb.q = b;
q 52 lib/libkern/anddi3.c return (aa.q);
q 51 lib/libkern/ashldi3.c aa.q = a;
q 60 lib/libkern/ashldi3.c return (aa.q);
q 50 lib/libkern/ashrdi3.c aa.q = a;
q 72 lib/libkern/ashrdi3.c return (aa.q);
q 50 lib/libkern/cmpdi2.c aa.q = a;
q 51 lib/libkern/cmpdi2.c bb.q = b;
q 48 lib/libkern/iordi3.c aa.q = a;
q 49 lib/libkern/iordi3.c bb.q = b;
q 52 lib/libkern/iordi3.c return (aa.q);
q 51 lib/libkern/lshldi3.c aa.q = a;
q 60 lib/libkern/lshldi3.c return (aa.q);
q 50 lib/libkern/lshrdi3.c aa.q = a;
q 59 lib/libkern/lshrdi3.c return (aa.q);
q 117 lib/libkern/muldi3.c u.q = a, negall = 0;
q 119 lib/libkern/muldi3.c u.q = -a, negall = 1;
q 121 lib/libkern/muldi3.c v.q = b;
q 123 lib/libkern/muldi3.c v.q = -b, negall ^= 1;
q 131 lib/libkern/muldi3.c prod.q = __lmulq(u0, v0);
q 139 lib/libkern/muldi3.c low.q = __lmulq(u0, v0);
q 160 lib/libkern/muldi3.c return (negall ? -prod.q : prod.q);
q 241 lib/libkern/muldi3.c return (prod.q);
q 48 lib/libkern/negdi2.c aa.q = a;
q 51 lib/libkern/negdi2.c return (res.q);
q 49 lib/libkern/notdi2.c aa.q = a;
q 52 lib/libkern/notdi2.c return (aa.q);
q 71 lib/libkern/qdivrem.c digit *u, *v, *q;
q 87 lib/libkern/qdivrem.c return (tmp.q);
q 96 lib/libkern/qdivrem.c q = &qspace[0];
q 147 lib/libkern/qdivrem.c return (tmp.q);
q 159 lib/libkern/qdivrem.c q[i] = 0;
q 160 lib/libkern/qdivrem.c q += 4 - m;
q 238 lib/libkern/qdivrem.c q[j] = (digit)qhat;
q 255 lib/libkern/qdivrem.c *arq = tmp.q;
q 260 lib/libkern/qdivrem.c return (tmp.q);
q 64 lib/libkern/quad.h quad_t q; /* as a (signed) quad */
q 1990 lib/libkern/softfloat.c bits32 q;
q 2033 lib/libkern/softfloat.c q = ( bSig <= aSig );
q 2034 lib/libkern/softfloat.c if ( q ) aSig -= bSig;
q 2036 lib/libkern/softfloat.c q = ( ( (bits64) aSig )<<32 ) / bSig;
q 2037 lib/libkern/softfloat.c q >>= 32 - expDiff;
q 2039 lib/libkern/softfloat.c aSig = ( ( aSig>>1 )<<( expDiff - 1 ) ) - bSig * q;
q 2060 lib/libkern/softfloat.c q = q64>>( 64 - expDiff );
q 2062 lib/libkern/softfloat.c aSig = ( ( aSig64>>33 )<<( expDiff - 1 ) ) - bSig * q;
q 2066 lib/libkern/softfloat.c ++q;
q 2070 lib/libkern/softfloat.c if ( ( sigMean < 0 ) || ( ( sigMean == 0 ) && ( q & 1 ) ) ) {
q 2960 lib/libkern/softfloat.c bits64 q, alternateASig;
q 2998 lib/libkern/softfloat.c q = ( bSig <= aSig );
q 2999 lib/libkern/softfloat.c if ( q ) aSig -= bSig;
q 3002 lib/libkern/softfloat.c q = estimateDiv128To64( aSig, 0, bSig );
q 3003 lib/libkern/softfloat.c q = ( 2 < q ) ? q - 2 : 0;
q 3004 lib/libkern/softfloat.c aSig = - ( ( bSig>>2 ) * q );
q 3009 lib/libkern/softfloat.c q = estimateDiv128To64( aSig, 0, bSig );
q 3010 lib/libkern/softfloat.c q = ( 2 < q ) ? q - 2 : 0;
q 3011 lib/libkern/softfloat.c q >>= 64 - expDiff;
q 3013 lib/libkern/softfloat.c aSig = ( ( aSig>>1 )<<( expDiff - 1 ) ) - bSig * q;
q 3021 lib/libkern/softfloat.c ++q;
q 3025 lib/libkern/softfloat.c if ( ( sigMean < 0 ) || ( ( sigMean == 0 ) && ( q & 1 ) ) ) {
q 3900 lib/libkern/softfloat.c bits64 q, term0, term1, alternateASig0, alternateASig1;
q 3943 lib/libkern/softfloat.c q = ( bSig <= aSig0 );
q 3944 lib/libkern/softfloat.c if ( q ) aSig0 -= bSig;
q 3947 lib/libkern/softfloat.c q = estimateDiv128To64( aSig0, aSig1, bSig );
q 3948 lib/libkern/softfloat.c q = ( 2 < q ) ? q - 2 : 0;
q 3949 lib/libkern/softfloat.c mul64To128( bSig, q, &term0, &term1 );
q 3956 lib/libkern/softfloat.c q = estimateDiv128To64( aSig0, aSig1, bSig );
q 3957 lib/libkern/softfloat.c q = ( 2 < q ) ? q - 2 : 0;
q 3958 lib/libkern/softfloat.c q >>= 64 - expDiff;
q 3959 lib/libkern/softfloat.c mul64To128( bSig, q<<( 64 - expDiff ), &term0, &term1 );
q 3963 lib/libkern/softfloat.c ++q;
q 3974 lib/libkern/softfloat.c && ( q & 1 ) )
q 5030 lib/libkern/softfloat.c bits64 aSig0, aSig1, bSig0, bSig1, q, term0, term1, term2;
q 5079 lib/libkern/softfloat.c q = le128( bSig0, bSig1, aSig0, aSig1 );
q 5080 lib/libkern/softfloat.c if ( q ) sub128( aSig0, aSig1, bSig0, bSig1, &aSig0, &aSig1 );
q 5083 lib/libkern/softfloat.c q = estimateDiv128To64( aSig0, aSig1, bSig0 );
q 5084 lib/libkern/softfloat.c q = ( 4 < q ) ? q - 4 : 0;
q 5085 lib/libkern/softfloat.c mul128By64To192( bSig0, bSig1, q, &term0, &term1, &term2 );
q 5092 lib/libkern/softfloat.c q = estimateDiv128To64( aSig0, aSig1, bSig0 );
q 5093 lib/libkern/softfloat.c q = ( 4 < q ) ? q - 4 : 0;
q 5094 lib/libkern/softfloat.c q >>= - expDiff;
q 5103 lib/libkern/softfloat.c mul128By64To192( bSig0, bSig1, q, &term0, &term1, &term2 );
q 5113 lib/libkern/softfloat.c ++q;
q 5119 lib/libkern/softfloat.c || ( ( ( sigMean0 | sigMean1 ) == 0 ) && ( q & 1 ) ) ) {
q 49 lib/libkern/subdi3.c aa.q = a;
q 50 lib/libkern/subdi3.c bb.q = b;
q 53 lib/libkern/subdi3.c return (diff.q);
q 48 lib/libkern/xordi3.c aa.q = a;
q 49 lib/libkern/xordi3.c bb.q = b;
q 52 lib/libkern/xordi3.c return (aa.q);
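The lib/libkern *di3.c routines above all follow the same pattern: view a 64-bit quad_t through a union and operate on its two 32-bit halves. A rough userland sketch of that idea, assuming a simplified union (the real layout lives in lib/libkern/quad.h, of which only the quad_t member is quoted above, so the second member here is an assumption):

    #include <assert.h>
    #include <stdint.h>

    /* simplified stand-in for the union used by the libkern quad routines;
       member names are illustrative, not the quad.h originals */
    union uu {
            int64_t  q;             /* as a (signed) quad */
            uint32_t ul[2];         /* as two unsigned 32-bit halves */
    };

    /* 64-bit bitwise AND done half by half, in the style of the anddi3.c
       lines quoted above (aa.q = a; bb.q = b; ...; return (aa.q);) */
    static int64_t
    and64(int64_t a, int64_t b)
    {
            union uu aa, bb;

            aa.q = a;
            bb.q = b;
            aa.ul[0] &= bb.ul[0];
            aa.ul[1] &= bb.ul[1];
            return (aa.q);
    }

    int
    main(void)
    {
            int64_t a = 0x123456789abcdef0LL, b = 0x00ff00ff00ff00ffLL;

            /* halves combine to the same result as a full-width AND,
               regardless of byte order, since both halves are ANDed */
            assert(and64(a, b) == (a & b));
            return 0;
    }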
q 469 lib/libz/inflate.c #define REVERSE(q) \
q 470 lib/libz/inflate.c ((((q) >> 24) & 0xff) + (((q) >> 8) & 0xff00) + \
q 471 lib/libz/inflate.c (((q) & 0xff00) << 8) + (((q) & 0xff) << 24))
q 226 net/if.c caddr_t q;
q 237 net/if.c q = (caddr_t)malloc(n, M_IFADDR, M_WAITOK);
q 238 net/if.c bzero(q, n);
q 240 net/if.c bcopy((caddr_t)ifnet_addrs, q, m);
q 243 net/if.c ifnet_addrs = (struct ifaddr **)q;
q 248 net/if.c q = (caddr_t)malloc(n, M_IFADDR, M_WAITOK);
q 249 net/if.c bzero(q, n);
q 251 net/if.c bcopy((caddr_t)ifindex2ifnet, q, m);
q 254 net/if.c ifindex2ifnet = (struct ifnet **)q;
q 611 net/if.c if_detach_queues(struct ifnet *ifp, struct ifqueue *q)
q 616 net/if.c for (m = q->ifq_head; m; m = next) {
q 632 net/if.c q->ifq_head = m->m_nextpkt;
q 633 net/if.c if (q->ifq_tail == m)
q 634 net/if.c q->ifq_tail = prev;
q 635 net/if.c q->ifq_len--;
q 639 net/if.c IF_DROP(q);
q 1682 net/if_bridge.c struct bridge_rtnode *p, *q;
q 1711 net/if_bridge.c q = p;
q 1714 net/if_bridge.c dir = memcmp(ea, &q->brt_addr, sizeof(q->brt_addr));
q 1717 net/if_bridge.c q->brt_if = ifp;
q 1718 net/if_bridge.c q->brt_flags = flags;
q 1719 net/if_bridge.c } else if (!(q->brt_flags & IFBAF_STATIC))
q 1720 net/if_bridge.c q->brt_if = ifp;
q 1722 net/if_bridge.c if (q->brt_if == ifp)
q 1723 net/if_bridge.c q->brt_age = 1;
q 1724 net/if_bridge.c ifp = q->brt_if;
q 1745 net/if_bridge.c LIST_INSERT_BEFORE(q, p, brt_next);
q 1766 net/if_bridge.c LIST_INSERT_AFTER(q, p, brt_next);
q 849 net/if_spppsubr.c struct sppp **q, *p, *sp = (struct sppp*) ifp;
q 853 net/if_spppsubr.c for (q = &spppq; (p = *q); q = &p->pp_next)
q 855 net/if_spppsubr.c *q = p->pp_next;
q 201 net/if_strip.c #define CCOUNT(q) ((q)->c_cc)
q 600 net/pf_if.c pfi_if_compare(struct pfi_kif *p, struct pfi_kif *q)
q 602 net/pf_if.c return (strncmp(p->pfik_name, q->pfik_name, IFNAMSIZ));
q 185 net/pf_ruleset.c char *p, *q, *r;
q 199 net/pf_ruleset.c while (parent == NULL && (q = strrchr(p, '/')) != NULL) {
q 200 net/pf_ruleset.c *q = 0;
q 206 net/pf_ruleset.c if (q == NULL)
q 207 net/pf_ruleset.c q = p;
q 209 net/pf_ruleset.c q++;
q 211 net/pf_ruleset.c if (!*q) {
q 215 net/pf_ruleset.c while ((r = strchr(q, '/')) != NULL || *q) {
q 218 net/pf_ruleset.c if (!*q || strlen(q) >= PF_ANCHOR_NAME_SIZE ||
q 231 net/pf_ruleset.c strlcpy(anchor->name, q, sizeof(anchor->name));
q 266 net/pf_ruleset.c q = r + 1;
q 268 net/pf_ruleset.c *q = 0;
q 244 net/pf_table.c struct pfr_kentry *p, *q;
q 268 net/pf_table.c q = pfr_lookup_addr(tmpkt, &ad, 1);
q 270 net/pf_table.c if (q != NULL)
q 279 net/pf_table.c if (p == NULL && q == NULL) {
q 415 net/pf_table.c struct pfr_kentry *p, *q;
q 456 net/pf_table.c q = pfr_lookup_addr(tmpkt, &ad, 1);
q 457 net/pf_table.c if (q != NULL) {
q 831 net/pf_table.c struct pfr_kentry *p, *q;
q 833 net/pf_table.c for (p = SLIST_FIRST(workq); p != NULL; p = q) {
q 834 net/pf_table.c q = SLIST_NEXT(p, pfrke_workq);
q 1162 net/pf_table.c struct pfr_ktable *p, *q, *r, key;
q 1181 net/pf_table.c SLIST_FOREACH(q, &addq, pfrkt_workq) {
q 1182 net/pf_table.c if (!pfr_ktable_compare(p, q))
q 1197 net/pf_table.c SLIST_FOREACH(q, &addq, pfrkt_workq) {
q 1198 net/pf_table.c if (!pfr_ktable_compare(&key, q)) {
q 1199 net/pf_table.c p->pfrkt_root = q;
q 1210 net/pf_table.c SLIST_FOREACH(q, &changeq, pfrkt_workq)
q 1211 net/pf_table.c if (!pfr_ktable_compare(&key, q))
q 1242 net/pf_table.c struct pfr_ktable *p, *q, key;
q 1255 net/pf_table.c SLIST_FOREACH(q, &workq, pfrkt_workq)
q 1256 net/pf_table.c if (!pfr_ktable_compare(p, q))
q 1401 net/pf_table.c struct pfr_ktable *p, *q, key;
q 1422 net/pf_table.c SLIST_FOREACH(q, &workq, pfrkt_workq)
q 1423 net/pf_table.c if (!pfr_ktable_compare(p, q))
q 1618 net/pf_table.c struct pfr_ktable *p, *q;
q 1644 net/pf_table.c for (p = SLIST_FIRST(&workq); p != NULL; p = q) {
q 1645 net/pf_table.c q = SLIST_NEXT(p, pfrkt_workq);
q 1673 net/pf_table.c struct pfr_kentry *p, *q, *next;
q 1686 net/pf_table.c q = pfr_lookup_addr(kt, &ad, 1);
q 1687 net/pf_table.c if (q != NULL) {
q 1688 net/pf_table.c if (q->pfrke_not != p->pfrke_not)
q 1689 net/pf_table.c SLIST_INSERT_HEAD(&changeq, q,
q 1691 net/pf_table.c q->pfrke_mark = 1;
q 1817 net/pf_table.c struct pfr_ktable *p, *q;
q 1819 net/pf_table.c for (p = SLIST_FIRST(workq); p; p = q) {
q 1820 net/pf_table.c q = SLIST_NEXT(p, pfrkt_workq);
q 1921 net/pf_table.c struct pfr_ktable *p, *q;
q 1923 net/pf_table.c for (p = SLIST_FIRST(workq); p; p = q) {
q 1924 net/pf_table.c q = SLIST_NEXT(p, pfrkt_workq);
q 1953 net/pf_table.c pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
q 1957 net/pf_table.c if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
q 1959 net/pf_table.c return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor));
q 170 net/ppp_tty.c #define CCOUNT(q) ((q)->c_cc)
q 615 net/ppp_tty.c u_char *p, *q;
q 642 net/ppp_tty.c for (q = endseq; q < p; ++q)
q 643 net/ppp_tty.c if (putc(*q, &tp->t_outq)) {
q 645 net/ppp_tty.c for (; q > endseq; --q)
q 651 net/ppp_tty.c sc->sc_stats.ppp_obytes += q - endseq;
q 125 net/radix_mpath.c char *p, *q, *eq;
q 168 net/radix_mpath.c q = (char *)netmask;
q 169 net/radix_mpath.c if (bcmp(p + skip, q + skip, l - skip))
q 176 net/radix_mpath.c q += l;
q 178 net/radix_mpath.c while (eq > q)
q 179 net/radix_mpath.c if (*q++) {
q 3112 net/zlib.c #define UPDOUT {s->write=q;}
q 3122 net/zlib.c #define WAVAIL (q<s->read?s->read-q-1:s->end-q)
q 3123 net/zlib.c #define LOADOUT {q=s->write;m=WAVAIL;}
q 3124 net/zlib.c #define WRAP {if(q==s->end&&s->read!=s->window){q=s->window;m=WAVAIL;}}
q 3127 net/zlib.c #define OUTBYTE(a) {*q++=(Byte)(a);m--;}
q 3279 net/zlib.c Bytef *q; /* output window write pointer */
q 3357 net/zlib.c zmemcpy(q, p, t);
q 3359 net/zlib.c q += t; m -= t;
q 3363 net/zlib.c z->total_out + (q >= s->read ? q - s->read :
q 3364 net/zlib.c (s->end - s->read) + (q - s->window))));
q 3505 net/zlib.c z->total_out + (q >= s->read ? q - s->read :
q 3506 net/zlib.c (s->end - s->read) + (q - s->window))));
q 3567 net/zlib.c Bytef *q; /* output window write pointer */
q 3586 net/zlib.c s->check = (*s->checkfn)(s->check, q, t);
q 3587 net/zlib.c zmemcpy(q, p, t);
q 3588 net/zlib.c q += t;
q 3592 net/zlib.c s->read = q; /* drag read pointer forward */
q 3594 net/zlib.c if (q == s->end) {
q 3595 net/zlib.c s->read = q = s->window;
q 3647 net/zlib.c voidpf q, /* opaque pointer (not used) */
q 3735 net/zlib.c inflate_huft *q; /* points to current table */
q 3812 net/zlib.c q = (inflate_huft *)Z_NULL; /* ditto */
q 3845 net/zlib.c if ((q = (inflate_huft *)ZALLOC
q 3852 net/zlib.c q->word.Nalloc = z + 1;
q 3856 net/zlib.c *t = q + 1; /* link to list for huft_free() */
q 3857 net/zlib.c *(t = &(q->next)) = Z_NULL;
q 3858 net/zlib.c u[h] = ++q; /* table starts after link */
q 3866 net/zlib.c r.next = q; /* pointer to this table */
q 3890 net/zlib.c q[j] = r;
q 3995 net/zlib.c local voidpf falloc(q, n, s)
q 3996 net/zlib.c voidpf q; /* opaque pointer (not used) */
q 4002 net/zlib.c if (q) s++; /* to make some compilers happy */
q 4008 net/zlib.c local void ffree(q, p, n)
q 4009 net/zlib.c voidpf q;
q 4014 net/zlib.c if (q) q = p; /* to make some compilers happy */
q 4075 net/zlib.c inflate_huft *p, *q;
q 4081 net/zlib.c q = (--p)->next;
q 4083 net/zlib.c p = q;
q 4173 net/zlib.c Bytef *q; /* output window write pointer */
q 4280 net/zlib.c f = (uInt)(q - s->window) < c->sub.copy.dist ?
q 4281 net/zlib.c s->end - (c->sub.copy.dist - (q - s->window)) :
q 4282 net/zlib.c q - c->sub.copy.dist;
q 4284 net/zlib.c f = q - c->sub.copy.dist;
q 4285 net/zlib.c if ((uInt)(q - s->window) < c->sub.copy.dist)
q 4286 net/zlib.c f = s->end - (c->sub.copy.dist - (q - s->window));
q 4342 net/zlib.c Bytef *p, *q;
q 4346 net/zlib.c q = s->read;
q 4349 net/zlib.c n = (uInt)((q <= s->write ? s->write : s->end) - q);
q 4359 net/zlib.c s->check = (*s->checkfn)(s->check, q, n);
q 4363 net/zlib.c zmemcpy(p, q, n);
q 4366 net/zlib.c q += n;
q 4369 net/zlib.c if (q == s->end)
q 4372 net/zlib.c q = s->window;
q 4377 net/zlib.c n = (uInt)(s->write - q);
q 4387 net/zlib.c s->check = (*s->checkfn)(s->check, q, n);
q 4391 net/zlib.c zmemcpy(p, q, n);
q 4394 net/zlib.c q += n;
q 4399 net/zlib.c s->read = q;
q 4439 net/zlib.c Bytef *q; /* output window write pointer */
q 4464 net/zlib.c *q++ = (Byte)t->base;
q 4494 net/zlib.c if ((uInt)(q - s->window) >= d) /* offset before dest */
q 4496 net/zlib.c r = q - d;
q 4497 net/zlib.c *q++ = *r++; c--; /* minimum count is three, */
q 4498 net/zlib.c *q++ = *r++; c--; /* so unroll loop a little */
q 4502 net/zlib.c e = d - (q - s->window); /* bytes from offset to end */
q 4508 net/zlib.c *q++ = *r++;
q 4514 net/zlib.c *q++ = *r++;
q 4538 net/zlib.c *q++ = (Byte)t->base;
q 746 netinet/ip_input.c struct ipqent *nq, *p, *q;
q 798 netinet/ip_input.c for (p = NULL, q = LIST_FIRST(&fp->ipq_fragq);
q 799 netinet/ip_input.c q != LIST_END(&fp->ipq_fragq); p = q, q = LIST_NEXT(q, ipqe_q))
q 800 netinet/ip_input.c if (ntohs(q->ipqe_ip->ip_off) > ntohs(ipqe->ipqe_ip->ip_off))
q 826 netinet/ip_input.c for (; q != NULL &&
q 828 netinet/ip_input.c ntohs(q->ipqe_ip->ip_off); q = nq) {
q 830 netinet/ip_input.c ntohs(ipqe->ipqe_ip->ip_len)) - ntohs(q->ipqe_ip->ip_off);
q 831 netinet/ip_input.c if (i < ntohs(q->ipqe_ip->ip_len)) {
q 832 netinet/ip_input.c q->ipqe_ip->ip_len =
q 833 netinet/ip_input.c htons(ntohs(q->ipqe_ip->ip_len) - i);
q 834 netinet/ip_input.c q->ipqe_ip->ip_off =
q 835 netinet/ip_input.c htons(ntohs(q->ipqe_ip->ip_off) + i);
q 836 netinet/ip_input.c m_adj(q->ipqe_m, i);
q 839 netinet/ip_input.c nq = LIST_NEXT(q, ipqe_q);
q 840 netinet/ip_input.c m_freem(q->ipqe_m);
q 841 netinet/ip_input.c LIST_REMOVE(q, ipqe_q);
q 842 netinet/ip_input.c pool_put(&ipqent_pool, q);
q 857 netinet/ip_input.c for (p = NULL, q = LIST_FIRST(&fp->ipq_fragq);
q 858 netinet/ip_input.c q != LIST_END(&fp->ipq_fragq); p = q, q = LIST_NEXT(q, ipqe_q)) {
q 859 netinet/ip_input.c if (ntohs(q->ipqe_ip->ip_off) != next)
q 861 netinet/ip_input.c next += ntohs(q->ipqe_ip->ip_len);
q 870 netinet/ip_input.c q = LIST_FIRST(&fp->ipq_fragq);
q 871 netinet/ip_input.c ip = q->ipqe_ip;
q 877 netinet/ip_input.c m = q->ipqe_m;
q 881 netinet/ip_input.c nq = LIST_NEXT(q, ipqe_q);
q 882 netinet/ip_input.c pool_put(&ipqent_pool, q);
q 884 netinet/ip_input.c for (q = nq; q != NULL; q = nq) {
q 885 netinet/ip_input.c t = q->ipqe_m;
q 886 netinet/ip_input.c nq = LIST_NEXT(q, ipqe_q);
q 887 netinet/ip_input.c pool_put(&ipqent_pool, q);
q 930 netinet/ip_input.c struct ipqent *q, *p;
q 932 netinet/ip_input.c for (q = LIST_FIRST(&fp->ipq_fragq); q != LIST_END(&fp->ipq_fragq);
q 933 netinet/ip_input.c q = p) {
q 934 netinet/ip_input.c p = LIST_NEXT(q, ipqe_q);
q 935 netinet/ip_input.c m_freem(q->ipqe_m);
q 936 netinet/ip_input.c LIST_REMOVE(q, ipqe_q);
q 937 netinet/ip_input.c pool_put(&ipqent_pool, q);
q 1320 netinet/ip_input.c struct in_addr *p, *q;
q 1356 netinet/ip_input.c q = (struct in_addr *)(mtod(m, caddr_t) +
q 1366 netinet/ip_input.c printf(" %x", ntohl(q->s_addr));
q 1368 netinet/ip_input.c *q++ = *p--;
q 1373 netinet/ip_input.c *q = ip_srcrt.dst;
q 1376 netinet/ip_input.c printf(" %x\n", ntohl(q->s_addr));
q 196 netinet/tcp_input.c struct tcpqent *p, *q, *nq, *tiqe;
q 231 netinet/tcp_input.c for (p = NULL, q = TAILQ_FIRST(&tp->t_segq); q != NULL;
q 232 netinet/tcp_input.c p = q, q = TAILQ_NEXT(q, tcpqe_q))
q 233 netinet/tcp_input.c if (SEQ_GT(q->tcpqe_tcp->th_seq, th->th_seq))
q 267 netinet/tcp_input.c for (; q != NULL; q = nq) {
q 268 netinet/tcp_input.c struct tcphdr *qhdr = q->tcpqe_tcp;
q 276 netinet/tcp_input.c m_adj(q->tcpqe_m, i);
q 279 netinet/tcp_input.c nq = TAILQ_NEXT(q, tcpqe_q);
q 280 netinet/tcp_input.c m_freem(q->tcpqe_m);
q 281 netinet/tcp_input.c TAILQ_REMOVE(&tp->t_segq, q, tcpqe_q);
q 282 netinet/tcp_input.c pool_put(&tcpqe_pool, q);
q 302 netinet/tcp_input.c q = TAILQ_FIRST(&tp->t_segq);
q 303 netinet/tcp_input.c if (q == NULL || q->tcpqe_tcp->th_seq != tp->rcv_nxt)
q 305 netinet/tcp_input.c if (tp->t_state == TCPS_SYN_RECEIVED && q->tcpqe_tcp->th_reseqlen)
q 308 netinet/tcp_input.c tp->rcv_nxt += q->tcpqe_tcp->th_reseqlen;
q 309 netinet/tcp_input.c flags = q->tcpqe_tcp->th_flags & TH_FIN;
q 311 netinet/tcp_input.c nq = TAILQ_NEXT(q, tcpqe_q);
q 312 netinet/tcp_input.c TAILQ_REMOVE(&tp->t_segq, q, tcpqe_q);
q 315 netinet/tcp_input.c m_freem(q->tcpqe_m);
q 317 netinet/tcp_input.c sbappendstream(&so->so_rcv, q->tcpqe_m);
q 318 netinet/tcp_input.c pool_put(&tcpqe_pool, q);
q 319 netinet/tcp_input.c q = nq;
q 320 netinet/tcp_input.c } while (q != NULL && q->tcpqe_tcp->th_seq == tp->rcv_nxt);
q 565 netinet/tcp_subr.c struct sackhole *p, *q;
q 579 netinet/tcp_subr.c q = p = tp->snd_holes;
q 581 netinet/tcp_subr.c q = p->next;
q 583 netinet/tcp_subr.c p = q;
q 176 netinet/tcp_timer.c struct sackhole *p, *q;
q 180 netinet/tcp_timer.c q = tp->snd_holes;
q 181 netinet/tcp_timer.c while (q != NULL) {
q 182 netinet/tcp_timer.c p = q;
q 183 netinet/tcp_timer.c q = q->next;
q 1489 netinet6/icmp6.c const char *p, *q;
q 1539 netinet6/icmp6.c for (q = p; q < name + namelen && *q && *q != '.'; q++)
q 1564 netinet6/icmp6.c p = q;
q 500 netinet6/in6_ifattach.c u_int8_t *q;
q 517 netinet6/in6_ifattach.c for (q = n; *q; q++) {
q 518 netinet6/in6_ifattach.c if ('A' <= *q && *q <= 'Z')
q 519 netinet6/in6_ifattach.c *q = *q - 'A' + 'a';
q 38 stand/boot/bootarg.c bootarg_t *q = alloc(sizeof(*q) + l - sizeof(q->ba_arg));
q 40 stand/boot/bootarg.c q->ba_type = t;
q 41 stand/boot/bootarg.c q->ba_size = sizeof(*q) + l - sizeof(q->ba_arg);
q 42 stand/boot/bootarg.c bcopy(p, q->ba_arg, l);
q 43 stand/boot/bootarg.c q->ba_next = bootarg_list;
q 44 stand/boot/bootarg.c bootarg_list = q;
q 51 stand/boot/bootarg.c u_char *q;
q 66 stand/boot/bootarg.c for (p = bootarg_list, q = v;
q 67 stand/boot/bootarg.c p != NULL && ((q + p->ba_size) - (u_char *)v) < l;
q 68 stand/boot/bootarg.c q += p->ba_size, p = p->ba_next) {
q 72 stand/boot/bootarg.c bcopy(p, q, p->ba_size);
q 74 stand/boot/bootarg.c p = (bootarg_t *)q;
q 234 stand/boot/cmd.c char *q;
q 237 stand/boot/cmd.c q = nextword(p);
q 248 stand/boot/cmd.c return q;
q 242 stand/boot/vars.c char *p, *q;
q 245 stand/boot/vars.c for (p = environ; p && *p; p = q) {
q 247 stand/boot/vars.c for (q = p; *q != '='; q++)
q 249 stand/boot/vars.c l = max(l, q - p) + 1;
q 250 stand/boot/vars.c for (q = p; *q != '\n'; q++)
q 252 stand/boot/vars.c if (*q)
q 253 stand/boot/vars.c q++;
q 255 stand/boot/vars.c while((*p++ = *q++))
q 297 sys/socketvar.h void soqinsque(struct socket *head, struct socket *so, int q);
q 298 sys/socketvar.h int soqremque(struct socket *so, int q);
q 254 sys/tty.h int b_to_q(u_char *cp, int cc, struct clist *q);
q 257 sys/tty.h int getc(struct clist *q);
q 258 sys/tty.h void ndflush(struct clist *q, int cc);
q 259 sys/tty.h int ndqb(struct clist *q, int flag);
q 260 sys/tty.h u_char *nextc(struct clist *q, u_char *cp, int *c);
q 261 sys/tty.h int putc(int c, struct clist *q);
q 262 sys/tty.h int q_to_b(struct clist *q, u_char *cp, int cc);
q 263 sys/tty.h int unputc(struct clist *q);
q 58 ufs/ext2fs/ext2fs_subr.c #define SETHIGH(q, h) { \
q 60 ufs/ext2fs/ext2fs_subr.c tmp.qcvt = (q); \
q 62 ufs/ext2fs/ext2fs_subr.c (q) = tmp.qcvt; \
q 65 ufs/ext2fs/ext2fs_subr.c #define SETLOW(q, l) { \
q 67 ufs/ext2fs/ext2fs_subr.c tmp.qcvt = (q); \
q 69 ufs/ext2fs/ext2fs_subr.c (q) = tmp.qcvt; \
q 86 ufs/ufs/ufs_vnops.c #define SETHIGH(q, h) { \
q 88 ufs/ufs/ufs_vnops.c tmp.qcvt = (q); \
q 90 ufs/ufs/ufs_vnops.c (q) = tmp.qcvt; \
q 92 ufs/ufs/ufs_vnops.c #define SETLOW(q, l) { \
q 94 ufs/ufs/ufs_vnops.c tmp.qcvt = (q); \
q 96 ufs/ufs/ufs_vnops.c (q) = tmp.qcvt; \
q 47 xfs/xfs_dev-common.c xfs_initq(struct xfs_link *q)
q 49 xfs/xfs_dev-common.c q->next = q;
q 50 xfs/xfs_dev-common.c q->prev = q;
q 55 xfs/xfs_dev-common.c xfs_emptyq(const struct xfs_link *q)
q 57 xfs/xfs_dev-common.c return q->next == q;
q 69 xfs/xfs_dev-common.c xfs_appendq(struct xfs_link *q, struct xfs_link *p)
q 71 xfs/xfs_dev-common.c p->next = q;
q 72 xfs/xfs_dev-common.c p->prev = q->prev;
q 74 xfs/xfs_dev-common.c q->prev = p;
q 92 xfs/xfs_dev.h xfs_initq(struct xfs_link *q);
q 95 xfs/xfs_dev.h xfs_emptyq(const struct xfs_link *q);
q 101 xfs/xfs_dev.h xfs_appendq(struct xfs_link *q, struct xfs_link *p);
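The xfs_dev-common.c helpers above maintain a circular doubly-linked list around a sentinel: xfs_initq() points the head at itself, xfs_emptyq() tests for that self-link, and xfs_appendq() splices a node in just before q. A minimal sketch with a bare next/prev struct (the field layout is assumed, and the fixup of the predecessor's next pointer is inferred, since that source line is not quoted above):

    #include <assert.h>

    struct xfs_link {               /* assumed minimal layout: just the two links */
            struct xfs_link *next, *prev;
    };

    static void
    xfs_initq(struct xfs_link *q)
    {
            q->next = q;
            q->prev = q;
    }

    static int
    xfs_emptyq(const struct xfs_link *q)
    {
            return q->next == q;
    }

    static void
    xfs_appendq(struct xfs_link *q, struct xfs_link *p)
    {
            p->next = q;
            p->prev = q->prev;
            p->prev->next = p;      /* inferred step: old tail now points at p */
            q->prev = p;
    }

    int
    main(void)
    {
            struct xfs_link head, a, b;

            xfs_initq(&head);
            assert(xfs_emptyq(&head));
            xfs_appendq(&head, &a);         /* a becomes the only element */
            xfs_appendq(&head, &b);         /* b is appended after a */
            assert(!xfs_emptyq(&head));
            assert(head.next == &a && a.next == &b && b.next == &head);
            assert(head.prev == &b && b.prev == &a && a.prev == &head);
            return 0;
    }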
q 71 xfs/xfs_message.h #define xfs_handle_eq(p, q) \
q 72 xfs/xfs_message.h ((p)->a == (q)->a && (p)->b == (q)->b && (p)->c == (q)->c && (p)->d == (q)->d)