queue 714 dev/acpi/acpi.c acpi_load_table(paddr_t pa, size_t len, acpi_qhead_t *queue)
queue 729 dev/acpi/acpi.c SIMPLEQ_INSERT_TAIL(queue, entry, q_next);
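
The two acpi.c hits above use the <sys/queue.h> SIMPLEQ macros to collect mapped ACPI tables on a caller-supplied list head (acpi_qhead_t). A minimal userland sketch of the same pattern, assuming OpenBSD's <sys/queue.h>; the entry type and field names below are illustrative stand-ins, not the driver's own:

/* Sketch of the SIMPLEQ collection pattern seen in acpi_load_table();
 * the entry type here is made up for illustration. */
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

struct table_entry {
	int				 id;
	SIMPLEQ_ENTRY(table_entry)	 q_next;	/* list linkage */
};
SIMPLEQ_HEAD(table_qhead, table_entry);

int
main(void)
{
	struct table_qhead	 queue = SIMPLEQ_HEAD_INITIALIZER(queue);
	struct table_entry	*entry;
	int			 i;

	for (i = 0; i < 4; i++) {
		if ((entry = malloc(sizeof(*entry))) == NULL)
			return (1);
		entry->id = i;
		SIMPLEQ_INSERT_TAIL(&queue, entry, q_next);
	}
	SIMPLEQ_FOREACH(entry, &queue, q_next)
		printf("entry %d\n", entry->id);
	return (0);
}
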
queue 88 dev/ic/aac.c int aac_enqueue_response(struct aac_softc *sc, int queue,
queue 1886 dev/ic/aac.c aac_enqueue_fib(struct aac_softc *sc, int queue, struct aac_command *cm)
queue 1897 dev/ic/aac.c pi = sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX];
queue 1898 dev/ic/aac.c ci = sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX];
queue 1901 dev/ic/aac.c if (pi >= aac_qinfo[queue].size)
queue 1911 dev/ic/aac.c (sc->aac_qentries[queue] + pi)->aq_fib_size = fib_size;
queue 1912 dev/ic/aac.c (sc->aac_qentries[queue] + pi)->aq_fib_addr = fib_addr;
queue 1915 dev/ic/aac.c sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX] = pi + 1;
queue 1924 dev/ic/aac.c if (aac_qinfo[queue].notify != 0)
queue 1925 dev/ic/aac.c AAC_QNOTIFY(sc, aac_qinfo[queue].notify);
queue 1938 dev/ic/aac.c aac_dequeue_fib(struct aac_softc *sc, int queue, u_int32_t *fib_size,
queue 1947 dev/ic/aac.c pi = sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX];
queue 1948 dev/ic/aac.c ci = sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX];
queue 1957 dev/ic/aac.c if (pi >= aac_qinfo[queue].size)
queue 1965 dev/ic/aac.c if (ci >= aac_qinfo[queue].size)
queue 1969 dev/ic/aac.c *fib_size = (sc->aac_qentries[queue] + ci)->aq_fib_size;
queue 1971 dev/ic/aac.c switch (queue) {
queue 1980 dev/ic/aac.c fib_index = (sc->aac_qentries[queue] + ci)->aq_fib_addr /
queue 1997 dev/ic/aac.c fib_index = (sc->aac_qentries[queue] + ci)->aq_fib_addr;
queue 2018 dev/ic/aac.c sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX] = ci + 1;
queue 2021 dev/ic/aac.c if (notify && (aac_qinfo[queue].notify != 0))
queue 2022 dev/ic/aac.c AAC_QNOTIFY(sc, aac_qinfo[queue].notify);
queue 2033 dev/ic/aac.c aac_enqueue_response(struct aac_softc *sc, int queue, struct aac_fib *fib)
queue 2046 dev/ic/aac.c pi = sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX];
queue 2047 dev/ic/aac.c ci = sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX];
queue 2050 dev/ic/aac.c if (pi >= aac_qinfo[queue].size)
queue 2060 dev/ic/aac.c (sc->aac_qentries[queue] + pi)->aq_fib_size = fib_size;
queue 2061 dev/ic/aac.c (sc->aac_qentries[queue] + pi)->aq_fib_addr = fib_addr;
queue 2064 dev/ic/aac.c sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX] = pi + 1;
queue 2067 dev/ic/aac.c if (aac_qinfo[queue].notify != 0)
queue 2068 dev/ic/aac.c AAC_QNOTIFY(sc, aac_qinfo[queue].notify);
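
The aac_enqueue_fib()/aac_dequeue_fib()/aac_enqueue_response() hits all follow the same shared-index ring discipline: read the producer and consumer indices from qt_qindex, wrap an index that has reached aac_qinfo[queue].size, store the FIB size and address in the slot, advance the index, and optionally ring the doorbell through AAC_QNOTIFY(). A simplified, self-contained sketch of that discipline under those assumptions; the structure and field names are illustrative, not the driver's:

/* Illustrative producer/consumer ring with lazily wrapped indices,
 * modelled loosely on the aac(4) FIB queue code. */
#include <stdint.h>

#define QUEUE_SIZE	64

struct ring_entry {
	uint32_t	aq_fib_size;
	uint32_t	aq_fib_addr;
};

struct ring {
	uint32_t		producer;	/* next slot to fill */
	uint32_t		consumer;	/* next slot to drain */
	struct ring_entry	entries[QUEUE_SIZE];
};

/* Returns 0 on success, -1 if the ring is full (one slot kept free). */
int
ring_enqueue(struct ring *r, uint32_t fib_size, uint32_t fib_addr)
{
	uint32_t pi = r->producer, ci = r->consumer;

	if (pi >= QUEUE_SIZE)		/* indices wrap on read */
		pi = 0;
	if (ci >= QUEUE_SIZE)
		ci = 0;
	if ((pi + 1) % QUEUE_SIZE == ci)
		return (-1);
	r->entries[pi].aq_fib_size = fib_size;
	r->entries[pi].aq_fib_addr = fib_addr;
	r->producer = pi + 1;		/* may equal QUEUE_SIZE until next wrap */
	return (0);
}

/* Returns 0 on success, -1 if the ring is empty. */
int
ring_dequeue(struct ring *r, uint32_t *fib_size, uint32_t *fib_addr)
{
	uint32_t pi = r->producer, ci = r->consumer;

	if (pi >= QUEUE_SIZE)
		pi = 0;
	if (ci >= QUEUE_SIZE)
		ci = 0;
	if (ci == pi)			/* nothing queued */
		return (-1);
	*fib_size = r->entries[ci].aq_fib_size;
	*fib_addr = r->entries[ci].aq_fib_addr;
	r->consumer = ci + 1;
	return (0);
}
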
queue 415 dev/ic/aic7xxx.c ahc_run_untagged_queue(struct ahc_softc *ahc, struct scb_tailq *queue)
queue 422 dev/ic/aic7xxx.c if ((scb = TAILQ_FIRST(queue)) != NULL
queue 1216 dev/ic/aic7xxxvar.h struct scb_tailq *queue);
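
ahc_run_untagged_queue() peeks at the head of a per-target TAILQ of SCBs and only starts one if nothing is already active. The same head-peek idiom, reduced to a sketch with a made-up SCB type and an SCB_ACTIVE-style flag:

/* Head-peek idiom in the spirit of ahc_run_untagged_queue(); the SCB
 * fields here are invented for illustration. */
#include <sys/queue.h>
#include <stdio.h>

#define SCB_ACTIVE	0x01

struct scb {
	int			 flags;
	TAILQ_ENTRY(scb)	 links;
};
TAILQ_HEAD(scb_tailq, scb);

/* Start the SCB at the head of the untagged queue if none is active. */
void
run_untagged_queue(struct scb_tailq *queue)
{
	struct scb *scb;

	if ((scb = TAILQ_FIRST(queue)) != NULL
	    && (scb->flags & SCB_ACTIVE) == 0) {
		scb->flags |= SCB_ACTIVE;
		printf("downloading scb %p\n", (void *)scb);
	}
}
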
queue 727 dev/ic/ar5210.c u_int queue;
queue 734 dev/ic/ar5210.c queue = 0;
queue 738 dev/ic/ar5210.c queue = 1;
queue 747 dev/ic/ar5210.c bzero(&hal->ah_txq[queue], sizeof(HAL_TXQ_INFO));
queue 748 dev/ic/ar5210.c hal->ah_txq[queue].tqi_type = queue_type;
queue 752 dev/ic/ar5210.c queue, queue_info) != AH_TRUE)
queue 756 dev/ic/ar5210.c return (queue);
queue 760 dev/ic/ar5210.c ar5k_ar5210_setup_tx_queueprops(struct ath_hal *hal, int queue,
queue 763 dev/ic/ar5210.c AR5K_ASSERT_ENTRY(queue, hal->ah_capabilities.cap_queues.q_tx_num);
queue 765 dev/ic/ar5210.c if (hal->ah_txq[queue].tqi_type == HAL_TX_QUEUE_INACTIVE)
queue 768 dev/ic/ar5210.c hal->ah_txq[queue].tqi_aifs = queue_info->tqi_aifs;
queue 769 dev/ic/ar5210.c hal->ah_txq[queue].tqi_cw_max = queue_info->tqi_cw_max;
queue 770 dev/ic/ar5210.c hal->ah_txq[queue].tqi_cw_min = queue_info->tqi_cw_min;
queue 771 dev/ic/ar5210.c hal->ah_txq[queue].tqi_flags = queue_info->tqi_flags;
queue 777 dev/ic/ar5210.c ar5k_ar5210_get_tx_queueprops(struct ath_hal *hal, int queue,
queue 780 dev/ic/ar5210.c AR5K_ASSERT_ENTRY(queue, hal->ah_capabilities.cap_queues.q_tx_num);
queue 781 dev/ic/ar5210.c bcopy(&hal->ah_txq[queue], queue_info, sizeof(HAL_TXQ_INFO));
queue 786 dev/ic/ar5210.c ar5k_ar5210_release_tx_queue(struct ath_hal *hal, u_int queue)
queue 788 dev/ic/ar5210.c AR5K_ASSERT_ENTRY(queue, hal->ah_capabilities.cap_queues.q_tx_num);
queue 791 dev/ic/ar5210.c hal->ah_txq[queue].tqi_type = HAL_TX_QUEUE_INACTIVE;
queue 815 dev/ic/ar5210.c ar5k_ar5210_reset_tx_queue(struct ath_hal *hal, u_int queue)
queue 820 dev/ic/ar5210.c AR5K_ASSERT_ENTRY(queue, hal->ah_capabilities.cap_queues.q_tx_num);
queue 822 dev/ic/ar5210.c tq = &hal->ah_txq[queue];
queue 869 dev/ic/ar5210.c ar5k_ar5210_get_tx_buf(struct ath_hal *hal, u_int queue)
queue 873 dev/ic/ar5210.c AR5K_ASSERT_ENTRY(queue, hal->ah_capabilities.cap_queues.q_tx_num);
queue 878 dev/ic/ar5210.c switch (hal->ah_txq[queue].tqi_type) {
queue 894 dev/ic/ar5210.c ar5k_ar5210_put_tx_buf(struct ath_hal *hal, u_int queue, u_int32_t phys_addr)
queue 898 dev/ic/ar5210.c AR5K_ASSERT_ENTRY(queue, hal->ah_capabilities.cap_queues.q_tx_num);
queue 903 dev/ic/ar5210.c switch (hal->ah_txq[queue].tqi_type) {
queue 922 dev/ic/ar5210.c ar5k_ar5210_num_tx_pending(struct ath_hal *hal, u_int queue)
queue 928 dev/ic/ar5210.c ar5k_ar5210_tx_start(struct ath_hal *hal, u_int queue)
queue 932 dev/ic/ar5210.c AR5K_ASSERT_ENTRY(queue, hal->ah_capabilities.cap_queues.q_tx_num);
queue 939 dev/ic/ar5210.c switch (hal->ah_txq[queue].tqi_type) {
queue 968 dev/ic/ar5210.c ar5k_ar5210_stop_tx_dma(struct ath_hal *hal, u_int queue)
queue 972 dev/ic/ar5210.c AR5K_ASSERT_ENTRY(queue, hal->ah_capabilities.cap_queues.q_tx_num);
queue 979 dev/ic/ar5210.c switch (hal->ah_txq[queue].tqi_type) {
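
The ar5210 HAL hits all index a per-chip hal->ah_txq[] array of HAL_TXQ_INFO and guard every entry point with AR5K_ASSERT_ENTRY() against cap_queues.q_tx_num; releasing a queue simply marks its slot HAL_TX_QUEUE_INACTIVE. A compact sketch of that bookkeeping with invented types and a simplified assert macro:

/* Stand-in for the ar5k ah_txq[] bookkeeping; types and macro are
 * illustrative, not the HAL's. */
#include <string.h>

#define NUM_TX_QUEUES	2

enum txq_type { TXQ_INACTIVE = 0, TXQ_DATA, TXQ_BEACON };

struct txq_info {
	enum txq_type	tqi_type;
	unsigned int	tqi_aifs, tqi_cw_min, tqi_cw_max;
};

static struct txq_info ah_txq[NUM_TX_QUEUES];

#define ASSERT_ENTRY(q, max)	do { if ((q) >= (max)) return (-1); } while (0)

/* Claim a slot for a queue of the given type and return its number. */
int
setup_tx_queue(enum txq_type type)
{
	unsigned int queue = (type == TXQ_DATA) ? 0 : 1;

	memset(&ah_txq[queue], 0, sizeof(ah_txq[queue]));
	ah_txq[queue].tqi_type = type;
	return ((int)queue);
}

/* Releasing a queue just marks the slot inactive so it can be reused. */
int
release_tx_queue(unsigned int queue)
{
	ASSERT_ENTRY(queue, NUM_TX_QUEUES);
	ah_txq[queue].tqi_type = TXQ_INACTIVE;
	return (0);
}
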
queue 802 dev/ic/ar5211.c u_int queue;
queue 808 dev/ic/ar5211.c for (queue = HAL_TX_QUEUE_ID_DATA_MIN;
queue 809 dev/ic/ar5211.c hal->ah_txq[queue].tqi_type != HAL_TX_QUEUE_INACTIVE;
queue 810 dev/ic/ar5211.c queue++)
queue 811 dev/ic/ar5211.c if (queue > HAL_TX_QUEUE_ID_DATA_MAX)
queue 814 dev/ic/ar5211.c queue = HAL_TX_QUEUE_ID_PSPOLL;
queue 816 dev/ic/ar5211.c queue = HAL_TX_QUEUE_ID_BEACON;
queue 818 dev/ic/ar5211.c queue = HAL_TX_QUEUE_ID_CAB;
queue 825 dev/ic/ar5211.c bzero(&hal->ah_txq[queue], sizeof(HAL_TXQ_INFO));
queue 826 dev/ic/ar5211.c hal->ah_txq[queue].tqi_type = queue_type;
queue 829 dev/ic/ar5211.c if (ar5k_ar5211_setup_tx_queueprops(hal, queue, queue_info)
queue 834 dev/ic/ar5211.c AR5K_Q_ENABLE_BITS(hal->ah_txq_interrupts, queue);
queue 836 dev/ic/ar5211.c return (queue);
queue 840 dev/ic/ar5211.c ar5k_ar5211_setup_tx_queueprops(struct ath_hal *hal, int queue,
queue 843 dev/ic/ar5211.c AR5K_ASSERT_ENTRY(queue, hal->ah_capabilities.cap_queues.q_tx_num);
queue 845 dev/ic/ar5211.c if (hal->ah_txq[queue].tqi_type == HAL_TX_QUEUE_INACTIVE)
queue 848 dev/ic/ar5211.c bcopy(queue_info, &hal->ah_txq[queue], sizeof(HAL_TXQ_INFO));
queue 853 dev/ic/ar5211.c hal->ah_txq[queue].tqi_flags |=
queue 860 dev/ic/ar5211.c ar5k_ar5211_get_tx_queueprops(struct ath_hal *hal, int queue,
queue 863 dev/ic/ar5211.c AR5K_ASSERT_ENTRY(queue, hal->ah_capabilities.cap_queues.q_tx_num);
queue 864 dev/ic/ar5211.c bcopy(&hal->ah_txq[queue], queue_info, sizeof(HAL_TXQ_INFO));
queue 869 dev/ic/ar5211.c ar5k_ar5211_release_tx_queue(struct ath_hal *hal, u_int queue)
queue 871 dev/ic/ar5211.c AR5K_ASSERT_ENTRY(queue, hal->ah_capabilities.cap_queues.q_tx_num);
queue 874 dev/ic/ar5211.c hal->ah_txq[queue].tqi_type = HAL_TX_QUEUE_INACTIVE;
queue 875 dev/ic/ar5211.c AR5K_Q_DISABLE_BITS(hal->ah_txq_interrupts, queue);
queue 881 dev/ic/ar5211.c ar5k_ar5211_reset_tx_queue(struct ath_hal *hal, u_int queue)
queue 888 dev/ic/ar5211.c AR5K_ASSERT_ENTRY(queue, hal->ah_capabilities.cap_queues.q_tx_num);
queue 890 dev/ic/ar5211.c tq = &hal->ah_txq[queue];
queue 922 dev/ic/ar5211.c AR5K_REG_WRITE(AR5K_AR5211_DCU_RETRY_LMT(queue),
queue 944 dev/ic/ar5211.c AR5K_REG_WRITE(AR5K_AR5211_DCU_LCL_IFS(queue),
queue 953 dev/ic/ar5211.c AR5K_REG_WRITE(AR5K_AR5211_QCU_MISC(queue),
queue 957 dev/ic/ar5211.c AR5K_REG_WRITE(AR5K_AR5211_QCU_CBRCFG(queue),
queue 962 dev/ic/ar5211.c AR5K_REG_ENABLE_BITS(AR5K_AR5211_QCU_MISC(queue),
queue 965 dev/ic/ar5211.c AR5K_REG_ENABLE_BITS(AR5K_AR5211_QCU_MISC(queue),
queue 970 dev/ic/ar5211.c AR5K_REG_WRITE(AR5K_AR5211_QCU_RDYTIMECFG(queue),
queue 977 dev/ic/ar5211.c AR5K_REG_WRITE(AR5K_AR5211_DCU_CHAN_TIME(queue),
queue 983 dev/ic/ar5211.c AR5K_REG_ENABLE_BITS(AR5K_AR5211_QCU_MISC(queue),
queue 989 dev/ic/ar5211.c AR5K_REG_WRITE(AR5K_AR5211_DCU_MISC(queue),
queue 994 dev/ic/ar5211.c AR5K_REG_WRITE(AR5K_AR5211_DCU_MISC(queue),
queue 1003 dev/ic/ar5211.c AR5K_REG_ENABLE_BITS(AR5K_AR5211_QCU_MISC(queue),
queue 1008 dev/ic/ar5211.c AR5K_REG_ENABLE_BITS(AR5K_AR5211_DCU_MISC(queue),
queue 1014 dev/ic/ar5211.c AR5K_REG_WRITE(AR5K_AR5211_QCU_RDYTIMECFG(queue),
queue 1022 dev/ic/ar5211.c AR5K_REG_ENABLE_BITS(AR5K_AR5211_QCU_MISC(queue),
queue 1027 dev/ic/ar5211.c AR5K_REG_ENABLE_BITS(AR5K_AR5211_DCU_MISC(queue),
queue 1033 dev/ic/ar5211.c AR5K_REG_ENABLE_BITS(AR5K_AR5211_QCU_MISC(queue),
queue 1057 dev/ic/ar5211.c ar5k_ar5211_get_tx_buf(struct ath_hal *hal, u_int queue)
queue 1059 dev/ic/ar5211.c AR5K_ASSERT_ENTRY(queue, hal->ah_capabilities.cap_queues.q_tx_num);
queue 1064 dev/ic/ar5211.c return (AR5K_REG_READ(AR5K_AR5211_QCU_TXDP(queue)));
queue 1068 dev/ic/ar5211.c ar5k_ar5211_put_tx_buf(struct ath_hal *hal, u_int queue, u_int32_t phys_addr)
queue 1070 dev/ic/ar5211.c AR5K_ASSERT_ENTRY(queue, hal->ah_capabilities.cap_queues.q_tx_num);
queue 1076 dev/ic/ar5211.c if (AR5K_REG_READ_Q(AR5K_AR5211_QCU_TXE, queue))
queue 1079 dev/ic/ar5211.c AR5K_REG_WRITE(AR5K_AR5211_QCU_TXDP(queue), phys_addr);
queue 1085 dev/ic/ar5211.c ar5k_ar5211_num_tx_pending(struct ath_hal *hal, u_int queue)
queue 1087 dev/ic/ar5211.c AR5K_ASSERT_ENTRY(queue, hal->ah_capabilities.cap_queues.q_tx_num);
queue 1088 dev/ic/ar5211.c return (AR5K_AR5211_QCU_STS(queue) & AR5K_AR5211_QCU_STS_FRMPENDCNT);
queue 1092 dev/ic/ar5211.c ar5k_ar5211_tx_start(struct ath_hal *hal, u_int queue)
queue 1094 dev/ic/ar5211.c AR5K_ASSERT_ENTRY(queue, hal->ah_capabilities.cap_queues.q_tx_num);
queue 1097 dev/ic/ar5211.c if (AR5K_REG_READ_Q(AR5K_AR5211_QCU_TXD, queue))
queue 1101 dev/ic/ar5211.c AR5K_REG_WRITE_Q(AR5K_AR5211_QCU_TXE, queue);
queue 1107 dev/ic/ar5211.c ar5k_ar5211_stop_tx_dma(struct ath_hal *hal, u_int queue)
queue 1111 dev/ic/ar5211.c AR5K_ASSERT_ENTRY(queue, hal->ah_capabilities.cap_queues.q_tx_num);
queue 1116 dev/ic/ar5211.c AR5K_REG_WRITE_Q(AR5K_AR5211_QCU_TXD, queue);
queue 1119 dev/ic/ar5211.c pending = AR5K_REG_READ(AR5K_AR5211_QCU_STS(queue)) &
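
On the 5211 each transmit queue owns its own QCU/DCU register block, so the driver derives register addresses from the queue number (QCU_TXDP(queue), DCU_RETRY_LMT(queue), and so on) and starts or stops DMA by writing the queue's bit to QCU_TXE or QCU_TXD. A sketch of that address-from-queue-number convention; the offsets below are invented, not the AR5K_AR5211_* values:

/* Invented offsets illustrating per-queue register macros and the
 * one-bit-per-queue kick register. */
#include <stdint.h>

#define QCU_TXDP_BASE	0x0800			/* hypothetical TXDP array base */
#define QCU_TXDP(q)	(QCU_TXDP_BASE + (q) * 4)
#define QCU_TXE		0x0840			/* one enable bit per queue */

static uint32_t regs[0x1000 / 4];		/* fake register file */

static void
reg_write(uint32_t off, uint32_t val)
{
	regs[off / 4] = val;
}

/* Point the queue at its descriptor chain, then set its TXE bit. */
void
tx_start(unsigned int queue, uint32_t descr_paddr)
{
	reg_write(QCU_TXDP(queue), descr_paddr);
	reg_write(QCU_TXE, 1U << queue);
}
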
queue 959 dev/ic/ar5212.c u_int queue;
queue 965 dev/ic/ar5212.c for (queue = HAL_TX_QUEUE_ID_DATA_MIN;
queue 966 dev/ic/ar5212.c hal->ah_txq[queue].tqi_type != HAL_TX_QUEUE_INACTIVE;
queue 967 dev/ic/ar5212.c queue++)
queue 968 dev/ic/ar5212.c if (queue > HAL_TX_QUEUE_ID_DATA_MAX)
queue 971 dev/ic/ar5212.c queue = HAL_TX_QUEUE_ID_PSPOLL;
queue 973 dev/ic/ar5212.c queue = HAL_TX_QUEUE_ID_BEACON;
queue 975 dev/ic/ar5212.c queue = HAL_TX_QUEUE_ID_CAB;
queue 982 dev/ic/ar5212.c bzero(&hal->ah_txq[queue], sizeof(HAL_TXQ_INFO));
queue 984 dev/ic/ar5212.c if (ar5k_ar5212_setup_tx_queueprops(hal, queue, queue_info)
queue 988 dev/ic/ar5212.c hal->ah_txq[queue].tqi_type = queue_type;
queue 990 dev/ic/ar5212.c AR5K_Q_ENABLE_BITS(hal->ah_txq_interrupts, queue);
queue 992 dev/ic/ar5212.c return (queue);
queue 996 dev/ic/ar5212.c ar5k_ar5212_setup_tx_queueprops(struct ath_hal *hal, int queue,
queue 999 dev/ic/ar5212.c AR5K_ASSERT_ENTRY(queue, hal->ah_capabilities.cap_queues.q_tx_num);
queue 1001 dev/ic/ar5212.c if (hal->ah_txq[queue].tqi_type != HAL_TX_QUEUE_INACTIVE)
queue 1004 dev/ic/ar5212.c bcopy(queue_info, &hal->ah_txq[queue], sizeof(HAL_TXQ_INFO));
queue 1009 dev/ic/ar5212.c hal->ah_txq[queue].tqi_flags |=
queue 1016 dev/ic/ar5212.c ar5k_ar5212_get_tx_queueprops(struct ath_hal *hal, int queue,
queue 1019 dev/ic/ar5212.c AR5K_ASSERT_ENTRY(queue, hal->ah_capabilities.cap_queues.q_tx_num);
queue 1020 dev/ic/ar5212.c bcopy(&hal->ah_txq[queue], queue_info, sizeof(HAL_TXQ_INFO));
queue 1025 dev/ic/ar5212.c ar5k_ar5212_release_tx_queue(struct ath_hal *hal, u_int queue)
queue 1027 dev/ic/ar5212.c AR5K_ASSERT_ENTRY(queue, hal->ah_capabilities.cap_queues.q_tx_num);
queue 1030 dev/ic/ar5212.c hal->ah_txq[queue].tqi_type = HAL_TX_QUEUE_INACTIVE;
queue 1031 dev/ic/ar5212.c AR5K_Q_DISABLE_BITS(hal->ah_txq_interrupts, queue);
queue 1037 dev/ic/ar5212.c ar5k_ar5212_reset_tx_queue(struct ath_hal *hal, u_int queue)
queue 1044 dev/ic/ar5212.c AR5K_ASSERT_ENTRY(queue, hal->ah_capabilities.cap_queues.q_tx_num);
queue 1046 dev/ic/ar5212.c tq = &hal->ah_txq[queue];
queue 1081 dev/ic/ar5212.c AR5K_REG_WRITE(AR5K_AR5212_DCU_RETRY_LMT(queue),
queue 1103 dev/ic/ar5212.c AR5K_REG_WRITE(AR5K_AR5212_DCU_LCL_IFS(queue),
queue 1112 dev/ic/ar5212.c AR5K_REG_WRITE(AR5K_AR5212_QCU_MISC(queue),
queue 1116 dev/ic/ar5212.c AR5K_REG_WRITE(AR5K_AR5212_QCU_CBRCFG(queue),
queue 1121 dev/ic/ar5212.c AR5K_REG_ENABLE_BITS(AR5K_AR5212_QCU_MISC(queue),
queue 1124 dev/ic/ar5212.c AR5K_REG_ENABLE_BITS(AR5K_AR5212_QCU_MISC(queue),
queue 1129 dev/ic/ar5212.c AR5K_REG_WRITE(AR5K_AR5212_QCU_RDYTIMECFG(queue),
queue 1136 dev/ic/ar5212.c AR5K_REG_WRITE(AR5K_AR5212_DCU_CHAN_TIME(queue),
queue 1142 dev/ic/ar5212.c AR5K_REG_ENABLE_BITS(AR5K_AR5212_QCU_MISC(queue),
queue 1148 dev/ic/ar5212.c AR5K_REG_WRITE(AR5K_AR5212_DCU_MISC(queue),
queue 1153 dev/ic/ar5212.c AR5K_REG_WRITE(AR5K_AR5212_DCU_MISC(queue),
queue 1162 dev/ic/ar5212.c AR5K_REG_ENABLE_BITS(AR5K_AR5212_QCU_MISC(queue),
queue 1167 dev/ic/ar5212.c AR5K_REG_ENABLE_BITS(AR5K_AR5212_DCU_MISC(queue),
queue 1173 dev/ic/ar5212.c AR5K_REG_WRITE(AR5K_AR5212_QCU_RDYTIMECFG(queue),
queue 1181 dev/ic/ar5212.c AR5K_REG_ENABLE_BITS(AR5K_AR5212_QCU_MISC(queue),
queue 1186 dev/ic/ar5212.c AR5K_REG_ENABLE_BITS(AR5K_AR5212_DCU_MISC(queue),
queue 1192 dev/ic/ar5212.c AR5K_REG_ENABLE_BITS(AR5K_AR5212_QCU_MISC(queue),
queue 1216 dev/ic/ar5212.c ar5k_ar5212_get_tx_buf(struct ath_hal *hal, u_int queue)
queue 1218 dev/ic/ar5212.c AR5K_ASSERT_ENTRY(queue, hal->ah_capabilities.cap_queues.q_tx_num);
queue 1223 dev/ic/ar5212.c return (AR5K_REG_READ(AR5K_AR5212_QCU_TXDP(queue)));
queue 1227 dev/ic/ar5212.c ar5k_ar5212_put_tx_buf(struct ath_hal *hal, u_int queue, u_int32_t phys_addr)
queue 1229 dev/ic/ar5212.c AR5K_ASSERT_ENTRY(queue, hal->ah_capabilities.cap_queues.q_tx_num);
queue 1235 dev/ic/ar5212.c if (AR5K_REG_READ_Q(AR5K_AR5212_QCU_TXE, queue))
queue 1238 dev/ic/ar5212.c AR5K_REG_WRITE(AR5K_AR5212_QCU_TXDP(queue), phys_addr);
queue 1244 dev/ic/ar5212.c ar5k_ar5212_num_tx_pending(struct ath_hal *hal, u_int queue)
queue 1246 dev/ic/ar5212.c AR5K_ASSERT_ENTRY(queue, hal->ah_capabilities.cap_queues.q_tx_num);
queue 1247 dev/ic/ar5212.c return (AR5K_AR5212_QCU_STS(queue) & AR5K_AR5212_QCU_STS_FRMPENDCNT);
queue 1251 dev/ic/ar5212.c ar5k_ar5212_tx_start(struct ath_hal *hal, u_int queue)
queue 1253 dev/ic/ar5212.c AR5K_ASSERT_ENTRY(queue, hal->ah_capabilities.cap_queues.q_tx_num);
queue 1256 dev/ic/ar5212.c if (AR5K_REG_READ_Q(AR5K_AR5212_QCU_TXD, queue))
queue 1260 dev/ic/ar5212.c AR5K_REG_WRITE_Q(AR5K_AR5212_QCU_TXE, queue);
queue 1266 dev/ic/ar5212.c ar5k_ar5212_stop_tx_dma(struct ath_hal *hal, u_int queue)
queue 1270 dev/ic/ar5212.c AR5K_ASSERT_ENTRY(queue, hal->ah_capabilities.cap_queues.q_tx_num);
queue 1275 dev/ic/ar5212.c AR5K_REG_WRITE_Q(AR5K_AR5212_QCU_TXD, queue);
queue 1278 dev/ic/ar5212.c pending = AR5K_REG_READ(AR5K_AR5212_QCU_STS(queue)) &
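
ar5k_ar5212_stop_tx_dma(), like its 5211 counterpart, writes the queue's bit to QCU_TXD and then polls QCU_STS(queue) until the FRMPENDCNT field drains. A hedged sketch of that bounded poll loop; the register indices, field mask, and retry count are placeholders, not the AR5K_AR5212_* definitions:

/* Placeholder poll loop modelled on the stop_tx_dma() hits above. */
#include <stdint.h>

#define REG_QCU_TXD		0x10	/* fake index: per-queue disable bits */
#define REG_QCU_STS(q)		(0x20 + (q))
#define QCU_STS_FRMPENDCNT	0x0003	/* low bits: frames still pending */

static uint32_t fake_regs[64];

static uint32_t reg_read(uint32_t idx) { return fake_regs[idx]; }
static void reg_write(uint32_t idx, uint32_t v) { fake_regs[idx] = v; }

int
stop_tx_dma(unsigned int queue)
{
	int i;

	reg_write(REG_QCU_TXD, 1U << queue);	/* ask the QCU to stop */
	for (i = 0; i < 100; i++) {		/* bounded wait for drain */
		if ((reg_read(REG_QCU_STS(queue)) & QCU_STS_FRMPENDCNT) == 0)
			return (0);
		/* the driver would delay here before re-reading */
	}
	return (-1);				/* frames still pending */
}
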
queue 954 dev/ic/ar5xxx.h _t HAL_BOOL (_a _n##_setup_tx_queueprops)(struct ath_hal *, int queue, \
queue 956 dev/ic/ar5xxx.h _t HAL_BOOL (_a _n##_release_tx_queue)(struct ath_hal *, u_int queue); \
queue 957 dev/ic/ar5xxx.h _t HAL_BOOL (_a _n##_reset_tx_queue)(struct ath_hal *, u_int queue); \
queue 958 dev/ic/ar5xxx.h _t u_int32_t (_a _n##_get_tx_buf)(struct ath_hal *, u_int queue); \
queue 961 dev/ic/ar5xxx.h _t HAL_BOOL (_a _n##_tx_start)(struct ath_hal *, u_int queue); \
queue 962 dev/ic/ar5xxx.h _t HAL_BOOL (_a _n##_stop_tx_dma)(struct ath_hal *, u_int queue); \
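
The ar5xxx.h lines are not hand-written prototypes: a declaration macro is expanded once per chip generation to stamp out an identical set of tx-queue entry points, which is why the same queue functions show up in ar5210.c, ar5211.c and ar5212.c. The technique looks roughly like the following; the macro name and the reduced function set are simplified stand-ins for the real one:

/* Simplified stand-in for the per-chip declaration macro in ar5xxx.h. */
struct ath_hal;

#define AR5K_TXQ_FUNCTIONS(_n)						\
	int	_n##_reset_tx_queue(struct ath_hal *, unsigned int queue); \
	int	_n##_tx_start(struct ath_hal *, unsigned int queue);	\
	int	_n##_stop_tx_dma(struct ath_hal *, unsigned int queue);

/* One expansion per supported chip generation. */
AR5K_TXQ_FUNCTIONS(ar5k_ar5210)
AR5K_TXQ_FUNCTIONS(ar5k_ar5211)
AR5K_TXQ_FUNCTIONS(ar5k_ar5212)
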
queue 350 dev/raidframe/rf_diskqueue.c rf_DiskIOEnqueue(RF_DiskQueue_t *queue, RF_DiskQueueData_t *req, int pri)
queue 362 dev/raidframe/rf_diskqueue.c RF_LOCK_QUEUE_MUTEX(queue, "DiskIOEnqueue");
queue 365 dev/raidframe/rf_diskqueue.c if (RF_QUEUE_EMPTY(queue)) {
queue 367 dev/raidframe/rf_diskqueue.c " (queue empty)\n", pri, queue->row, queue->col);
queue 368 dev/raidframe/rf_diskqueue.c RF_LOCK_QUEUE(queue);
queue 369 dev/raidframe/rf_diskqueue.c rf_DispatchKernelIO(queue, req);
queue 375 dev/raidframe/rf_diskqueue.c queue->queueLength++;
queue 377 dev/raidframe/rf_diskqueue.c " (queue not empty)\n", pri, queue->row,
queue 378 dev/raidframe/rf_diskqueue.c queue->col);
queue 379 dev/raidframe/rf_diskqueue.c req->queue = (void *) queue;
queue 380 dev/raidframe/rf_diskqueue.c (queue->qPtr->Enqueue) (queue->qHdr, req, pri);
queue 390 dev/raidframe/rf_diskqueue.c " c %d\n", pri, queue->row, queue->col);
queue 391 dev/raidframe/rf_diskqueue.c RF_ASSERT(RF_QUEUE_LOCKED(queue));
queue 392 dev/raidframe/rf_diskqueue.c rf_DispatchKernelIO(queue, req);
queue 395 dev/raidframe/rf_diskqueue.c if (RF_OK_TO_DISPATCH(queue, req)) {
queue 398 dev/raidframe/rf_diskqueue.c queue->row, queue->col);
queue 399 dev/raidframe/rf_diskqueue.c rf_DispatchKernelIO(queue, req);
queue 405 dev/raidframe/rf_diskqueue.c queue->queueLength++;
queue 408 dev/raidframe/rf_diskqueue.c queue->row, queue->col);
queue 409 dev/raidframe/rf_diskqueue.c req->queue = (void *) queue;
queue 410 dev/raidframe/rf_diskqueue.c (queue->qPtr->Enqueue) (queue->qHdr, req, pri);
queue 414 dev/raidframe/rf_diskqueue.c RF_UNLOCK_QUEUE_MUTEX(queue, "DiskIOEnqueue");
queue 420 dev/raidframe/rf_diskqueue.c rf_DiskIOComplete(RF_DiskQueue_t *queue, RF_DiskQueueData_t *req, int status)
queue 424 dev/raidframe/rf_diskqueue.c RF_LOCK_QUEUE_MUTEX(queue, "DiskIOComplete");
queue 433 dev/raidframe/rf_diskqueue.c queue->row, queue->col);
queue 434 dev/raidframe/rf_diskqueue.c RF_ASSERT(RF_QUEUE_LOCKED(queue) &&
queue 435 dev/raidframe/rf_diskqueue.c (queue->unlockingOp == NULL));
queue 436 dev/raidframe/rf_diskqueue.c RF_UNLOCK_QUEUE(queue);
queue 438 dev/raidframe/rf_diskqueue.c queue->numOutstanding--;
queue 439 dev/raidframe/rf_diskqueue.c RF_ASSERT(queue->numOutstanding >= 0);
queue 447 dev/raidframe/rf_diskqueue.c while (!done && !RF_QUEUE_FULL(queue) && !RF_QUEUE_LOCKED(queue)) {
queue 448 dev/raidframe/rf_diskqueue.c if (queue->nextLockingOp) {
queue 449 dev/raidframe/rf_diskqueue.c req = queue->nextLockingOp;
queue 450 dev/raidframe/rf_diskqueue.c queue->nextLockingOp = NULL;
queue 453 dev/raidframe/rf_diskqueue.c queue->row, queue->col);
queue 455 dev/raidframe/rf_diskqueue.c req = (queue->qPtr->Dequeue) (queue->qHdr);
queue 459 dev/raidframe/rf_diskqueue.c req->priority, queue->row, queue->col);
queue 470 dev/raidframe/rf_diskqueue.c queue->queueLength--;
queue 471 dev/raidframe/rf_diskqueue.c RF_ASSERT(queue->queueLength >= 0);
queue 477 dev/raidframe/rf_diskqueue.c if (RF_QUEUE_EMPTY(queue)) {
queue 482 dev/raidframe/rf_diskqueue.c queue->row, queue->col);
queue 483 dev/raidframe/rf_diskqueue.c RF_LOCK_QUEUE(queue);
queue 484 dev/raidframe/rf_diskqueue.c rf_DispatchKernelIO(queue, req);
queue 494 dev/raidframe/rf_diskqueue.c queue->row, queue->col);
queue 495 dev/raidframe/rf_diskqueue.c RF_ASSERT(queue->nextLockingOp == NULL);
queue 496 dev/raidframe/rf_diskqueue.c queue->nextLockingOp = req;
queue 506 dev/raidframe/rf_diskqueue.c RF_ASSERT(RF_QUEUE_LOCKED(queue));
queue 510 dev/raidframe/rf_diskqueue.c req->priority, queue->row,
queue 511 dev/raidframe/rf_diskqueue.c queue->col);
queue 512 dev/raidframe/rf_diskqueue.c rf_DispatchKernelIO(queue, req);
queue 515 dev/raidframe/rf_diskqueue.c if (RF_OK_TO_DISPATCH(queue, req)) {
queue 520 dev/raidframe/rf_diskqueue.c req->priority, queue->row,
queue 521 dev/raidframe/rf_diskqueue.c queue->col);
queue 522 dev/raidframe/rf_diskqueue.c rf_DispatchKernelIO(queue, req);
queue 537 dev/raidframe/rf_diskqueue.c queue->row, queue->col);
queue 538 dev/raidframe/rf_diskqueue.c queue->queueLength++;
queue 539 dev/raidframe/rf_diskqueue.c (queue->qPtr->Enqueue)
queue 540 dev/raidframe/rf_diskqueue.c (queue->qHdr, req,
queue 549 dev/raidframe/rf_diskqueue.c RF_UNLOCK_QUEUE_MUTEX(queue, "DiskIOComplete");
queue 558 dev/raidframe/rf_diskqueue.c rf_DiskIOPromote(RF_DiskQueue_t *queue, RF_StripeNum_t parityStripeID,
queue 563 dev/raidframe/rf_diskqueue.c if (!queue->qPtr->Promote)
queue 565 dev/raidframe/rf_diskqueue.c RF_LOCK_QUEUE_MUTEX(queue, "DiskIOPromote");
queue 566 dev/raidframe/rf_diskqueue.c retval = (queue->qPtr->Promote) (queue->qHdr, parityStripeID, which_ru);
queue 567 dev/raidframe/rf_diskqueue.c RF_UNLOCK_QUEUE_MUTEX(queue, "DiskIOPromote");
queue 102 dev/raidframe/rf_diskqueue.h RF_DiskQueue_t *queue; /*
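
rf_DiskIOEnqueue() shows the core RAIDframe decision: under the queue mutex, a request either goes straight to rf_DispatchKernelIO() when the per-disk queue is empty (or RF_OK_TO_DISPATCH says so), or it bumps queueLength and is handed to the pluggable policy's Enqueue hook to wait for a completion. A stripped-down sketch of that dispatch-or-queue choice; the types, lock, and hooks are simplified stand-ins:

/* Dispatch-or-queue skeleton in the spirit of rf_DiskIOEnqueue(). */
#include <pthread.h>

struct req;				/* opaque request */

struct disk_queue {
	pthread_mutex_t	 mutex;
	int		 queueLength;		/* requests parked in the policy queue */
	int		 numOutstanding;	/* requests already at the disk */
	int		 maxOutstanding;
	void		(*enqueue)(struct disk_queue *, struct req *, int pri);
	void		(*dispatch)(struct disk_queue *, struct req *);
};

void
disk_io_enqueue(struct disk_queue *q, struct req *r, int pri)
{
	pthread_mutex_lock(&q->mutex);
	if (q->queueLength == 0 && q->numOutstanding < q->maxOutstanding) {
		q->numOutstanding++;		/* goes straight to the disk */
		q->dispatch(q, r);
	} else {
		q->queueLength++;		/* parked until a completion */
		q->enqueue(q, r, pri);
	}
	pthread_mutex_unlock(&q->mutex);
}
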
queue 101 dev/raidframe/rf_fifo.c if ((q->hq_count + q->lq_count) != elem->queue->queueLength) {
queue 103 dev/raidframe/rf_fifo.c q->hq_count, q->lq_count, (int) elem->queue->queueLength);
queue 105 dev/raidframe/rf_fifo.c (int) elem->queue->numOutstanding,
queue 106 dev/raidframe/rf_fifo.c (int) elem->queue->maxOutstanding,
queue 107 dev/raidframe/rf_fifo.c (int) elem->queue->row,
queue 108 dev/raidframe/rf_fifo.c (int) elem->queue->col);
queue 110 dev/raidframe/rf_fifo.c RF_ASSERT((q->hq_count + q->lq_count) == elem->queue->queueLength);
queue 1845 dev/raidframe/rf_openbsdkintf.c rf_DispatchKernelIO(RF_DiskQueue_t *queue, RF_DiskQueueData_t *req)
queue 1858 dev/raidframe/rf_openbsdkintf.c req->queue = queue;
queue 1860 dev/raidframe/rf_openbsdkintf.c unit = queue->raidPtr->raidid;
queue 1906 dev/raidframe/rf_openbsdkintf.c queue->row, queue->col));
queue 1915 dev/raidframe/rf_openbsdkintf.c queue->numOutstanding++;
queue 1928 dev/raidframe/rf_openbsdkintf.c rf_InitBP(&raidbp->rf_buf, queue->rf_cinfo->ci_vp,
queue 1929 dev/raidframe/rf_openbsdkintf.c op | bp->b_flags, queue->rf_cinfo->ci_dev,
queue 1932 dev/raidframe/rf_openbsdkintf.c queue->raidPtr->logBytesPerSector, req->b_proc);
queue 1938 dev/raidframe/rf_openbsdkintf.c queue->numOutstanding++;
queue 1939 dev/raidframe/rf_openbsdkintf.c queue->last_deq_sector = req->sectorOffset;
queue 1945 dev/raidframe/rf_openbsdkintf.c queue->curPriority = req->priority;
queue 1948 dev/raidframe/rf_openbsdkintf.c req->type, unit, queue->row, queue->col));
queue 1951 dev/raidframe/rf_openbsdkintf.c (int)(req->numSector << queue->raidPtr->logBytesPerSector),
queue 1952 dev/raidframe/rf_openbsdkintf.c (int)queue->raidPtr->logBytesPerSector));
queue 1976 dev/raidframe/rf_openbsdkintf.c RF_DiskQueue_t *queue;
queue 1989 dev/raidframe/rf_openbsdkintf.c queue = (RF_DiskQueue_t *)req->queue;
queue 2016 dev/raidframe/rf_openbsdkintf.c unit = queue->raidPtr->raidid; /* *Much* simpler :-> */
queue 2024 dev/raidframe/rf_openbsdkintf.c if (queue->raidPtr->Disks[queue->row][queue->col].status ==
queue 2028 dev/raidframe/rf_openbsdkintf.c queue->raidPtr->
queue 2029 dev/raidframe/rf_openbsdkintf.c Disks[queue->row][queue->col].devname);
queue 2030 dev/raidframe/rf_openbsdkintf.c queue->raidPtr->Disks[queue->row][queue->col].status =
queue 2032 dev/raidframe/rf_openbsdkintf.c queue->raidPtr->status[queue->row] = rf_rs_degraded;
queue 2033 dev/raidframe/rf_openbsdkintf.c queue->raidPtr->numFailures++;
queue 2034 dev/raidframe/rf_openbsdkintf.c queue->raidPtr->numNewFailures++;
queue 2044 dev/raidframe/rf_openbsdkintf.c rf_DiskIOComplete(queue, req, (bp->b_flags & B_ERROR) ? 1 : 0);
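
In rf_openbsdkintf.c the dequeued request finally becomes real I/O: rf_DispatchKernelIO() fills in a buffer for the component vnode and bumps numOutstanding, and on completion the interrupt side marks the component failed, the row degraded, and the failure counters bumped if the buffer errored, before calling rf_DiskIOComplete(). A hedged sketch of that completion-side error handling; the raid and disk structures below are illustrative only:

/* Simplified completion handler in the spirit of the raid I/O done path. */
#include <stdio.h>

enum disk_status { DISK_OK, DISK_FAILED };

struct component {
	const char		*devname;
	enum disk_status	 status;
};

struct raid_softc {
	struct component	 disks[4];	/* caller keeps col in range */
	int			 numFailures;
};

void
raid_io_done(struct raid_softc *sc, int col, int error)
{
	if (error && sc->disks[col].status == DISK_OK) {
		printf("component %s failed, marking set degraded\n",
		    sc->disks[col].devname);
		sc->disks[col].status = DISK_FAILED;
		sc->numFailures++;
	}
	/* ...then hand the request back through the DiskIOComplete path. */
}
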
queue 647 dev/raidframe/rf_paritylogging.c rf_FreeParityLogQueue(RF_Raid_t *raidPtr, RF_ParityLogQueue_t *queue)
queue 651 dev/raidframe/rf_paritylogging.c RF_LOCK_MUTEX(queue->mutex);
queue 652 dev/raidframe/rf_paritylogging.c l1 = queue->parityLogs;
queue 660 dev/raidframe/rf_paritylogging.c RF_UNLOCK_MUTEX(queue->mutex);
queue 661 dev/raidframe/rf_paritylogging.c rf_mutex_destroy(&queue->mutex);
queue 666 dev/raidframe/rf_paritylogging.c rf_FreeRegionBufferQueue(RF_RegionBufferQueue_t *queue)
queue 670 dev/raidframe/rf_paritylogging.c RF_LOCK_MUTEX(queue->mutex);
queue 671 dev/raidframe/rf_paritylogging.c if (queue->availableBuffers != queue->totalBuffers) {
queue 675 dev/raidframe/rf_paritylogging.c for (i = 0; i < queue->totalBuffers; i++)
queue 676 dev/raidframe/rf_paritylogging.c RF_Free(queue->buffers[i], queue->bufferSize);
queue 677 dev/raidframe/rf_paritylogging.c RF_Free(queue->buffers, queue->totalBuffers * sizeof(caddr_t));
queue 678 dev/raidframe/rf_paritylogging.c RF_UNLOCK_MUTEX(queue->mutex);
queue 679 dev/raidframe/rf_paritylogging.c rf_mutex_destroy(&queue->mutex);
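
The parity-logging teardown code follows a common kernel pattern: take the queue mutex, sanity-check that every buffer came back (availableBuffers against totalBuffers), free everything hanging off the queue, drop the lock, and only then destroy the mutex itself. A small sketch of that teardown order with stand-in types:

/* Teardown-order sketch: lock, drain, unlock, destroy. */
#include <pthread.h>
#include <stdlib.h>

struct buffer_queue {
	pthread_mutex_t	  mutex;
	int		  totalBuffers;
	int		  availableBuffers;
	char		**buffers;
};

void
free_buffer_queue(struct buffer_queue *q)
{
	int i;

	pthread_mutex_lock(&q->mutex);
	if (q->availableBuffers != q->totalBuffers)
		/* the driver warns here: some buffers are still in flight */ ;
	for (i = 0; i < q->totalBuffers; i++)
		free(q->buffers[i]);
	free(q->buffers);
	pthread_mutex_unlock(&q->mutex);
	pthread_mutex_destroy(&q->mutex);	/* only after the last unlock */
}
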
queue 118 dev/raidframe/rf_sstf.c _r_ = (_q_)->queue; \
queue 120 dev/raidframe/rf_sstf.c (_q_)->queue = (_r_)->next; \
queue 124 dev/raidframe/rf_sstf.c RF_ASSERT((_q_)->queue == NULL); \
queue 127 dev/raidframe/rf_sstf.c RF_ASSERT((_q_)->queue->prev == (_r_)); \
queue 128 dev/raidframe/rf_sstf.c (_q_)->queue->prev = NULL; \
queue 140 dev/raidframe/rf_sstf.c RF_ASSERT((_r_) == (_q_)->queue); \
queue 142 dev/raidframe/rf_sstf.c (_q_)->queue = NULL; \
queue 151 dev/raidframe/rf_sstf.c if (SNUM_DIFF((_q_)->queue->sectorOffset,_l_) \
queue 161 dev/raidframe/rf_sstf.c rf_closest_to_arm(RF_SstfQ_t *queue, RF_SectorNum_t arm_pos, int *dir,
queue 169 dev/raidframe/rf_sstf.c for (r = queue->queue; r; r = r->next) {
queue 236 dev/raidframe/rf_sstf.c return (queue->queue);
queue 284 dev/raidframe/rf_sstf.c dq = (RF_DiskQueue_t *) req->queue;
queue 290 dev/raidframe/rf_sstf.c rf_do_sstf_ord_q(&sstfq->lopri.queue, &sstfq->lopri.qtail, req);
queue 294 dev/raidframe/rf_sstf.c rf_do_sstf_ord_q(&sstfq->left.queue,
queue 298 dev/raidframe/rf_sstf.c rf_do_sstf_ord_q(&sstfq->right.queue,
queue 306 dev/raidframe/rf_sstf.c rf_do_dequeue(RF_SstfQ_t *queue, RF_DiskQueueData_t *req)
queue 313 dev/raidframe/rf_sstf.c if (req == queue->queue) {
queue 314 dev/raidframe/rf_sstf.c DO_HEAD_DEQ(req2, queue);
queue 317 dev/raidframe/rf_sstf.c if (req == queue->qtail) {
queue 318 dev/raidframe/rf_sstf.c DO_TAIL_DEQ(req2, queue);
queue 324 dev/raidframe/rf_sstf.c queue->qlen--;
queue 341 dev/raidframe/rf_sstf.c dq = (RF_DiskQueue_t *) req->queue;
queue 347 dev/raidframe/rf_sstf.c if (sstfq->left.queue == NULL) {
queue 349 dev/raidframe/rf_sstf.c if (sstfq->right.queue == NULL) {
queue 351 dev/raidframe/rf_sstf.c if (sstfq->lopri.queue == NULL) {
queue 374 dev/raidframe/rf_sstf.c if (sstfq->right.queue == NULL) {
queue 379 dev/raidframe/rf_sstf.c sstfq->right.queue->sectorOffset) <
queue 403 dev/raidframe/rf_sstf.c dq = (RF_DiskQueue_t *) req->queue;
queue 409 dev/raidframe/rf_sstf.c if (scanq->left.queue == NULL) {
queue 411 dev/raidframe/rf_sstf.c if (scanq->right.queue == NULL) {
queue 413 dev/raidframe/rf_sstf.c if (scanq->lopri.queue == NULL) {
queue 428 dev/raidframe/rf_sstf.c if (scanq->right.queue == NULL) {
queue 430 dev/raidframe/rf_sstf.c RF_ASSERT(scanq->left.queue);
queue 434 dev/raidframe/rf_sstf.c RF_ASSERT(scanq->right.queue);
queue 435 dev/raidframe/rf_sstf.c RF_ASSERT(scanq->left.queue);
queue 458 dev/raidframe/rf_sstf.c dq = (RF_DiskQueue_t *) req->queue;
queue 465 dev/raidframe/rf_sstf.c if (cscanq->right.queue) {
queue 469 dev/raidframe/rf_sstf.c if (cscanq->left.queue == NULL) {
queue 471 dev/raidframe/rf_sstf.c if (cscanq->lopri.queue == NULL) {
queue 488 dev/raidframe/rf_sstf.c cscanq->left.queue = cscanq->left.qtail = NULL;
queue 505 dev/raidframe/rf_sstf.c if ((sstfq->left.queue == NULL) && (sstfq->right.queue == NULL)) {
queue 509 dev/raidframe/rf_sstf.c if (sstfq->left.queue == NULL)
queue 510 dev/raidframe/rf_sstf.c req = sstfq->right.queue;
queue 512 dev/raidframe/rf_sstf.c if (sstfq->right.queue == NULL)
queue 513 dev/raidframe/rf_sstf.c req = sstfq->left.queue;
queue 516 dev/raidframe/rf_sstf.c sstfq->right.queue->sectorOffset) <
queue 519 dev/raidframe/rf_sstf.c req = sstfq->right.queue;
queue 542 dev/raidframe/rf_sstf.c if (scanq->left.queue == NULL) {
queue 544 dev/raidframe/rf_sstf.c if (scanq->right.queue == NULL) {
queue 546 dev/raidframe/rf_sstf.c if (scanq->lopri.queue == NULL) {
queue 553 dev/raidframe/rf_sstf.c req = scanq->right.queue;
queue 556 dev/raidframe/rf_sstf.c if (scanq->right.queue == NULL) {
queue 558 dev/raidframe/rf_sstf.c RF_ASSERT(scanq->left.queue);
queue 561 dev/raidframe/rf_sstf.c RF_ASSERT(scanq->right.queue);
queue 562 dev/raidframe/rf_sstf.c RF_ASSERT(scanq->left.queue);
queue 564 dev/raidframe/rf_sstf.c req = scanq->right.queue;
queue 584 dev/raidframe/rf_sstf.c if (cscanq->right.queue) {
queue 585 dev/raidframe/rf_sstf.c req = cscanq->right.queue;
queue 588 dev/raidframe/rf_sstf.c if (cscanq->left.queue == NULL) {
queue 590 dev/raidframe/rf_sstf.c if (cscanq->lopri.queue == NULL) {
queue 602 dev/raidframe/rf_sstf.c req = cscanq->left.queue;
queue 628 dev/raidframe/rf_sstf.c for (r = sstfq->lopri.queue; r; r = next) {
queue 37 dev/raidframe/rf_sstf.h RF_DiskQueueData_t *queue;
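
rf_sstf.c implements shortest-seek-time-first plus SCAN/CSCAN variants: requests live on left/right/lopri lists, and rf_closest_to_arm() walks a list comparing each request's sectorOffset against the current arm position (the SNUM_DIFF comparisons above). The selection idea, reduced to a sketch over a plain array standing in for the driver's linked queue:

/* SSTF selection sketch: pick the request closest to the arm position. */
#include <stddef.h>
#include <stdint.h>

#define SNUM_DIFF(a, b)	((a) > (b) ? (a) - (b) : (b) - (a))

/* Returns the index of the closest request, or -1 if none are queued. */
int
closest_to_arm(const uint64_t *sector, size_t n, uint64_t arm_pos)
{
	size_t		i;
	int		best = -1;
	uint64_t	best_diff = 0;

	for (i = 0; i < n; i++) {
		uint64_t diff = SNUM_DIFF(sector[i], arm_pos);

		if (best == -1 || diff < best_diff) {
			best = (int)i;
			best_diff = diff;
		}
	}
	return (best);
}
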
queue 1649 dev/usb/ehci.c return (ehci_root_ctrl_start(SIMPLEQ_FIRST(&xfer->pipe->queue)));
queue 2083 dev/usb/ehci.c return (ehci_root_intr_start(SIMPLEQ_FIRST(&xfer->pipe->queue)));
queue 2682 dev/usb/ehci.c return (ehci_device_ctrl_start(SIMPLEQ_FIRST(&xfer->pipe->queue)));
queue 2916 dev/usb/ehci.c return (ehci_device_bulk_start(SIMPLEQ_FIRST(&xfer->pipe->queue)));
queue 3091 dev/usb/ehci.c return (ehci_device_intr_start(SIMPLEQ_FIRST(&xfer->pipe->queue)));
queue 2377 dev/usb/ohci.c return (ohci_root_ctrl_start(SIMPLEQ_FIRST(&xfer->pipe->queue)));
queue 2716 dev/usb/ohci.c return (ohci_root_intr_start(SIMPLEQ_FIRST(&xfer->pipe->queue)));
queue 2773 dev/usb/ohci.c return (ohci_device_ctrl_start(SIMPLEQ_FIRST(&xfer->pipe->queue)));
queue 2849 dev/usb/ohci.c return (ohci_device_bulk_start(SIMPLEQ_FIRST(&xfer->pipe->queue)));
queue 2989 dev/usb/ohci.c return (ohci_device_intr_start(SIMPLEQ_FIRST(&xfer->pipe->queue)));
queue 3205 dev/usb/ohci.c ohci_device_isoc_start(SIMPLEQ_FIRST(&xfer->pipe->queue));
queue 1782 dev/usb/uhci.c return (uhci_device_bulk_start(SIMPLEQ_FIRST(&xfer->pipe->queue)));
queue 1977 dev/usb/uhci.c return (uhci_device_ctrl_start(SIMPLEQ_FIRST(&xfer->pipe->queue)));
queue 2017 dev/usb/uhci.c return (uhci_device_intr_start(SIMPLEQ_FIRST(&xfer->pipe->queue)));
queue 2305 dev/usb/uhci.c uhci_device_isoc_start(SIMPLEQ_FIRST(&xfer->pipe->queue));
queue 3088 dev/usb/uhci.c return (uhci_root_ctrl_start(SIMPLEQ_FIRST(&xfer->pipe->queue)));
queue 3464 dev/usb/uhci.c return (uhci_root_intr_start(SIMPLEQ_FIRST(&xfer->pipe->queue)));
queue 788 dev/usb/usb_subr.c SIMPLEQ_INIT(&p->queue);
queue 139 dev/usb/usbdi.c SIMPLEQ_FOREACH(xfer, &pipe->queue, next) {
queue 251 dev/usb/usbdi.c if (! SIMPLEQ_EMPTY(&pipe->queue))
queue 717 dev/usb/usbdi.c while ((xfer = SIMPLEQ_FIRST(&pipe->queue)) != NULL) {
queue 784 dev/usb/usbdi.c if (xfer != SIMPLEQ_FIRST(&pipe->queue))
queue 786 dev/usb/usbdi.c xfer, SIMPLEQ_FIRST(&pipe->queue));
queue 789 dev/usb/usbdi.c SIMPLEQ_REMOVE_HEAD(&pipe->queue, next);
queue 792 dev/usb/usbdi.c SIMPLEQ_FIRST(&pipe->queue)));
queue 850 dev/usb/usbdi.c SIMPLEQ_INSERT_TAIL(&pipe->queue, xfer, next);
queue 882 dev/usb/usbdi.c xfer = SIMPLEQ_FIRST(&pipe->queue);
queue 175 dev/usb/usbdivar.h SIMPLEQ_HEAD(, usbd_xfer) queue;
queue 673 dev/usb/usbf_subr.c SIMPLEQ_INIT(&p->queue);
queue 694 dev/usb/usbf_subr.c while ((xfer = SIMPLEQ_FIRST(&pipe->queue)) != NULL) {
queue 984 dev/usb/usbf_subr.c SIMPLEQ_INSERT_TAIL(&pipe->queue, xfer, next);
queue 1004 dev/usb/usbf_subr.c xfer = SIMPLEQ_FIRST(&pipe->queue);
queue 1034 dev/usb/usbf_subr.c KASSERT(SIMPLEQ_FIRST(&pipe->queue) == xfer);
queue 1035 dev/usb/usbf_subr.c SIMPLEQ_REMOVE_HEAD(&pipe->queue, next);
queue 123 dev/usb/usbfvar.h SIMPLEQ_HEAD(, usbf_xfer) queue;
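
All of the USB host (and usbf device-side) controller hits share one pattern: the pipe keeps a SIMPLEQ of pending xfers (the usbdivar.h/usbfvar.h members above), transfers are appended with SIMPLEQ_INSERT_TAIL, and each *_transfer method just calls its *_start counterpart on SIMPLEQ_FIRST(&xfer->pipe->queue), so only the head of the queue is ever active on the hardware; completion removes the head and starts the next. A minimal userland model of that "queue, then start the head" discipline; the xfer and pipe types are stand-ins, not the USB stack's:

/* "Queue, then start the head" model of the usbdi pipe queue. */
#include <sys/queue.h>
#include <stdio.h>

struct xfer {
	int			 id;
	SIMPLEQ_ENTRY(xfer)	 next;
};

struct pipe {
	SIMPLEQ_HEAD(, xfer)	 queue;
};

void
pipe_start_head(struct pipe *p)
{
	struct xfer *x = SIMPLEQ_FIRST(&p->queue);

	if (x != NULL)
		printf("starting xfer %d\n", x->id);	/* hand to hardware */
}

void
pipe_transfer(struct pipe *p, struct xfer *x)
{
	int was_empty = SIMPLEQ_EMPTY(&p->queue);

	SIMPLEQ_INSERT_TAIL(&p->queue, x, next);
	if (was_empty)				/* nothing active: kick the head */
		pipe_start_head(p);
}

void
pipe_done_head(struct pipe *p)
{
	SIMPLEQ_REMOVE_HEAD(&p->queue, next);	/* retire the active xfer */
	pipe_start_head(p);			/* start the next one, if any */
}

int
main(void)
{
	struct pipe p;
	struct xfer a = { .id = 1 }, b = { .id = 2 };

	SIMPLEQ_INIT(&p.queue);
	pipe_transfer(&p, &a);
	pipe_transfer(&p, &b);		/* queued behind a */
	pipe_done_head(&p);		/* a done, b starts */
	return (0);
}
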
queue 167 kern/vfs_bio.c int queue;
queue 186 kern/vfs_bio.c queue = size2cqueue(&qs);
queue 188 kern/vfs_bio.c bqpages[queue] -= btoc(bp->b_bufsize);
queue 197 kern/vfs_bio.c int npages, queue;
queue 207 kern/vfs_bio.c queue = size2cqueue(&size);
queue 212 kern/vfs_bio.c bqpages[queue] += npages;
queue 267 kern/vfs_bio.c int queue, qs;
queue 276 kern/vfs_bio.c queue = size2cqueue(&qs);
queue 293 kern/vfs_bio.c dp = &bufqueues[queue];
queue 757 kern/vfs_bio.c int queue, qs;
queue 784 kern/vfs_bio.c queue = size2cqueue(&qs);
queue 786 kern/vfs_bio.c bqpages[queue] += btoc(bp->b_bufsize);
queue 789 kern/vfs_bio.c binsheadfree(bp, &bufqueues[queue]);
queue 795 kern/vfs_bio.c int queue, qs;
queue 798 kern/vfs_bio.c queue = size2cqueue(&qs);
queue 802 kern/vfs_bio.c bqpages[queue] += btoc(bp->b_bufsize);
queue 805 kern/vfs_bio.c bufq = &bufqueues[queue];
queue 960 kern/vfs_bio.c int s, error, queue, qs;
queue 976 kern/vfs_bio.c queue = size2cqueue(&qs);
queue 984 kern/vfs_bio.c bp = TAILQ_FIRST(&bufqueues[queue]);
queue 985 kern/vfs_bio.c queue++;
queue 986 kern/vfs_bio.c } while (bp == NULL && queue < BQUEUES);
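
The kern/vfs_bio.c hits group buffers into per-size-class queues: size2cqueue() rounds a size up to its class and returns the queue index, bqpages[] tracks pages per class, and the allocator scans from the smallest class that fits upward (the bp == NULL && queue < BQUEUES loop) until a free buffer turns up. A sketch of that size-class scan; the class boundaries and helper names below are invented, not the kernel's:

/* Invented size classes illustrating the scan around size2cqueue(). */
#include <stddef.h>

#define NQUEUES	4
static const size_t class_size[NQUEUES] = { 4096, 8192, 16384, 65536 };

/* Round *sizep up to its class and return the class (queue) index,
 * in the role size2cqueue() plays above. */
static int
size_to_queue(size_t *sizep)
{
	int q;

	for (q = 0; q < NQUEUES - 1; q++)
		if (*sizep <= class_size[q])
			break;
	*sizep = class_size[q];
	return (q);
}

/* Scan from the smallest fitting class upward until a buffer turns up;
 * first_free() is a hypothetical per-queue lookup hook. */
void *
find_free_buf(size_t size, void *(*first_free)(int queue))
{
	size_t	 qs = size;
	int	 queue = size_to_queue(&qs);
	void	*bp = NULL;

	do {
		bp = first_free(queue);
		queue++;
	} while (bp == NULL && queue < NQUEUES);
	return (bp);
}
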
queue 1198 net/route.c struct rttimer *), struct rttimer_queue *queue)
queue 1204 net/route.c rt->rt_rmx.rmx_expire = time_second + queue->rtq_timeout;
queue 1232 net/route.c r->rtt_queue = queue;
queue 1234 net/route.c TAILQ_INSERT_TAIL(&queue->rtq_head, r, rtt_next);
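
The net/route.c lines set a route's expiry to time_second plus the timer queue's rtq_timeout, point the rttimer record back at its queue, and append it with TAILQ_INSERT_TAIL; because entries are appended in arrival order with a fixed per-queue timeout, each queue can be expired from the head. A sketch of that timer-queue bookkeeping; the types and time source are simplified stand-ins:

/* Timer-queue sketch modelled on the rttimer lines above. */
#include <sys/queue.h>
#include <time.h>

struct timer_entry {
	time_t				 expire;
	TAILQ_ENTRY(timer_entry)	 next;
};

struct timer_queue {
	time_t				 timeout;	/* seconds until expiry */
	TAILQ_HEAD(, timer_entry)	 head;
};

void
timer_add(struct timer_queue *q, struct timer_entry *e)
{
	e->expire = time(NULL) + q->timeout;
	TAILQ_INSERT_TAIL(&q->head, e, next);	/* head holds the oldest entry */
}

/* Expire from the head: entries are in non-decreasing expire order. */
struct timer_entry *
timer_expire_one(struct timer_queue *q, time_t now)
{
	struct timer_entry *e = TAILQ_FIRST(&q->head);

	if (e != NULL && e->expire <= now) {
		TAILQ_REMOVE(&q->head, e, next);
		return (e);
	}
	return (NULL);
}
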