ring              391 dev/ic/acx100.c 	struct acx100_conf_fw_ring ring;
ring              413 dev/ic/acx100.c 	bzero(&ring, sizeof(ring));
ring              414 dev/ic/acx100.c 	ring.fw_ring_size = htole32(ACX100_FW_TXRING_SIZE +
ring              417 dev/ic/acx100.c 	ring.fw_txring_num = 1;
ring              418 dev/ic/acx100.c 	ring.fw_txring_addr = htole32(txring_start);
ring              419 dev/ic/acx100.c 	ring.fw_txring_prio = ACX100_TXRING_PRIO_DEFAULT;
ring              420 dev/ic/acx100.c 	ring.fw_txdesc_num = 0; /* XXX ignored?? */
ring              422 dev/ic/acx100.c 	ring.fw_rxring_addr = htole32(rxring_start);
ring              423 dev/ic/acx100.c 	ring.fw_rxdesc_num = 0; /* XXX ignored?? */
ring              425 dev/ic/acx100.c 	ring.opt = ACX100_RINGOPT_AUTO_RESET;
ring              426 dev/ic/acx100.c 	ACX100_SET_RING_END(&ring, ring_end);
ring              427 dev/ic/acx100.c 	if (acx_set_conf(sc, ACX100_CONF_FW_RING, &ring, sizeof(ring)) != 0) {
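
The acx100 entries above configure a firmware-managed ring rather than a host-side DMA ring: the record is zeroed, multi-byte fields are stored little-endian with htole32(), and the finished structure is pushed to the device through acx_set_conf(). A minimal sketch of that pattern, reusing only names visible in the listing (the control flow and error value are illustrative):

	struct acx100_conf_fw_ring ring;

	bzero(&ring, sizeof(ring));
	/* the device expects little-endian addresses and sizes */
	ring.fw_txring_addr = htole32(txring_start);
	ring.fw_rxring_addr = htole32(rxring_start);
	ring.opt = ACX100_RINGOPT_AUTO_RESET;
	ACX100_SET_RING_END(&ring, ring_end);

	/* hand the record to the firmware; nonzero means it was refused */
	if (acx_set_conf(sc, ACX100_CONF_FW_RING, &ring, sizeof(ring)) != 0)
		return (ENXIO);
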
ring              252 dev/ic/malo.c  int	malo_alloc_rx_ring(struct malo_softc *sc, struct malo_rx_ring *ring,
ring              254 dev/ic/malo.c  void	malo_reset_rx_ring(struct malo_softc *sc, struct malo_rx_ring *ring);
ring              255 dev/ic/malo.c  void	malo_free_rx_ring(struct malo_softc *sc, struct malo_rx_ring *ring);
ring              256 dev/ic/malo.c  int	malo_alloc_tx_ring(struct malo_softc *sc, struct malo_tx_ring *ring,
ring              258 dev/ic/malo.c  void	malo_reset_tx_ring(struct malo_softc *sc, struct malo_tx_ring *ring);
ring              259 dev/ic/malo.c  void	malo_free_tx_ring(struct malo_softc *sc, struct malo_tx_ring *ring);
ring              553 dev/ic/malo.c  malo_alloc_rx_ring(struct malo_softc *sc, struct malo_rx_ring *ring, int count)
ring              559 dev/ic/malo.c  	ring->count = count;
ring              560 dev/ic/malo.c  	ring->cur = ring->next = 0;
ring              565 dev/ic/malo.c  	    BUS_DMA_NOWAIT, &ring->map);
ring              574 dev/ic/malo.c  	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
ring              581 dev/ic/malo.c  	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
ring              582 dev/ic/malo.c  	    count * sizeof(struct malo_rx_desc), (caddr_t *)&ring->desc,
ring              590 dev/ic/malo.c  	error = bus_dmamap_load(sc->sc_dmat, ring->map, ring->desc,
ring              598 dev/ic/malo.c  	bzero(ring->desc, count * sizeof(struct malo_rx_desc));
ring              599 dev/ic/malo.c  	ring->physaddr = ring->map->dm_segs->ds_addr;
ring              601 dev/ic/malo.c  	ring->data = malloc(count * sizeof (struct malo_rx_data), M_DEVBUF,
ring              603 dev/ic/malo.c  	if (ring->data == NULL) {
ring              613 dev/ic/malo.c  	bzero(ring->data, count * sizeof (struct malo_rx_data));
ring              615 dev/ic/malo.c  		desc = &ring->desc[i];
ring              616 dev/ic/malo.c  		data = &ring->data[i];
ring              652 dev/ic/malo.c  		desc->physnext = htole32(ring->physaddr +
ring              656 dev/ic/malo.c  	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
ring              661 dev/ic/malo.c  fail:	malo_free_rx_ring(sc, ring);
ring              666 dev/ic/malo.c  malo_reset_rx_ring(struct malo_softc *sc, struct malo_rx_ring *ring)
ring              670 dev/ic/malo.c  	for (i = 0; i < ring->count; i++)
ring              671 dev/ic/malo.c  		ring->desc[i].status = 0;
ring              673 dev/ic/malo.c  	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
ring              676 dev/ic/malo.c  	ring->cur = ring->next = 0;
ring              680 dev/ic/malo.c  malo_free_rx_ring(struct malo_softc *sc, struct malo_rx_ring *ring)
ring              685 dev/ic/malo.c  	if (ring->desc != NULL) {
ring              686 dev/ic/malo.c  		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
ring              687 dev/ic/malo.c  		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
ring              688 dev/ic/malo.c  		bus_dmamap_unload(sc->sc_dmat, ring->map);
ring              689 dev/ic/malo.c  		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)ring->desc,
ring              690 dev/ic/malo.c  		    ring->count * sizeof(struct malo_rx_desc));
ring              691 dev/ic/malo.c  		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
ring              694 dev/ic/malo.c  	if (ring->data != NULL) {
ring              695 dev/ic/malo.c  		for (i = 0; i < ring->count; i++) {
ring              696 dev/ic/malo.c  			data = &ring->data[i];
ring              709 dev/ic/malo.c  		free(ring->data, M_DEVBUF);
ring              714 dev/ic/malo.c  malo_alloc_tx_ring(struct malo_softc *sc, struct malo_tx_ring *ring,
ring              719 dev/ic/malo.c  	ring->count = count;
ring              720 dev/ic/malo.c  	ring->queued = 0;
ring              721 dev/ic/malo.c  	ring->cur = ring->next = ring->stat = 0;
ring              725 dev/ic/malo.c  	    count * sizeof(struct malo_tx_desc), 0, BUS_DMA_NOWAIT, &ring->map);
ring              734 dev/ic/malo.c  	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
ring              741 dev/ic/malo.c  	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
ring              742 dev/ic/malo.c  	    count * sizeof(struct malo_tx_desc), (caddr_t *)&ring->desc,
ring              750 dev/ic/malo.c  	error = bus_dmamap_load(sc->sc_dmat, ring->map, ring->desc,
ring              758 dev/ic/malo.c  	memset(ring->desc, 0, count * sizeof(struct malo_tx_desc));
ring              759 dev/ic/malo.c  	ring->physaddr = ring->map->dm_segs->ds_addr;
ring              761 dev/ic/malo.c  	ring->data = malloc(count * sizeof(struct malo_tx_data), M_DEVBUF,
ring              763 dev/ic/malo.c  	if (ring->data == NULL) {
ring              770 dev/ic/malo.c  	memset(ring->data, 0, count * sizeof(struct malo_tx_data));
ring              774 dev/ic/malo.c  		    &ring->data[i].map);
ring              780 dev/ic/malo.c  		ring->desc[i].physnext = htole32(ring->physaddr +
ring              786 dev/ic/malo.c  fail:	malo_free_tx_ring(sc, ring);
ring              791 dev/ic/malo.c  malo_reset_tx_ring(struct malo_softc *sc, struct malo_tx_ring *ring)
ring              797 dev/ic/malo.c  	for (i = 0; i < ring->count; i++) {
ring              798 dev/ic/malo.c  		desc = &ring->desc[i];
ring              799 dev/ic/malo.c  		data = &ring->data[i];
ring              818 dev/ic/malo.c  	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
ring              821 dev/ic/malo.c  	ring->queued = 0;
ring              822 dev/ic/malo.c  	ring->cur = ring->next = ring->stat = 0;
ring              826 dev/ic/malo.c  malo_free_tx_ring(struct malo_softc *sc, struct malo_tx_ring *ring)
ring              831 dev/ic/malo.c  	if (ring->desc != NULL) {
ring              832 dev/ic/malo.c  		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
ring              833 dev/ic/malo.c  		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
ring              834 dev/ic/malo.c  		bus_dmamap_unload(sc->sc_dmat, ring->map);
ring              835 dev/ic/malo.c  		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)ring->desc,
ring              836 dev/ic/malo.c  		    ring->count * sizeof(struct malo_tx_desc));
ring              837 dev/ic/malo.c  		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
ring              840 dev/ic/malo.c  	if (ring->data != NULL) {
ring              841 dev/ic/malo.c  		for (i = 0; i < ring->count; i++) {
ring              842 dev/ic/malo.c  			data = &ring->data[i];
ring              861 dev/ic/malo.c  		free(ring->data, M_DEVBUF);
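
malo_alloc_rx_ring() and malo_alloc_tx_ring() above walk the canonical bus_dma(9) sequence: create a map, allocate DMA-safe memory, map it into kernel virtual space, then load the map to learn the device-visible address. The drivers unwind errors by jumping to a fail: label that calls the matching free routine; the sketch below unwinds explicitly instead, and its names are illustrative, not malo's own:

	int
	ring_dma_alloc(bus_dma_tag_t dmat, bus_size_t size, bus_dmamap_t *map,
	    bus_dma_segment_t *seg, caddr_t *kva, bus_addr_t *pa)
	{
		int nsegs, error;

		/* 1. one map, one contiguous segment */
		error = bus_dmamap_create(dmat, size, 1, size, 0,
		    BUS_DMA_NOWAIT, map);
		if (error != 0)
			return (error);

		/* 2. DMA-safe physical memory, page-aligned */
		error = bus_dmamem_alloc(dmat, size, PAGE_SIZE, 0, seg, 1,
		    &nsegs, BUS_DMA_NOWAIT);
		if (error != 0)
			goto destroy;

		/* 3. make it addressable by the CPU */
		error = bus_dmamem_map(dmat, seg, nsegs, size, kva,
		    BUS_DMA_NOWAIT);
		if (error != 0)
			goto free;

		/* 4. load the map; this yields the bus address */
		error = bus_dmamap_load(dmat, *map, *kva, size, NULL,
		    BUS_DMA_NOWAIT);
		if (error != 0)
			goto unmap;

		bzero(*kva, size);
		*pa = (*map)->dm_segs[0].ds_addr;
		return (0);

	unmap:
		bus_dmamem_unmap(dmat, *kva, size);
	free:
		bus_dmamem_free(dmat, seg, 1);
	destroy:
		bus_dmamap_destroy(dmat, *map);
		return (error);
	}
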
ring              342 dev/ic/rt2560.c rt2560_alloc_tx_ring(struct rt2560_softc *sc, struct rt2560_tx_ring *ring,
ring              347 dev/ic/rt2560.c 	ring->count = count;
ring              348 dev/ic/rt2560.c 	ring->queued = 0;
ring              349 dev/ic/rt2560.c 	ring->cur = ring->next = 0;
ring              350 dev/ic/rt2560.c 	ring->cur_encrypt = ring->next_encrypt = 0;
ring              353 dev/ic/rt2560.c 	    count * RT2560_TX_DESC_SIZE, 0, BUS_DMA_NOWAIT, &ring->map);
ring              361 dev/ic/rt2560.c 	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
ring              368 dev/ic/rt2560.c 	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
ring              369 dev/ic/rt2560.c 	    count * RT2560_TX_DESC_SIZE, (caddr_t *)&ring->desc,
ring              377 dev/ic/rt2560.c 	error = bus_dmamap_load(sc->sc_dmat, ring->map, ring->desc,
ring              385 dev/ic/rt2560.c 	memset(ring->desc, 0, count * RT2560_TX_DESC_SIZE);
ring              386 dev/ic/rt2560.c 	ring->physaddr = ring->map->dm_segs->ds_addr;
ring              388 dev/ic/rt2560.c 	ring->data = malloc(count * sizeof (struct rt2560_tx_data), M_DEVBUF,
ring              390 dev/ic/rt2560.c 	if (ring->data == NULL) {
ring              397 dev/ic/rt2560.c 	memset(ring->data, 0, count * sizeof (struct rt2560_tx_data));
ring              401 dev/ic/rt2560.c 		    &ring->data[i].map);
ring              411 dev/ic/rt2560.c fail:	rt2560_free_tx_ring(sc, ring);
ring              416 dev/ic/rt2560.c rt2560_reset_tx_ring(struct rt2560_softc *sc, struct rt2560_tx_ring *ring)
ring              420 dev/ic/rt2560.c 	for (i = 0; i < ring->count; i++) {
ring              421 dev/ic/rt2560.c 		struct rt2560_tx_desc *desc = &ring->desc[i];
ring              422 dev/ic/rt2560.c 		struct rt2560_tx_data *data = &ring->data[i];
ring              441 dev/ic/rt2560.c 	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
ring              444 dev/ic/rt2560.c 	ring->queued = 0;
ring              445 dev/ic/rt2560.c 	ring->cur = ring->next = 0;
ring              446 dev/ic/rt2560.c 	ring->cur_encrypt = ring->next_encrypt = 0;
ring              450 dev/ic/rt2560.c rt2560_free_tx_ring(struct rt2560_softc *sc, struct rt2560_tx_ring *ring)
ring              454 dev/ic/rt2560.c 	if (ring->desc != NULL) {
ring              455 dev/ic/rt2560.c 		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
ring              456 dev/ic/rt2560.c 		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
ring              457 dev/ic/rt2560.c 		bus_dmamap_unload(sc->sc_dmat, ring->map);
ring              458 dev/ic/rt2560.c 		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)ring->desc,
ring              459 dev/ic/rt2560.c 		    ring->count * RT2560_TX_DESC_SIZE);
ring              460 dev/ic/rt2560.c 		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
ring              463 dev/ic/rt2560.c 	if (ring->data != NULL) {
ring              464 dev/ic/rt2560.c 		for (i = 0; i < ring->count; i++) {
ring              465 dev/ic/rt2560.c 			struct rt2560_tx_data *data = &ring->data[i];
ring              484 dev/ic/rt2560.c 		free(ring->data, M_DEVBUF);
ring              489 dev/ic/rt2560.c rt2560_alloc_rx_ring(struct rt2560_softc *sc, struct rt2560_rx_ring *ring,
ring              494 dev/ic/rt2560.c 	ring->count = count;
ring              495 dev/ic/rt2560.c 	ring->cur = ring->next = 0;
ring              496 dev/ic/rt2560.c 	ring->cur_decrypt = 0;
ring              499 dev/ic/rt2560.c 	    count * RT2560_RX_DESC_SIZE, 0, BUS_DMA_NOWAIT, &ring->map);
ring              507 dev/ic/rt2560.c 	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
ring              514 dev/ic/rt2560.c 	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
ring              515 dev/ic/rt2560.c 	    count * RT2560_RX_DESC_SIZE, (caddr_t *)&ring->desc,
ring              523 dev/ic/rt2560.c 	error = bus_dmamap_load(sc->sc_dmat, ring->map, ring->desc,
ring              531 dev/ic/rt2560.c 	memset(ring->desc, 0, count * RT2560_RX_DESC_SIZE);
ring              532 dev/ic/rt2560.c 	ring->physaddr = ring->map->dm_segs->ds_addr;
ring              534 dev/ic/rt2560.c 	ring->data = malloc(count * sizeof (struct rt2560_rx_data), M_DEVBUF,
ring              536 dev/ic/rt2560.c 	if (ring->data == NULL) {
ring              546 dev/ic/rt2560.c 	memset(ring->data, 0, count * sizeof (struct rt2560_rx_data));
ring              586 dev/ic/rt2560.c 	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
ring              591 dev/ic/rt2560.c fail:	rt2560_free_rx_ring(sc, ring);
ring              596 dev/ic/rt2560.c rt2560_reset_rx_ring(struct rt2560_softc *sc, struct rt2560_rx_ring *ring)
ring              600 dev/ic/rt2560.c 	for (i = 0; i < ring->count; i++) {
ring              601 dev/ic/rt2560.c 		ring->desc[i].flags = htole32(RT2560_RX_BUSY);
ring              602 dev/ic/rt2560.c 		ring->data[i].drop = 0;
ring              605 dev/ic/rt2560.c 	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
ring              608 dev/ic/rt2560.c 	ring->cur = ring->next = 0;
ring              609 dev/ic/rt2560.c 	ring->cur_decrypt = 0;
ring              613 dev/ic/rt2560.c rt2560_free_rx_ring(struct rt2560_softc *sc, struct rt2560_rx_ring *ring)
ring              617 dev/ic/rt2560.c 	if (ring->desc != NULL) {
ring              618 dev/ic/rt2560.c 		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
ring              619 dev/ic/rt2560.c 		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
ring              620 dev/ic/rt2560.c 		bus_dmamap_unload(sc->sc_dmat, ring->map);
ring              621 dev/ic/rt2560.c 		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)ring->desc,
ring              622 dev/ic/rt2560.c 		    ring->count * RT2560_RX_DESC_SIZE);
ring              623 dev/ic/rt2560.c 		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
ring              626 dev/ic/rt2560.c 	if (ring->data != NULL) {
ring              627 dev/ic/rt2560.c 		for (i = 0; i < ring->count; i++) {
ring              628 dev/ic/rt2560.c 			struct rt2560_rx_data *data = &ring->data[i];
ring              641 dev/ic/rt2560.c 		free(ring->data, M_DEVBUF);
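
Across rt2560 and the other drivers here, cur/next/queued is ordinary producer/consumer state: cur is the slot the driver fills next, next is the oldest slot still owned by the hardware, and queued is the distance between them. A hedged sketch of the arithmetic (the explicit modulo form appears verbatim in the if_wpi.c entries further down):

	/* enqueue: fill ring->desc[ring->cur], then advance the producer */
	ring->cur = (ring->cur + 1) % ring->count;
	ring->queued++;

	/* reclaim: the device completed ring->desc[ring->next] */
	ring->next = (ring->next + 1) % ring->count;
	ring->queued--;

	/* ring->queued == 0 means empty; == ring->count means full */

The reset routines rewind all of these to zero, but only after a bus_dmamap_sync(..., BUS_DMASYNC_PREWRITE) flushes the rewritten descriptors, so the device never sees a half-initialized ring.
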
ring              376 dev/ic/rt2661.c rt2661_alloc_tx_ring(struct rt2661_softc *sc, struct rt2661_tx_ring *ring,
ring              381 dev/ic/rt2661.c 	ring->count = count;
ring              382 dev/ic/rt2661.c 	ring->queued = 0;
ring              383 dev/ic/rt2661.c 	ring->cur = ring->next = ring->stat = 0;
ring              386 dev/ic/rt2661.c 	    count * RT2661_TX_DESC_SIZE, 0, BUS_DMA_NOWAIT, &ring->map);
ring              394 dev/ic/rt2661.c 	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
ring              401 dev/ic/rt2661.c 	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
ring              402 dev/ic/rt2661.c 	    count * RT2661_TX_DESC_SIZE, (caddr_t *)&ring->desc,
ring              410 dev/ic/rt2661.c 	error = bus_dmamap_load(sc->sc_dmat, ring->map, ring->desc,
ring              418 dev/ic/rt2661.c 	memset(ring->desc, 0, count * RT2661_TX_DESC_SIZE);
ring              419 dev/ic/rt2661.c 	ring->physaddr = ring->map->dm_segs->ds_addr;
ring              421 dev/ic/rt2661.c 	ring->data = malloc(count * sizeof (struct rt2661_tx_data), M_DEVBUF,
ring              423 dev/ic/rt2661.c 	if (ring->data == NULL) {
ring              430 dev/ic/rt2661.c 	memset(ring->data, 0, count * sizeof (struct rt2661_tx_data));
ring              434 dev/ic/rt2661.c 		    &ring->data[i].map);
ring              444 dev/ic/rt2661.c fail:	rt2661_free_tx_ring(sc, ring);
ring              449 dev/ic/rt2661.c rt2661_reset_tx_ring(struct rt2661_softc *sc, struct rt2661_tx_ring *ring)
ring              453 dev/ic/rt2661.c 	for (i = 0; i < ring->count; i++) {
ring              454 dev/ic/rt2661.c 		struct rt2661_tx_desc *desc = &ring->desc[i];
ring              455 dev/ic/rt2661.c 		struct rt2661_tx_data *data = &ring->data[i];
ring              474 dev/ic/rt2661.c 	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
ring              477 dev/ic/rt2661.c 	ring->queued = 0;
ring              478 dev/ic/rt2661.c 	ring->cur = ring->next = ring->stat = 0;
ring              482 dev/ic/rt2661.c rt2661_free_tx_ring(struct rt2661_softc *sc, struct rt2661_tx_ring *ring)
ring              486 dev/ic/rt2661.c 	if (ring->desc != NULL) {
ring              487 dev/ic/rt2661.c 		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
ring              488 dev/ic/rt2661.c 		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
ring              489 dev/ic/rt2661.c 		bus_dmamap_unload(sc->sc_dmat, ring->map);
ring              490 dev/ic/rt2661.c 		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)ring->desc,
ring              491 dev/ic/rt2661.c 		    ring->count * RT2661_TX_DESC_SIZE);
ring              492 dev/ic/rt2661.c 		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
ring              495 dev/ic/rt2661.c 	if (ring->data != NULL) {
ring              496 dev/ic/rt2661.c 		for (i = 0; i < ring->count; i++) {
ring              497 dev/ic/rt2661.c 			struct rt2661_tx_data *data = &ring->data[i];
ring              515 dev/ic/rt2661.c 		free(ring->data, M_DEVBUF);
ring              520 dev/ic/rt2661.c rt2661_alloc_rx_ring(struct rt2661_softc *sc, struct rt2661_rx_ring *ring,
ring              525 dev/ic/rt2661.c 	ring->count = count;
ring              526 dev/ic/rt2661.c 	ring->cur = ring->next = 0;
ring              529 dev/ic/rt2661.c 	    count * RT2661_RX_DESC_SIZE, 0, BUS_DMA_NOWAIT, &ring->map);
ring              537 dev/ic/rt2661.c 	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
ring              544 dev/ic/rt2661.c 	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
ring              545 dev/ic/rt2661.c 	    count * RT2661_RX_DESC_SIZE, (caddr_t *)&ring->desc,
ring              553 dev/ic/rt2661.c 	error = bus_dmamap_load(sc->sc_dmat, ring->map, ring->desc,
ring              561 dev/ic/rt2661.c 	memset(ring->desc, 0, count * RT2661_RX_DESC_SIZE);
ring              562 dev/ic/rt2661.c 	ring->physaddr = ring->map->dm_segs->ds_addr;
ring              564 dev/ic/rt2661.c 	ring->data = malloc(count * sizeof (struct rt2661_rx_data), M_DEVBUF,
ring              566 dev/ic/rt2661.c 	if (ring->data == NULL) {
ring              576 dev/ic/rt2661.c 	memset(ring->data, 0, count * sizeof (struct rt2661_rx_data));
ring              616 dev/ic/rt2661.c 	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
ring              621 dev/ic/rt2661.c fail:	rt2661_free_rx_ring(sc, ring);
ring              626 dev/ic/rt2661.c rt2661_reset_rx_ring(struct rt2661_softc *sc, struct rt2661_rx_ring *ring)
ring              630 dev/ic/rt2661.c 	for (i = 0; i < ring->count; i++)
ring              631 dev/ic/rt2661.c 		ring->desc[i].flags = htole32(RT2661_RX_BUSY);
ring              633 dev/ic/rt2661.c 	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
ring              636 dev/ic/rt2661.c 	ring->cur = ring->next = 0;
ring              640 dev/ic/rt2661.c rt2661_free_rx_ring(struct rt2661_softc *sc, struct rt2661_rx_ring *ring)
ring              644 dev/ic/rt2661.c 	if (ring->desc != NULL) {
ring              645 dev/ic/rt2661.c 		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
ring              646 dev/ic/rt2661.c 		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
ring              647 dev/ic/rt2661.c 		bus_dmamap_unload(sc->sc_dmat, ring->map);
ring              648 dev/ic/rt2661.c 		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)ring->desc,
ring              649 dev/ic/rt2661.c 		    ring->count * RT2661_RX_DESC_SIZE);
ring              650 dev/ic/rt2661.c 		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
ring              653 dev/ic/rt2661.c 	if (ring->data != NULL) {
ring              654 dev/ic/rt2661.c 		for (i = 0; i < ring->count; i++) {
ring              655 dev/ic/rt2661.c 			struct rt2661_rx_data *data = &ring->data[i];
ring              668 dev/ic/rt2661.c 		free(ring->data, M_DEVBUF);
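
Every free routine above is the exact mirror of its allocator: sync with BUS_DMASYNC_POSTWRITE so pending device writes land, then unload, unmap, and free in reverse order of creation. Because each step is guarded by a NULL check, the routine is safe against a partially built ring, which is what lets the allocators bail out with a bare fail: rt2661_free_tx_ring(sc, ring). A condensed sketch (struct names illustrative):

	void
	ring_dma_free(bus_dma_tag_t dmat, struct ring *ring, size_t descsz)
	{
		if (ring->desc != NULL) {
			bus_dmamap_sync(dmat, ring->map, 0,
			    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(dmat, ring->map);	/* undo load */
			bus_dmamem_unmap(dmat, (caddr_t)ring->desc,
			    ring->count * descsz);		/* undo map */
			bus_dmamem_free(dmat, &ring->seg, 1);	/* undo alloc */
		}
		if (ring->data != NULL) {
			/* per-slot mbufs and maps are released here */
			free(ring->data, M_DEVBUF);
		}
	}
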
ring              252 dev/ic/rtwvar.h #define RTW_DESC_OFFSET(ring, i)	offsetof(struct rtw_descs, ring[i])
ring              253 dev/ic/rtwvar.h #define RTW_RING_OFFSET(ring)		RTW_DESC_OFFSET(ring, 0)
ring              254 dev/ic/rtwvar.h #define RTW_RING_BASE(sc, ring)		((sc)->sc_desc_physaddr + \
ring              255 dev/ic/rtwvar.h 					 RTW_RING_OFFSET(ring))
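
The rtw macros derive every ring's bus address from a single allocation holding struct rtw_descs: offsetof() gives the byte offset of descriptor i within a given member array, and adding sc_desc_physaddr converts that into the address programmed into the chip. Expanded by hand (hd_rx is assumed here as a member name, for illustration only):

	RTW_RING_BASE(sc, hd_rx)
	    == (sc)->sc_desc_physaddr + RTW_RING_OFFSET(hd_rx)
	    == (sc)->sc_desc_physaddr + offsetof(struct rtw_descs, hd_rx[0])
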
ring              390 dev/pci/if_iwi.c iwi_alloc_cmd_ring(struct iwi_softc *sc, struct iwi_cmd_ring *ring)
ring              394 dev/pci/if_iwi.c 	ring->queued = 0;
ring              395 dev/pci/if_iwi.c 	ring->cur = ring->next = 0;
ring              400 dev/pci/if_iwi.c 	    BUS_DMA_NOWAIT, &ring->map);
ring              409 dev/pci/if_iwi.c 	    &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
ring              416 dev/pci/if_iwi.c 	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
ring              418 dev/pci/if_iwi.c 	    (caddr_t *)&ring->desc, BUS_DMA_NOWAIT);
ring              425 dev/pci/if_iwi.c 	error = bus_dmamap_load(sc->sc_dmat, ring->map, ring->desc,
ring              434 dev/pci/if_iwi.c 	bzero(ring->desc, sizeof (struct iwi_cmd_desc) * IWI_CMD_RING_COUNT);
ring              437 dev/pci/if_iwi.c fail:	iwi_free_cmd_ring(sc, ring);
ring              442 dev/pci/if_iwi.c iwi_reset_cmd_ring(struct iwi_softc *sc, struct iwi_cmd_ring *ring)
ring              444 dev/pci/if_iwi.c 	ring->queued = 0;
ring              445 dev/pci/if_iwi.c 	ring->cur = ring->next = 0;
ring              449 dev/pci/if_iwi.c iwi_free_cmd_ring(struct iwi_softc *sc, struct iwi_cmd_ring *ring)
ring              451 dev/pci/if_iwi.c 	if (ring->map != NULL) {
ring              452 dev/pci/if_iwi.c 		if (ring->desc != NULL) {
ring              453 dev/pci/if_iwi.c 			bus_dmamap_unload(sc->sc_dmat, ring->map);
ring              454 dev/pci/if_iwi.c 			bus_dmamem_unmap(sc->sc_dmat, (caddr_t)ring->desc,
ring              456 dev/pci/if_iwi.c 			bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
ring              458 dev/pci/if_iwi.c 		bus_dmamap_destroy(sc->sc_dmat, ring->map);
ring              463 dev/pci/if_iwi.c iwi_alloc_tx_ring(struct iwi_softc *sc, struct iwi_tx_ring *ring,
ring              469 dev/pci/if_iwi.c 	ring->queued = 0;
ring              470 dev/pci/if_iwi.c 	ring->cur = ring->next = 0;
ring              471 dev/pci/if_iwi.c 	ring->csr_ridx = csr_ridx;
ring              472 dev/pci/if_iwi.c 	ring->csr_widx = csr_widx;
ring              477 dev/pci/if_iwi.c 	    &ring->map);
ring              486 dev/pci/if_iwi.c 	    &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
ring              493 dev/pci/if_iwi.c 	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
ring              495 dev/pci/if_iwi.c 	    (caddr_t *)&ring->desc, BUS_DMA_NOWAIT);
ring              502 dev/pci/if_iwi.c 	error = bus_dmamap_load(sc->sc_dmat, ring->map, ring->desc,
ring              511 dev/pci/if_iwi.c 	bzero(ring->desc, sizeof (struct iwi_tx_desc) * IWI_TX_RING_COUNT);
ring              514 dev/pci/if_iwi.c 		data = &ring->data[i];
ring              527 dev/pci/if_iwi.c fail:	iwi_free_tx_ring(sc, ring);
ring              532 dev/pci/if_iwi.c iwi_reset_tx_ring(struct iwi_softc *sc, struct iwi_tx_ring *ring)
ring              538 dev/pci/if_iwi.c 		data = &ring->data[i];
ring              547 dev/pci/if_iwi.c 	ring->queued = 0;
ring              548 dev/pci/if_iwi.c 	ring->cur = ring->next = 0;
ring              552 dev/pci/if_iwi.c iwi_free_tx_ring(struct iwi_softc *sc, struct iwi_tx_ring *ring)
ring              557 dev/pci/if_iwi.c 	if (ring->map != NULL) {
ring              558 dev/pci/if_iwi.c 		if (ring->desc != NULL) {
ring              559 dev/pci/if_iwi.c 			bus_dmamap_unload(sc->sc_dmat, ring->map);
ring              560 dev/pci/if_iwi.c 			bus_dmamem_unmap(sc->sc_dmat, (caddr_t)ring->desc,
ring              562 dev/pci/if_iwi.c 			bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
ring              564 dev/pci/if_iwi.c 		bus_dmamap_destroy(sc->sc_dmat, ring->map);
ring              568 dev/pci/if_iwi.c 		data = &ring->data[i];
ring              579 dev/pci/if_iwi.c iwi_alloc_rx_ring(struct iwi_softc *sc, struct iwi_rx_ring *ring)
ring              584 dev/pci/if_iwi.c 	ring->cur = 0;
ring              628 dev/pci/if_iwi.c fail:	iwi_free_rx_ring(sc, ring);
ring              633 dev/pci/if_iwi.c iwi_reset_rx_ring(struct iwi_softc *sc, struct iwi_rx_ring *ring)
ring              635 dev/pci/if_iwi.c 	ring->cur = 0;
ring              639 dev/pci/if_iwi.c iwi_free_rx_ring(struct iwi_softc *sc, struct iwi_rx_ring *ring)
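
if_iwi.c nests its teardown guards the other way around: the map is created first, so ring->map != NULL is checked on the outside, the descriptor memory is torn down inside it, and only then is the map itself destroyed. A sketch matching the cmd-ring entries above (the unmap size follows the bzero() at if_iwi.c line 434):

	if (ring->map != NULL) {
		if (ring->desc != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ring->map);
			bus_dmamem_unmap(sc->sc_dmat, (caddr_t)ring->desc,
			    sizeof (struct iwi_cmd_desc) * IWI_CMD_RING_COUNT);
			bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
		}
		bus_dmamap_destroy(sc->sc_dmat, ring->map);
	}
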
ring             1179 dev/pci/if_nfe.c nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
ring             1190 dev/pci/if_nfe.c 		desc = (void **)&ring->desc64;
ring             1193 dev/pci/if_nfe.c 		desc = (void **)&ring->desc32;
ring             1197 dev/pci/if_nfe.c 	ring->cur = ring->next = 0;
ring             1198 dev/pci/if_nfe.c 	ring->bufsz = MCLBYTES;
ring             1201 dev/pci/if_nfe.c 	    NFE_RX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
ring             1209 dev/pci/if_nfe.c 	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
ring             1216 dev/pci/if_nfe.c 	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
ring             1224 dev/pci/if_nfe.c 	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
ring             1233 dev/pci/if_nfe.c 	ring->physaddr = ring->map->dm_segs[0].ds_addr;
ring             1236 dev/pci/if_nfe.c 		ring->bufsz = NFE_JBYTES;
ring             1311 dev/pci/if_nfe.c 	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
ring             1316 dev/pci/if_nfe.c fail:	nfe_free_rx_ring(sc, ring);
ring             1321 dev/pci/if_nfe.c nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
ring             1327 dev/pci/if_nfe.c 			ring->desc64[i].length = htole16(ring->bufsz);
ring             1328 dev/pci/if_nfe.c 			ring->desc64[i].flags = htole16(NFE_RX_READY);
ring             1330 dev/pci/if_nfe.c 			ring->desc32[i].length = htole16(ring->bufsz);
ring             1331 dev/pci/if_nfe.c 			ring->desc32[i].flags = htole16(NFE_RX_READY);
ring             1335 dev/pci/if_nfe.c 	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
ring             1338 dev/pci/if_nfe.c 	ring->cur = ring->next = 0;
ring             1342 dev/pci/if_nfe.c nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
ring             1349 dev/pci/if_nfe.c 		desc = ring->desc64;
ring             1352 dev/pci/if_nfe.c 		desc = ring->desc32;
ring             1357 dev/pci/if_nfe.c 		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
ring             1358 dev/pci/if_nfe.c 		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
ring             1359 dev/pci/if_nfe.c 		bus_dmamap_unload(sc->sc_dmat, ring->map);
ring             1362 dev/pci/if_nfe.c 		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
ring             1366 dev/pci/if_nfe.c 		data = &ring->data[i];
ring             1419 dev/pci/if_nfe.c 	struct nfe_rx_ring *ring = &sc->rxq;
ring             1429 dev/pci/if_nfe.c 	    NFE_JPOOL_SIZE, 0, BUS_DMA_NOWAIT, &ring->jmap);
ring             1437 dev/pci/if_nfe.c 	    &ring->jseg, 1, &nsegs, BUS_DMA_NOWAIT);
ring             1444 dev/pci/if_nfe.c 	error = bus_dmamem_map(sc->sc_dmat, &ring->jseg, nsegs, NFE_JPOOL_SIZE,
ring             1445 dev/pci/if_nfe.c 	    &ring->jpool, BUS_DMA_NOWAIT);
ring             1452 dev/pci/if_nfe.c 	error = bus_dmamap_load(sc->sc_dmat, ring->jmap, ring->jpool,
ring             1461 dev/pci/if_nfe.c 	SLIST_INIT(&ring->jfreelist);
ring             1463 dev/pci/if_nfe.c 	buf = ring->jpool;
ring             1464 dev/pci/if_nfe.c 	physaddr = ring->jmap->dm_segs[0].ds_addr;
ring             1466 dev/pci/if_nfe.c 		jbuf = &ring->jbuf[i];
ring             1471 dev/pci/if_nfe.c 		SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);
ring             1486 dev/pci/if_nfe.c 	struct nfe_rx_ring *ring = &sc->rxq;
ring             1488 dev/pci/if_nfe.c 	if (ring->jmap != NULL) {
ring             1489 dev/pci/if_nfe.c 		bus_dmamap_sync(sc->sc_dmat, ring->jmap, 0,
ring             1490 dev/pci/if_nfe.c 		    ring->jmap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
ring             1491 dev/pci/if_nfe.c 		bus_dmamap_unload(sc->sc_dmat, ring->jmap);
ring             1492 dev/pci/if_nfe.c 		bus_dmamap_destroy(sc->sc_dmat, ring->jmap);
ring             1494 dev/pci/if_nfe.c 	if (ring->jpool != NULL) {
ring             1495 dev/pci/if_nfe.c 		bus_dmamem_unmap(sc->sc_dmat, ring->jpool, NFE_JPOOL_SIZE);
ring             1496 dev/pci/if_nfe.c 		bus_dmamem_free(sc->sc_dmat, &ring->jseg, 1);
ring             1501 dev/pci/if_nfe.c nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
ring             1508 dev/pci/if_nfe.c 		desc = (void **)&ring->desc64;
ring             1511 dev/pci/if_nfe.c 		desc = (void **)&ring->desc32;
ring             1515 dev/pci/if_nfe.c 	ring->queued = 0;
ring             1516 dev/pci/if_nfe.c 	ring->cur = ring->next = 0;
ring             1519 dev/pci/if_nfe.c 	    NFE_TX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
ring             1528 dev/pci/if_nfe.c 	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
ring             1535 dev/pci/if_nfe.c 	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
ring             1543 dev/pci/if_nfe.c 	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
ring             1552 dev/pci/if_nfe.c 	ring->physaddr = ring->map->dm_segs[0].ds_addr;
ring             1557 dev/pci/if_nfe.c 		    &ring->data[i].map);
ring             1567 dev/pci/if_nfe.c fail:	nfe_free_tx_ring(sc, ring);
ring             1572 dev/pci/if_nfe.c nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
ring             1579 dev/pci/if_nfe.c 			ring->desc64[i].flags = 0;
ring             1581 dev/pci/if_nfe.c 			ring->desc32[i].flags = 0;
ring             1583 dev/pci/if_nfe.c 		data = &ring->data[i];
ring             1594 dev/pci/if_nfe.c 	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
ring             1597 dev/pci/if_nfe.c 	ring->queued = 0;
ring             1598 dev/pci/if_nfe.c 	ring->cur = ring->next = 0;
ring             1602 dev/pci/if_nfe.c nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
ring             1609 dev/pci/if_nfe.c 		desc = ring->desc64;
ring             1612 dev/pci/if_nfe.c 		desc = ring->desc32;
ring             1617 dev/pci/if_nfe.c 		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
ring             1618 dev/pci/if_nfe.c 		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
ring             1619 dev/pci/if_nfe.c 		bus_dmamap_unload(sc->sc_dmat, ring->map);
ring             1622 dev/pci/if_nfe.c 		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
ring             1626 dev/pci/if_nfe.c 		data = &ring->data[i];
ring             1638 dev/pci/if_nfe.c 		data = &ring->data[i];
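
if_nfe.c drives chips with two descriptor formats through one allocation path: a void ** is pointed at either ring->desc64 or ring->desc32 and descsize is set to match, so the bus_dma calls themselves never care which layout is in use. A hedged sketch of the dispatch (NFE_40BIT_ADDR is assumed as the distinguishing flag, and the descriptor struct names are assumed to match the members; the listing shows only the two branches):

	void **desc;
	size_t descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {		/* assumed flag */
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}
	/* ... later: bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	 *     NFE_RX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT) */
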
ring             1052 dev/pci/if_nxe.c 	struct nxe_ctx_ring		*ring;
ring             1104 dev/pci/if_nxe.c 		ring = &ctx->ctx_rx_rings[i];
ring             1110 dev/pci/if_nxe.c 		ring->r_addr = htole64(NXE_DMA_DVA(nr->nr_dmamem));
ring             1111 dev/pci/if_nxe.c 		ring->r_size = htole32(nr->nr_nentries);
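
The nxe fragment publishes each rx ring to shared firmware context memory; restated with comments (fields per the listing):

	/* bus address and entry count go to the firmware little-endian */
	ring->r_addr = htole64(NXE_DMA_DVA(nr->nr_dmamem));
	ring->r_size = htole32(nr->nr_nentries);
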
ring              500 dev/pci/if_wpi.c 	struct wpi_rx_ring *ring = &sc->rxq;
ring              504 dev/pci/if_wpi.c 	error = wpi_dma_contig_alloc(sc->sc_dmat, &ring->buf_dma, NULL,
ring              513 dev/pci/if_wpi.c 	SLIST_INIT(&ring->freelist);
ring              515 dev/pci/if_wpi.c 		struct wpi_rbuf *rbuf = &ring->rbuf[i];
ring              518 dev/pci/if_wpi.c 		rbuf->vaddr = ring->buf_dma.vaddr + i * WPI_RBUF_SIZE;
ring              519 dev/pci/if_wpi.c 		rbuf->paddr = ring->buf_dma.paddr + i * WPI_RBUF_SIZE;
ring              521 dev/pci/if_wpi.c 		SLIST_INSERT_HEAD(&ring->freelist, rbuf, next);
ring              533 dev/pci/if_wpi.c wpi_alloc_rx_ring(struct wpi_softc *sc, struct wpi_rx_ring *ring)
ring              537 dev/pci/if_wpi.c 	ring->cur = 0;
ring              539 dev/pci/if_wpi.c 	error = wpi_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma,
ring              540 dev/pci/if_wpi.c 	    (void **)&ring->desc, WPI_RX_RING_COUNT * sizeof (uint32_t),
ring              552 dev/pci/if_wpi.c 		struct wpi_rx_data *data = &ring->data[i];
ring              574 dev/pci/if_wpi.c 		ring->desc[i] = htole32(rbuf->paddr);
ring              579 dev/pci/if_wpi.c fail:	wpi_free_rx_ring(sc, ring);
ring              584 dev/pci/if_wpi.c wpi_reset_rx_ring(struct wpi_softc *sc, struct wpi_rx_ring *ring)
ring              602 dev/pci/if_wpi.c 	ring->cur = 0;
ring              606 dev/pci/if_wpi.c wpi_free_rx_ring(struct wpi_softc *sc, struct wpi_rx_ring *ring)
ring              610 dev/pci/if_wpi.c 	wpi_dma_contig_free(&ring->desc_dma);
ring              613 dev/pci/if_wpi.c 		if (ring->data[i].m != NULL)
ring              614 dev/pci/if_wpi.c 			m_freem(ring->data[i].m);
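
if_wpi.c hides the bus_dma boilerplate behind a wpi_dma_contig_alloc() helper and carves the resulting rx buffer into fixed-size chunks kept on an SLIST free list. A minimal sketch of that pool, using names from the listing plus WPI_RBUF_COUNT as an assumed pool size; the pop-and-attach step mirrors the rx-interrupt entry further down (if_wpi.c line 1254):

	SLIST_INIT(&ring->freelist);
	for (i = 0; i < WPI_RBUF_COUNT; i++) {	/* pool size assumed */
		struct wpi_rbuf *rbuf = &ring->rbuf[i];

		/* each chunk sits i slots into the contiguous buffer */
		rbuf->vaddr = ring->buf_dma.vaddr + i * WPI_RBUF_SIZE;
		rbuf->paddr = ring->buf_dma.paddr + i * WPI_RBUF_SIZE;
		SLIST_INSERT_HEAD(&ring->freelist, rbuf, next);
	}

	/* taking a chunk: pop the head, point a descriptor at it */
	rbuf = SLIST_FIRST(&ring->freelist);
	if (rbuf != NULL) {
		SLIST_REMOVE_HEAD(&ring->freelist, next);
		ring->desc[ring->cur] = htole32(rbuf->paddr);
	}
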
ring              619 dev/pci/if_wpi.c wpi_alloc_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring, int count,
ring              624 dev/pci/if_wpi.c 	ring->qid = qid;
ring              625 dev/pci/if_wpi.c 	ring->count = count;
ring              626 dev/pci/if_wpi.c 	ring->queued = 0;
ring              627 dev/pci/if_wpi.c 	ring->cur = 0;
ring              629 dev/pci/if_wpi.c 	error = wpi_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma,
ring              630 dev/pci/if_wpi.c 	    (void **)&ring->desc, count * sizeof (struct wpi_tx_desc),
ring              639 dev/pci/if_wpi.c 	sc->shared->txbase[qid] = htole32(ring->desc_dma.paddr);
ring              641 dev/pci/if_wpi.c 	error = wpi_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma,
ring              642 dev/pci/if_wpi.c 	    (void **)&ring->cmd, count * sizeof (struct wpi_tx_cmd), 4,
ring              650 dev/pci/if_wpi.c 	ring->data = malloc(count * sizeof (struct wpi_tx_data), M_DEVBUF,
ring              652 dev/pci/if_wpi.c 	if (ring->data == NULL) {
ring              658 dev/pci/if_wpi.c 	memset(ring->data, 0, count * sizeof (struct wpi_tx_data));
ring              661 dev/pci/if_wpi.c 		struct wpi_tx_data *data = &ring->data[i];
ring              675 dev/pci/if_wpi.c fail:	wpi_free_tx_ring(sc, ring);
ring              680 dev/pci/if_wpi.c wpi_reset_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring)
ring              686 dev/pci/if_wpi.c 	WPI_WRITE(sc, WPI_TX_CONFIG(ring->qid), 0);
ring              688 dev/pci/if_wpi.c 		if (WPI_READ(sc, WPI_TX_STATUS) & WPI_TX_IDLE(ring->qid))
ring              695 dev/pci/if_wpi.c 		    sc->sc_dev.dv_xname, ring->qid);
ring              700 dev/pci/if_wpi.c 	for (i = 0; i < ring->count; i++) {
ring              701 dev/pci/if_wpi.c 		struct wpi_tx_data *data = &ring->data[i];
ring              710 dev/pci/if_wpi.c 	ring->queued = 0;
ring              711 dev/pci/if_wpi.c 	ring->cur = 0;
ring              715 dev/pci/if_wpi.c wpi_free_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring)
ring              719 dev/pci/if_wpi.c 	wpi_dma_contig_free(&ring->desc_dma);
ring              720 dev/pci/if_wpi.c 	wpi_dma_contig_free(&ring->cmd_dma);
ring              722 dev/pci/if_wpi.c 	if (ring->data != NULL) {
ring              723 dev/pci/if_wpi.c 		for (i = 0; i < ring->count; i++) {
ring              724 dev/pci/if_wpi.c 			struct wpi_tx_data *data = &ring->data[i];
ring              731 dev/pci/if_wpi.c 		free(ring->data, M_DEVBUF);
ring             1200 dev/pci/if_wpi.c 	struct wpi_rx_ring *ring = &sc->rxq;
ring             1222 dev/pci/if_wpi.c 	    "chan=%d tstamp=%llu\n", ring->cur, letoh32(desc->len),
ring             1254 dev/pci/if_wpi.c 	ring->desc[ring->cur] = htole32(rbuf->paddr);
ring             1322 dev/pci/if_wpi.c 	struct wpi_tx_ring *ring = &sc->txq[desc->qid & 0x3];
ring             1323 dev/pci/if_wpi.c 	struct wpi_tx_data *data = &ring->data[desc->idx];
ring             1354 dev/pci/if_wpi.c 	ring->queued--;
ring             1364 dev/pci/if_wpi.c 	struct wpi_tx_ring *ring = &sc->cmdq;
ring             1370 dev/pci/if_wpi.c 	data = &ring->data[desc->idx];
ring             1379 dev/pci/if_wpi.c 	wakeup(&ring->cmd[desc->idx]);
ring             1560 dev/pci/if_wpi.c 	struct wpi_tx_ring *ring = &sc->txq[ac];
ring             1569 dev/pci/if_wpi.c 	desc = &ring->desc[ring->cur];
ring             1570 dev/pci/if_wpi.c 	data = &ring->data[ring->cur];
ring             1610 dev/pci/if_wpi.c 	cmd = &ring->cmd[ring->cur];
ring             1613 dev/pci/if_wpi.c 	cmd->qid = ring->qid;
ring             1614 dev/pci/if_wpi.c 	cmd->idx = ring->cur;
ring             1738 dev/pci/if_wpi.c 	    ring->qid, ring->cur, m0->m_pkthdr.len, data->map->dm_nsegs));
ring             1743 dev/pci/if_wpi.c 	desc->segs[0].addr = htole32(ring->cmd_dma.paddr +
ring             1744 dev/pci/if_wpi.c 	    ring->cur * sizeof (struct wpi_tx_cmd));
ring             1753 dev/pci/if_wpi.c 	ring->queued++;
ring             1756 dev/pci/if_wpi.c 	ring->cur = (ring->cur + 1) % WPI_TX_RING_COUNT;
ring             1757 dev/pci/if_wpi.c 	WPI_WRITE(sc, WPI_TX_WIDX, ring->qid << 8 | ring->cur);
ring             2027 dev/pci/if_wpi.c 	struct wpi_tx_ring *ring = &sc->cmdq;
ring             2033 dev/pci/if_wpi.c 	desc = &ring->desc[ring->cur];
ring             2034 dev/pci/if_wpi.c 	cmd = &ring->cmd[ring->cur];
ring             2038 dev/pci/if_wpi.c 	cmd->qid = ring->qid;
ring             2039 dev/pci/if_wpi.c 	cmd->idx = ring->cur;
ring             2043 dev/pci/if_wpi.c 	desc->segs[0].addr = htole32(ring->cmd_dma.paddr +
ring             2044 dev/pci/if_wpi.c 	    ring->cur * sizeof (struct wpi_tx_cmd));
ring             2048 dev/pci/if_wpi.c 	ring->cur = (ring->cur + 1) % WPI_CMD_RING_COUNT;
ring             2049 dev/pci/if_wpi.c 	WPI_WRITE(sc, WPI_TX_WIDX, ring->qid << 8 | ring->cur);
ring             2277 dev/pci/if_wpi.c 	struct wpi_tx_ring *ring = &sc->cmdq;
ring             2285 dev/pci/if_wpi.c 	desc = &ring->desc[ring->cur];
ring             2286 dev/pci/if_wpi.c 	data = &ring->data[ring->cur];
ring             2295 dev/pci/if_wpi.c 	cmd = &ring->cmd[ring->cur];
ring             2298 dev/pci/if_wpi.c 	cmd->qid = ring->qid;
ring             2299 dev/pci/if_wpi.c 	cmd->idx = ring->cur;
ring             2329 dev/pci/if_wpi.c 	desc->segs[0].addr = htole32(ring->cmd_dma.paddr +
ring             2330 dev/pci/if_wpi.c 	    ring->cur * sizeof (struct wpi_tx_cmd));
ring             2336 dev/pci/if_wpi.c 	ring->cur = (ring->cur + 1) % WPI_CMD_RING_COUNT;
ring             2337 dev/pci/if_wpi.c 	WPI_WRITE(sc, WPI_TX_WIDX, ring->qid << 8 | ring->cur);
ring             2415 dev/pci/if_wpi.c 	struct wpi_tx_ring *ring = &sc->cmdq;
ring             2428 dev/pci/if_wpi.c 	desc = &ring->desc[ring->cur];
ring             2429 dev/pci/if_wpi.c 	data = &ring->data[ring->cur];
ring             2449 dev/pci/if_wpi.c 	cmd->qid = ring->qid;
ring             2450 dev/pci/if_wpi.c 	cmd->idx = ring->cur;
ring             2560 dev/pci/if_wpi.c 	ring->cur = (ring->cur + 1) % WPI_CMD_RING_COUNT;
ring             2561 dev/pci/if_wpi.c 	WPI_WRITE(sc, WPI_TX_WIDX, ring->qid << 8 | ring->cur);
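
Every wpi command and tx path above ends the same way: the command slot is stamped with its queue and index, the descriptor is pointed at that slot's bus address inside cmd_dma, and the producer index is advanced modulo the ring size before being written to WPI_TX_WIDX to kick the device. Condensed from the listing (declarations omitted):

	cmd = &ring->cmd[ring->cur];
	cmd->qid = ring->qid;
	cmd->idx = ring->cur;

	/* the descriptor references the command slot inside cmd_dma */
	desc->segs[0].addr = htole32(ring->cmd_dma.paddr +
	    ring->cur * sizeof (struct wpi_tx_cmd));

	/* advance the producer and tell the device where it now is */
	ring->cur = (ring->cur + 1) % WPI_CMD_RING_COUNT;
	WPI_WRITE(sc, WPI_TX_WIDX, ring->qid << 8 | ring->cur);
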