map 403 arch/i386/i386/ioapic.c struct mp_intr_map *map;
map 406 arch/i386/i386/ioapic.c map = pp->ip_map;
map 407 arch/i386/i386/ioapic.c redlo = (map == NULL) ? IOAPIC_REDLO_MASK : map->redir;
map 437 arch/i386/i386/ioapic.c if (map != NULL && ((map->flags & 3) == MPS_INTPO_DEF)) {
map 3651 arch/i386/i386/machdep.c struct i386_bus_dmamap *map;
map 3674 arch/i386/i386/machdep.c map = (struct i386_bus_dmamap *)mapstore;
map 3675 arch/i386/i386/machdep.c map->_dm_size = size;
map 3676 arch/i386/i386/machdep.c map->_dm_segcnt = nsegments;
map 3677 arch/i386/i386/machdep.c map->_dm_maxsegsz = maxsegsz;
map 3678 arch/i386/i386/machdep.c map->_dm_boundary = boundary;
map 3679 arch/i386/i386/machdep.c map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
map 3680 arch/i386/i386/machdep.c map->dm_mapsize = 0; /* no valid mappings */
map 3681 arch/i386/i386/machdep.c map->dm_nsegs = 0;
map 3683 arch/i386/i386/machdep.c *dmamp = map;
map 3692 arch/i386/i386/machdep.c _bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
map 3695 arch/i386/i386/machdep.c free(map, M_DEVBUF);
map 3703 arch/i386/i386/machdep.c _bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
map 3712 arch/i386/i386/machdep.c map->dm_mapsize = 0;
map 3713 arch/i386/i386/machdep.c map->dm_nsegs = 0;
map 3715 arch/i386/i386/machdep.c if (buflen > map->_dm_size)
map 3719 arch/i386/i386/machdep.c error = _bus_dmamap_load_buffer(t, map, buf, buflen, p, flags,
map 3722 arch/i386/i386/machdep.c map->dm_mapsize = buflen;
map 3723 arch/i386/i386/machdep.c map->dm_nsegs = seg + 1;
map 3732 arch/i386/i386/machdep.c _bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
map 3742 arch/i386/i386/machdep.c map->dm_mapsize = 0;
map 3743 arch/i386/i386/machdep.c map->dm_nsegs = 0;
map 3750 arch/i386/i386/machdep.c if (m0->m_pkthdr.len > map->_dm_size)
map 3759 arch/i386/i386/machdep.c error = _bus_dmamap_load_buffer(t, map, m->m_data, m->m_len,
map 3764 arch/i386/i386/machdep.c map->dm_mapsize = m0->m_pkthdr.len;
map 3765 arch/i386/i386/machdep.c map->dm_nsegs = seg + 1;
map 3774 arch/i386/i386/machdep.c _bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
map 3787 arch/i386/i386/machdep.c map->dm_mapsize = 0;
map 3788 arch/i386/i386/machdep.c map->dm_nsegs = 0;
map 3793 arch/i386/i386/machdep.c if (resid > map->_dm_size)
map 3815 arch/i386/i386/machdep.c error = _bus_dmamap_load_buffer(t, map, addr, minlen,
map 3822 arch/i386/i386/machdep.c map->dm_mapsize = uio->uio_resid;
map 3823 arch/i386/i386/machdep.c map->dm_nsegs = seg + 1;
map 3833 arch/i386/i386/machdep.c _bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map, bus_dma_segment_t *segs,
map 3836 arch/i386/i386/machdep.c if (nsegs > map->_dm_segcnt || size > map->_dm_size)
map 3842 arch/i386/i386/machdep.c if (map->_dm_boundary) {
map 3843 arch/i386/i386/machdep.c bus_addr_t bmask = ~(map->_dm_boundary - 1);
map 3847 arch/i386/i386/machdep.c if (segs[i].ds_len > map->_dm_maxsegsz)
map 3855 arch/i386/i386/machdep.c bcopy(segs, map->dm_segs, nsegs * sizeof(*segs));
map 3856 arch/i386/i386/machdep.c map->dm_nsegs = nsegs;
map 3865 arch/i386/i386/machdep.c _bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
map 3872 arch/i386/i386/machdep.c map->dm_mapsize = 0;
map 3873 arch/i386/i386/machdep.c map->dm_nsegs = 0;
map 4012 arch/i386/i386/machdep.c _bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
map 4028 arch/i386/i386/machdep.c bmask = ~(map->_dm_boundary - 1);
map 4046 arch/i386/i386/machdep.c if (map->_dm_boundary > 0) {
map 4047 arch/i386/i386/machdep.c baddr = (curaddr + map->_dm_boundary) & bmask;
map 4057 arch/i386/i386/machdep.c map->dm_segs[seg].ds_addr = curaddr;
map 4058 arch/i386/i386/machdep.c map->dm_segs[seg].ds_len = sgsize;
map 4062 arch/i386/i386/machdep.c (map->dm_segs[seg].ds_len + sgsize) <=
map 4063 arch/i386/i386/machdep.c map->_dm_maxsegsz &&
map 4064 arch/i386/i386/machdep.c (map->_dm_boundary == 0 ||
map 4065 arch/i386/i386/machdep.c (map->dm_segs[seg].ds_addr & bmask) ==
map 4067 arch/i386/i386/machdep.c map->dm_segs[seg].ds_len += sgsize;
map 4069 arch/i386/i386/machdep.c if (++seg >= map->_dm_segcnt)
map 4071 arch/i386/i386/machdep.c map->dm_segs[seg].ds_addr = curaddr;
map 4072 arch/i386/i386/machdep.c map->dm_segs[seg].ds_len = sgsize;
map 410 arch/i386/i386/mpbios.c mpbios_search(struct device *self, paddr_t start, int count, struct mp_map *map)
map 431 arch/i386/i386/mpbios.c return (mpbios_map(start + i, len, map));
map 613 arch/i386/i386/pmap.c pmap_exec_fixup(struct vm_map *map, struct trapframe *tf, struct pcb *pcb)
map 616 arch/i386/i386/pmap.c struct pmap *pm = vm_map_pmap(map);
map 619 arch/i386/i386/pmap.c vm_map_lock(map);
map 620 arch/i386/i386/pmap.c for (ent = (&map->header)->next; ent != &map->header; ent = ent->next) {
map 628 arch/i386/i386/pmap.c vm_map_unlock(map);
map 1244 arch/i386/i386/pmap.c struct vm_map *map;
map 1257 arch/i386/i386/pmap.c map = kernel_map;
map 1259 arch/i386/i386/pmap.c map = kmem_map;
map 1260 arch/i386/i386/pmap.c if (vm_map_lock_try(map)) {
map 1267 arch/i386/i386/pmap.c uvm_unmap_remove(map, (vaddr_t)pvp, ((vaddr_t)pvp) + PAGE_SIZE,
map 1269 arch/i386/i386/pmap.c vm_map_unlock(map);
map 457 arch/i386/i386/trap.c struct vm_map *map;
map 477 arch/i386/i386/trap.c map = kernel_map;
map 479 arch/i386/i386/trap.c map = &vm->vm_map;
map 482 arch/i386/i386/trap.c if (map == kernel_map && va == 0) {
map 490 arch/i386/i386/trap.c rv = uvm_fault(map, va, 0, ftype);
map 494 arch/i386/i386/trap.c if (map != kernel_map)
map 510 arch/i386/i386/trap.c map, va, ftype, rv);
map 663 arch/i386/isa/isa_machdep.c bus_dmamap_t map;
map 674 arch/i386/isa/isa_machdep.c map = *dmamp;
map 675 arch/i386/isa/isa_machdep.c map->_dm_cookie = NULL;
map 704 arch/i386/isa/isa_machdep.c ((map->_dm_size / NBPG) + 1) > map->_dm_segcnt) {
map 706 arch/i386/isa/isa_machdep.c cookiesize += (sizeof(bus_dma_segment_t) * map->_dm_segcnt);
map 720 arch/i386/isa/isa_machdep.c map->_dm_cookie = cookie;
map 730 arch/i386/isa/isa_machdep.c error = _isa_dma_alloc_bouncebuf(t, map, size, flags);
map 735 arch/i386/isa/isa_machdep.c if (map->_dm_cookie != NULL)
map 736 arch/i386/isa/isa_machdep.c free(map->_dm_cookie, M_DEVBUF);
map 737 arch/i386/isa/isa_machdep.c _bus_dmamap_destroy(t, map);
map 746 arch/i386/isa/isa_machdep.c _isa_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
map 748 arch/i386/isa/isa_machdep.c struct i386_isa_dma_cookie *cookie = map->_dm_cookie;
map 754 arch/i386/isa/isa_machdep.c _isa_dma_free_bouncebuf(t, map);
map 757 arch/i386/isa/isa_machdep.c _bus_dmamap_destroy(t, map);
map 764 arch/i386/isa/isa_machdep.c _isa_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
map 767 arch/i386/isa/isa_machdep.c struct i386_isa_dma_cookie *cookie = map->_dm_cookie;
map 781 arch/i386/isa/isa_machdep.c map->_dm_segcnt, map->_dm_boundary, p) == 0)
map 782 arch/i386/isa/isa_machdep.c return (_bus_dmamap_load(t, map, buf, buflen,
map 791 arch/i386/isa/isa_machdep.c error = _isa_dma_alloc_bouncebuf(t, map, buflen,
map 803 arch/i386/isa/isa_machdep.c error = _bus_dmamap_load(t, map, cookie->id_bouncebuf,
map 811 arch/i386/isa/isa_machdep.c if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
map 812 arch/i386/isa/isa_machdep.c _isa_dma_free_bouncebuf(t, map);
map 821 arch/i386/isa/isa_machdep.c error = _bus_dmamap_load(t, map, buf, buflen, p, flags);
map 831 arch/i386/isa/isa_machdep.c _isa_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m,
map 842 arch/i386/isa/isa_machdep.c _isa_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
map 854 arch/i386/isa/isa_machdep.c _isa_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
map 865 arch/i386/isa/isa_machdep.c _isa_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
map 867 arch/i386/isa/isa_machdep.c struct i386_isa_dma_cookie *cookie = map->_dm_cookie;
map 874 arch/i386/isa/isa_machdep.c (map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
map 875 arch/i386/isa/isa_machdep.c _isa_dma_free_bouncebuf(t, map);
map 882 arch/i386/isa/isa_machdep.c _bus_dmamap_unload(t, map);
map 889 arch/i386/isa/isa_machdep.c _isa_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
map 892 arch/i386/isa/isa_machdep.c struct i386_isa_dma_cookie *cookie = map->_dm_cookie;
map 896 arch/i386/isa/isa_machdep.c if (offset >= map->dm_mapsize)
map 898 arch/i386/isa/isa_machdep.c if (len == 0 || (offset + len) > map->dm_mapsize)
map 941 arch/i386/isa/isa_machdep.c _bus_dmamap_sync(t, map, op);
map 1072 arch/i386/isa/isa_machdep.c _isa_dma_alloc_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map, bus_size_t size, int flags)
map 1074 arch/i386/isa/isa_machdep.c struct i386_isa_dma_cookie *cookie = map->_dm_cookie;
map 1079 arch/i386/isa/isa_machdep.c NBPG, map->_dm_boundary, cookie->id_bouncesegs,
map 1080 arch/i386/isa/isa_machdep.c map->_dm_segcnt, &cookie->id_nbouncesegs, flags);
map 1102 arch/i386/isa/isa_machdep.c _isa_dma_free_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map)
map 1104 arch/i386/isa/isa_machdep.c struct i386_isa_dma_cookie *cookie = map->_dm_cookie;
map 82 ddb/db_break.c db_set_breakpoint(struct vm_map *map, db_addr_t addr, int count)
map 86 ddb/db_break.c if (db_find_breakpoint(map, addr)) {
map 104 ddb/db_break.c bkpt->map = map;
map 115 ddb/db_break.c db_delete_breakpoint(struct vm_map *map, db_addr_t addr)
map 123 ddb/db_break.c if (db_map_equal(bkpt->map, map) &&
map 138 ddb/db_break.c db_find_breakpoint(struct vm_map *map, db_addr_t addr)
map 146 ddb/db_break.c if (db_map_equal(bkpt->map, map) &&
map 171 ddb/db_break.c if (db_map_current(bkpt->map)) {
map 191 ddb/db_break.c if (db_map_current(bkpt->map)) {
map 221 ddb/db_break.c bkpt->map = NULL;
map 258 ddb/db_break.c db_map_current(bkpt->map) ? "*" : " ",
map 259 ddb/db_break.c bkpt->map, bkpt->init_count);
map 307 ddb/db_break.c db_map_current(struct vm_map *map)
map 312 ddb/db_break.c return ((map == NULL) ||
map 313 ddb/db_break.c (map == kernel_map) ||
map 315 ddb/db_break.c (map == thread->proc->map)));
map 336 ddb/db_break.c return thread->proc->map;
map 42 ddb/db_break.h struct vm_map *map; /* in this map */
map 87 ddb/db_watch.c db_set_watchpoint(struct vm_map *map, db_addr_t addr, vsize_t size)
map 91 ddb/db_watch.c if (map == NULL) {
map 103 ddb/db_watch.c if (db_map_equal(watch->map, map) &&
map 116 ddb/db_watch.c watch->map = map;
map 127 ddb/db_watch.c db_delete_watchpoint(struct vm_map *map, db_addr_t addr)
map 135 ddb/db_watch.c if (db_map_equal(watch->map, map) &&
map 161 ddb/db_watch.c db_map_current(watch->map) ? "*" : " ",
map 162 ddb/db_watch.c watch->map, watch->loaddr,
map 208 ddb/db_watch.c pmap_protect(watch->map->pmap,
map 212 ddb/db_watch.c pmap_update(watch->map->pmap);
map 224 ddb/db_watch.c db_find_watchpoint(struct vm_map *map, db_addr_t addr, db_regs_t *regs)
map 232 ddb/db_watch.c if (db_map_equal(watch->map, map)) {
map 40 ddb/db_watch.h struct vm_map *map; /* in this map */
map 130 dev/acpi/acpimadt.c struct mp_intr_map *map;
map 240 dev/acpi/acpimadt.c map = malloc(sizeof (struct mp_intr_map), M_DEVBUF, M_NOWAIT);
map 241 dev/acpi/acpimadt.c if (map == NULL)
map 244 dev/acpi/acpimadt.c memset(map, 0, sizeof *map);
map 245 dev/acpi/acpimadt.c map->ioapic = apic;
map 246 dev/acpi/acpimadt.c map->ioapic_pin = pin - apic->sc_apic_vecbase;
map 247 dev/acpi/acpimadt.c map->bus_pin = entry->madt_override.source;
map 248 dev/acpi/acpimadt.c map->flags = entry->madt_override.flags;
map 250 dev/acpi/acpimadt.c map->global_int = entry->madt_override.global_int;
map 252 dev/acpi/acpimadt.c acpimadt_cfg_intr(entry->madt_override.flags, &map->redir);
map 254 dev/acpi/acpimadt.c map->ioapic_ih = APIC_INT_VIA_APIC |
map 258 dev/acpi/acpimadt.c apic->sc_pins[pin].ip_map = map;
map 260 dev/acpi/acpimadt.c map->next = mp_isa_bus->mb_intrs;
map 261 dev/acpi/acpimadt.c mp_isa_bus->mb_intrs = map;
map 272 dev/acpi/acpimadt.c map = &mp_intrs[mp_nintrs++];
map 273 dev/acpi/acpimadt.c memset(map, 0, sizeof *map);
map 274 dev/acpi/acpimadt.c map->cpu_id = lapic_map[entry->madt_lapic_nmi.acpi_proc_id];
map 275 dev/acpi/acpimadt.c map->ioapic_pin = pin;
map 276 dev/acpi/acpimadt.c map->flags = entry->madt_lapic_nmi.flags;
map 278 dev/acpi/acpimadt.c acpimadt_cfg_intr(entry->madt_lapic_nmi.flags, &map->redir);
map 279 dev/acpi/acpimadt.c map->redir &= ~IOAPIC_REDLO_DEL_MASK;
map 280 dev/acpi/acpimadt.c map->redir |= (IOAPIC_REDLO_DEL_NMI << IOAPIC_REDLO_DEL_SHIFT);
map 296 dev/acpi/acpimadt.c map = malloc(sizeof (struct mp_intr_map), M_DEVBUF, M_NOWAIT);
map 297 dev/acpi/acpimadt.c if (map == NULL)
map 300 dev/acpi/acpimadt.c memset(map, 0, sizeof *map);
map 301 dev/acpi/acpimadt.c map->ioapic = apic;
map 302 dev/acpi/acpimadt.c map->ioapic_pin = pin;
map 303 dev/acpi/acpimadt.c map->bus_pin = pin;
map 305 dev/acpi/acpimadt.c map->global_int = -1;
map 307 dev/acpi/acpimadt.c map->redir = (IOAPIC_REDLO_DEL_LOPRI << IOAPIC_REDLO_DEL_SHIFT);
map 309 dev/acpi/acpimadt.c map->ioapic_ih = APIC_INT_VIA_APIC |
map 313 dev/acpi/acpimadt.c apic->sc_pins[pin].ip_map = map;
map 315 dev/acpi/acpimadt.c map->next = mp_isa_bus->mb_intrs;
map 316 dev/acpi/acpimadt.c mp_isa_bus->mb_intrs = map;
map 148 dev/acpi/acpiprt.c struct mp_intr_map *map;
map 217 dev/acpi/acpiprt.c map = malloc(sizeof (struct mp_intr_map), M_DEVBUF, M_NOWAIT);
map 218 dev/acpi/acpiprt.c if (map == NULL)
map 221 dev/acpi/acpiprt.c memset(map, 0, sizeof *map);
map 222 dev/acpi/acpiprt.c map->ioapic = apic;
map 223 dev/acpi/acpiprt.c map->ioapic_pin = irq - apic->sc_apic_vecbase;
map 224 dev/acpi/acpiprt.c map->bus_pin = ((addr >> 14) & 0x7c) | (pin & 0x3);
map 225 dev/acpi/acpiprt.c map->redir = IOAPIC_REDLO_ACTLO | IOAPIC_REDLO_LEVEL;
map 226 dev/acpi/acpiprt.c map->redir |= (IOAPIC_REDLO_DEL_LOPRI << IOAPIC_REDLO_DEL_SHIFT);
map 228 dev/acpi/acpiprt.c map->ioapic_ih = APIC_INT_VIA_APIC |
map 230 dev/acpi/acpiprt.c (map->ioapic_pin << APIC_INT_PIN_SHIFT));
map 232 dev/acpi/acpiprt.c apic->sc_pins[map->ioapic_pin].ip_map = map;
map 234 dev/acpi/acpiprt.c map->next = mp_busses[sc->sc_bus].mb_intrs;
map 235 dev/acpi/acpiprt.c mp_busses[sc->sc_bus].mb_intrs = map;
map 3420 dev/acpi/dsdt.c static union acpi_resource map;
map 3424 dev/acpi/dsdt.c if (rlen >= sizeof(map))
map 3427 dev/acpi/dsdt.c memset(&map, 0, sizeof(map));
map 3428 dev/acpi/dsdt.c memcpy(&map, crs, rlen);
map 3430 dev/acpi/dsdt.c 	return &map;
map 502 dev/adb/akbdmap.h #define KBD_MAP(name, base, map) \
map 503 dev/adb/akbdmap.h { name, base, sizeof(map)/sizeof(keysym_t), map }
map 151 dev/gpio/gpio.c gpio_pin_map(void *gpio, int offset, u_int32_t mask, struct gpio_pinmap *map)
map 168 dev/gpio/gpio.c map->pm_map[npins++] = pin;
map 170 dev/gpio/gpio.c map->pm_size = npins;
map 176 dev/gpio/gpio.c gpio_pin_unmap(void *gpio, struct gpio_pinmap *map)
map 181 dev/gpio/gpio.c for (i = 0; i < map->pm_size; i++) {
map 182 dev/gpio/gpio.c pin = map->pm_map[i];
map 188 dev/gpio/gpio.c gpio_pin_read(void *gpio, struct gpio_pinmap *map, int pin)
map 192 dev/gpio/gpio.c return (gpiobus_pin_read(sc->sc_gc, map->pm_map[pin]));
map 196 dev/gpio/gpio.c gpio_pin_write(void *gpio, struct gpio_pinmap *map, int pin, int value)
map 200 dev/gpio/gpio.c return (gpiobus_pin_write(sc->sc_gc, map->pm_map[pin], value));
map 204 dev/gpio/gpio.c gpio_pin_ctl(void *gpio, struct gpio_pinmap *map, int pin, int flags)
map 208 dev/gpio/gpio.c return (gpiobus_pin_ctl(sc->sc_gc, map->pm_map[pin], flags));
map 212 dev/gpio/gpio.c gpio_pin_caps(void *gpio, struct gpio_pinmap *map, int pin)
map 216 dev/gpio/gpio.c return (sc->sc_pins[map->pm_map[pin]].pin_caps);
map 661 dev/hil/hilkbdmap.c #define KBD_MAP(name, base, map) \
map 662 dev/hil/hilkbdmap.c { name, base, sizeof(map)/sizeof(keysym_t), map }
map 47 dev/i2o/iopspvar.h #define IOPSP_TIDMAP(map, t, l) (map[(t) * IOPSP_MAX_LUN + (l)])
map 2127 dev/ic/acx.c bus_dmamap_t map;
map 2159 dev/ic/acx.c map = rb->rb_mbuf_dmamap;
map 2161 dev/ic/acx.c bd->mbuf_tmp_dmamap = map;
map 649 dev/ic/acx100.c const uint8_t *map;
map 653 dev/ic/acx100.c map = acx100_txpower_maxim;
map 657 dev/ic/acx100.c map = acx100_txpower_rfmd;
map 665 dev/ic/acx100.c acx_write_phyreg(sc, ACXRV_PHYREG_TXPOWER, map[ACX100_TXPOWER]);
map 9997 dev/ic/aic79xx.c ahd_createdmamem(struct ahd_softc *ahd, size_t size, struct map_node *map,
map 10003 dev/ic/aic79xx.c bzero(map, sizeof(*map));
map 10006 dev/ic/aic79xx.c &map->dmamap)) != 0) {
map 10012 dev/ic/aic79xx.c if ((error = bus_dmamem_alloc(tag, size, PAGE_SIZE, 0, &map->dmaseg,
map 10019 dev/ic/aic79xx.c if ((error = bus_dmamem_map(tag, &map->dmaseg, nseg, size,
map 10020 dev/ic/aic79xx.c (caddr_t *)&map->vaddr, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
map 10026 dev/ic/aic79xx.c if ((error = bus_dmamap_load(tag, map->dmamap, map->vaddr, size, NULL,
map 10033 dev/ic/aic79xx.c map->size = size;
map 10034 dev/ic/aic79xx.c map->busaddr = map->dmamap->dm_segs[0].ds_addr;
map 10038 dev/ic/aic79xx.c bus_dmamem_unmap(tag, map->vaddr, size);
map 10040 dev/ic/aic79xx.c bus_dmamem_free(tag, &map->dmaseg, 1);
map 10042 dev/ic/aic79xx.c bus_dmamap_destroy(tag, map->dmamap);
map 10044 dev/ic/aic79xx.c bzero(map, sizeof(*map));
map 10049 dev/ic/aic79xx.c ahd_freedmamem(struct ahd_softc* ahd, struct map_node *map)
map 10053 dev/ic/aic79xx.c bus_dmamap_unload(tag, map->dmamap);
map 10054 dev/ic/aic79xx.c bus_dmamem_unmap(tag, map->vaddr, map->size);
map 10055 dev/ic/aic79xx.c bus_dmamem_free(tag, &map->dmaseg, 1);
map 10056 dev/ic/aic79xx.c bus_dmamap_destroy(tag, map->dmamap);
map 260 dev/ic/aic7xxx.c bus_dmamap_t map,
map 7350 dev/ic/aic7xxx.c ahc_freedmamem(bus_dma_tag_t tag, int size, bus_dmamap_t map, caddr_t vaddr,
map 7354 dev/ic/aic7xxx.c bus_dmamap_unload(tag, map);
map 7357 dev/ic/aic7xxx.c bus_dmamap_destroy(tag, map);
map 758 dev/ic/ciss.c lmap->size = htobe32(sc->maxunits * sizeof(lmap->map));
map 759 dev/ic/ciss.c total = sizeof(*lmap) + (sc->maxunits - 1) * sizeof(lmap->map);
map 782 dev/ic/ciss.c lmap->map[0].tgt, lmap->map[0].tgt2));
map 129 dev/ic/cissreg.h } map[1];
map 1942 dev/ic/dc.c bus_dmamap_t map;
map 1962 dev/ic/dc.c map = sc->dc_cdata.dc_rx_chain[i].sd_map;
map 1964 dev/ic/dc.c sc->sc_rx_sparemap = map;
map 2358 dev/ic/dc.c bus_dmamap_t map = sc->dc_cdata.dc_tx_chain[idx].sd_map;
map 2360 dev/ic/dc.c bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
map 2362 dev/ic/dc.c bus_dmamap_unload(sc->sc_dmat, map);
map 2604 dev/ic/dc.c bus_dmamap_t map;
map 2611 dev/ic/dc.c map = sc->sc_tx_sparemap;
map 2613 dev/ic/dc.c if (bus_dmamap_load_mbuf(sc->sc_dmat, map,
map 2619 dev/ic/dc.c for (i = 0; i < map->dm_nsegs; i++) {
map 2623 dev/ic/dc.c bus_dmamap_unload(sc->sc_dmat, map);
map 2629 dev/ic/dc.c bus_dmamap_unload(sc->sc_dmat, map);
map 2634 dev/ic/dc.c f->dc_ctl = htole32(DC_TXCTL_TLINK | map->dm_segs[i].ds_len);
map 2640 dev/ic/dc.c f->dc_data = htole32(map->dm_segs[i].ds_addr);
map 2649 dev/ic/dc.c sc->dc_cdata.dc_tx_chain[cur].sd_map = map;
map 2664 dev/ic/dc.c bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
map 3168 dev/ic/dc.c bus_dmamap_t map = sc->dc_cdata.dc_rx_chain[i].sd_map;
map 3170 dev/ic/dc.c bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
map 3172 dev/ic/dc.c bus_dmamap_unload(sc->sc_dmat, map);
map 3187 dev/ic/dc.c bus_dmamap_t map = sc->dc_cdata.dc_tx_chain[i].sd_map;
map 3189 dev/ic/dc.c bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
map 3191 dev/ic/dc.c bus_dmamap_unload(sc->sc_dmat, map);
map 166 dev/ic/fxpvar.h #define FXP_RXMAP_PUT(sc,map) ((sc)->sc_rxmaps[--(sc)->sc_rxfree] = (map))
map 1608 dev/ic/gem.c bus_dmamap_t map;
map 1611 dev/ic/gem.c map = sc->sc_txd[cur].sd_map;
map 1613 dev/ic/gem.c if (bus_dmamap_load_mbuf(sc->sc_dmatag, map, mhead,
map 1618 dev/ic/gem.c if ((sc->sc_tx_cnt + map->dm_nsegs) > (GEM_NTXDESC - 2)) {
map 1619 dev/ic/gem.c bus_dmamap_unload(sc->sc_dmatag, map);
map 1623 dev/ic/gem.c bus_dmamap_sync(sc->sc_dmatag, map, 0, map->dm_mapsize,
map 1626 dev/ic/gem.c for (i = 0; i < map->dm_nsegs; i++) {
map 1628 dev/ic/gem.c GEM_DMA_WRITE(sc, map->dm_segs[i].ds_addr);
map 1629 dev/ic/gem.c flags = (map->dm_segs[i].ds_len & GEM_TD_BUFSIZE) |
map 1631 dev/ic/gem.c ((i == (map->dm_nsegs - 1)) ? GEM_TD_END_OF_PACKET : 0);
map 1641 dev/ic/gem.c sc->sc_tx_cnt += map->dm_nsegs;
map 1643 dev/ic/gem.c sc->sc_txd[cur].sd_map = map;
map 1495 dev/ic/hme.c bus_dmamap_t map;
map 1538 dev/ic/hme.c map = d->sd_map;
map 1540 dev/ic/hme.c sc->sc_rxmap_spare = map;
map 1420 dev/ic/isp.c isp_getmap(struct ispsoftc *isp, fcpos_map_t *map)
map 1441 dev/ic/isp.c MEMCPY(map, fcp->isp_scratch, sizeof (fcpos_map_t));
map 1442 dev/ic/isp.c map->fwmap = mbs.param[1] != 0;
map 714 dev/ic/ispmbox.h u_int8_t map[127];
map 70 dev/ic/malo.c bus_dmamap_t map;
map 75 dev/ic/malo.c bus_dmamap_t map;
map 565 dev/ic/malo.c BUS_DMA_NOWAIT, &ring->map);
map 590 dev/ic/malo.c error = bus_dmamap_load(sc->sc_dmat, ring->map, ring->desc,
map 599 dev/ic/malo.c ring->physaddr = ring->map->dm_segs->ds_addr;
map 619 dev/ic/malo.c 0, BUS_DMA_NOWAIT, &data->map);
map 642 dev/ic/malo.c error = bus_dmamap_load(sc->sc_dmat, data->map,
map 651 dev/ic/malo.c desc->physdata = htole32(data->map->dm_segs->ds_addr);
map 656 dev/ic/malo.c bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
map 673 dev/ic/malo.c bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
map 686 dev/ic/malo.c bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
map 687 dev/ic/malo.c ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
map 688 dev/ic/malo.c bus_dmamap_unload(sc->sc_dmat, ring->map);
map 699 dev/ic/malo.c bus_dmamap_sync(sc->sc_dmat, data->map, 0,
map 700 dev/ic/malo.c data->map->dm_mapsize,
map 702 dev/ic/malo.c bus_dmamap_unload(sc->sc_dmat, data->map);
map 706 dev/ic/malo.c if (data->map != NULL)
map 707 dev/ic/malo.c bus_dmamap_destroy(sc->sc_dmat, data->map);
map 725 dev/ic/malo.c count * sizeof(struct malo_tx_desc), 0, BUS_DMA_NOWAIT, &ring->map);
map 750 dev/ic/malo.c error = bus_dmamap_load(sc->sc_dmat, ring->map, ring->desc,
map 759 dev/ic/malo.c ring->physaddr = ring->map->dm_segs->ds_addr;
map 774 dev/ic/malo.c &ring->data[i].map);
map 802 dev/ic/malo.c bus_dmamap_sync(sc->sc_dmat, data->map, 0,
map 803 dev/ic/malo.c data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
map 804 dev/ic/malo.c bus_dmamap_unload(sc->sc_dmat, data->map);
map 818 dev/ic/malo.c bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
map 832 dev/ic/malo.c bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
map 833 dev/ic/malo.c ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
map 834 dev/ic/malo.c bus_dmamap_unload(sc->sc_dmat, ring->map);
map 845 dev/ic/malo.c bus_dmamap_sync(sc->sc_dmat, data->map, 0,
map 846 dev/ic/malo.c data->map->dm_mapsize,
map 848 dev/ic/malo.c bus_dmamap_unload(sc->sc_dmat, data->map);
map 858 dev/ic/malo.c if (data->map != NULL)
map 859 dev/ic/malo.c bus_dmamap_destroy(sc->sc_dmat, data->map);
map 1393 dev/ic/malo.c bus_dmamap_sync(sc->sc_dmat, data->map, 0,
map 1394 dev/ic/malo.c data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
map 1395 dev/ic/malo.c bus_dmamap_unload(sc->sc_dmat, data->map);
map 1494 dev/ic/malo.c error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m0,
map 1508 dev/ic/malo.c data->map->dm_segs, data->map->dm_nsegs);
map 1510 dev/ic/malo.c bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
map 1512 dev/ic/malo.c bus_dmamap_sync(sc->sc_dmat, sc->sc_txring.map,
map 1612 dev/ic/malo.c error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m0,
map 1626 dev/ic/malo.c data->map->dm_segs, data->map->dm_nsegs);
map 1628 dev/ic/malo.c bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
map 1630 dev/ic/malo.c bus_dmamap_sync(sc->sc_dmat, sc->sc_txring.map,
map 1677 dev/ic/malo.c bus_dmamap_sync(sc->sc_dmat, sc->sc_rxring.map,
map 1705 dev/ic/malo.c bus_dmamap_sync(sc->sc_dmat, data->map, 0,
map 1706 dev/ic/malo.c data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
map 1707 dev/ic/malo.c bus_dmamap_unload(sc->sc_dmat, data->map);
map 1709 dev/ic/malo.c error = bus_dmamap_load(sc->sc_dmat, data->map,
map 1714 dev/ic/malo.c error = bus_dmamap_load(sc->sc_dmat, data->map,
map 1730 dev/ic/malo.c desc->physdata = htole32(data->map->dm_segs->ds_addr);
map 1783 dev/ic/malo.c bus_dmamap_sync(sc->sc_dmat, sc->sc_rxring.map,
map 24 dev/ic/malo.h bus_dmamap_t map;
map 38 dev/ic/malo.h bus_dmamap_t map;
map 370 dev/ic/mtd8xx.c bus_dmamap_t map;
map 377 dev/ic/mtd8xx.c map = sc->sc_tx_sparemap;
map 379 dev/ic/mtd8xx.c if (bus_dmamap_load_mbuf(sc->sc_dmat, map,
map 385 dev/ic/mtd8xx.c for (i = 0; i < map->dm_nsegs; i++) {
map 388 dev/ic/mtd8xx.c bus_dmamap_unload(sc->sc_dmat, map);
map 393 dev/ic/mtd8xx.c f->td_tcw = htole32(map->dm_segs[i].ds_len);
map 394 dev/ic/mtd8xx.c total_len += map->dm_segs[i].ds_len;
map 400 dev/ic/mtd8xx.c f->td_buf = htole32(map->dm_segs[i].ds_addr);
map 409 dev/ic/mtd8xx.c sc->mtd_cdata.mtd_tx_chain[cur].sd_map = map;
map 415 dev/ic/mtd8xx.c bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
map 497 dev/ic/mtd8xx.c bus_dmamap_t map;
map 518 dev/ic/mtd8xx.c map = sc->mtd_cdata.mtd_rx_chain[i].sd_map;
map 520 dev/ic/mtd8xx.c sc->sc_rx_sparemap = map;
map 785 dev/ic/mtd8xx.c bus_dmamap_t map = sc->mtd_cdata.mtd_rx_chain[i].sd_map;
map 787 dev/ic/mtd8xx.c bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
map 789 dev/ic/mtd8xx.c bus_dmamap_unload(sc->sc_dmat, map);
map 804 dev/ic/mtd8xx.c bus_dmamap_t map = sc->mtd_cdata.mtd_tx_chain[i].sd_map;
map 806 dev/ic/mtd8xx.c bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
map 808 dev/ic/mtd8xx.c bus_dmamap_unload(sc->sc_dmat, map);
map 1074 dev/ic/mtd8xx.c bus_dmamap_t map =
map 1076 dev/ic/mtd8xx.c bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
map 1078 dev/ic/mtd8xx.c bus_dmamap_unload(sc->sc_dmat, map);
map 1107 dev/ic/re.c bus_dmamap_t map;
map 1136 dev/ic/re.c map = rxs->rxs_dmamap;
map 1137 dev/ic/re.c error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
map 1143 dev/ic/re.c bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
map 1159 dev/ic/re.c cmdstat = map->dm_segs[0].ds_len;
map 1162 dev/ic/re.c re_set_bufaddr(d, map->dm_segs[0].ds_addr);
map 1530 dev/ic/re.c bus_dmamap_t map;
map 1569 dev/ic/re.c map = txq->txq_dmamap;
map 1570 dev/ic/re.c error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
map 1579 dev/ic/re.c nsegs = map->dm_nsegs;
map 1596 dev/ic/re.c bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
map 1612 dev/ic/re.c for (seg = 0; seg < map->dm_nsegs;
map 1634 dev/ic/re.c re_set_bufaddr(d, map->dm_segs[seg].ds_addr);
map 1635 dev/ic/re.c cmdstat = rl_flags | map->dm_segs[seg].ds_len;
map 1703 dev/ic/re.c bus_dmamap_unload(sc->sc_dmat, map);
map 353 dev/ic/rt2560.c count * RT2560_TX_DESC_SIZE, 0, BUS_DMA_NOWAIT, &ring->map);
map 377 dev/ic/rt2560.c error = bus_dmamap_load(sc->sc_dmat, ring->map, ring->desc,
map 386 dev/ic/rt2560.c ring->physaddr = ring->map->dm_segs->ds_addr;
map 401 dev/ic/rt2560.c &ring->data[i].map);
map 425 dev/ic/rt2560.c bus_dmamap_sync(sc->sc_dmat, data->map, 0,
map 426 dev/ic/rt2560.c data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
map 427 dev/ic/rt2560.c bus_dmamap_unload(sc->sc_dmat, data->map);
map 441 dev/ic/rt2560.c bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
map 455 dev/ic/rt2560.c bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
map 456 dev/ic/rt2560.c ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
map 457 dev/ic/rt2560.c bus_dmamap_unload(sc->sc_dmat, ring->map);
map 468 dev/ic/rt2560.c bus_dmamap_sync(sc->sc_dmat, data->map, 0,
map 469 dev/ic/rt2560.c data->map->dm_mapsize,
map 471 dev/ic/rt2560.c bus_dmamap_unload(sc->sc_dmat, data->map);
map 481 dev/ic/rt2560.c if (data->map != NULL)
map 482 dev/ic/rt2560.c bus_dmamap_destroy(sc->sc_dmat, data->map);
map 499 dev/ic/rt2560.c count * RT2560_RX_DESC_SIZE, 0, BUS_DMA_NOWAIT, &ring->map);
map 523 dev/ic/rt2560.c error = bus_dmamap_load(sc->sc_dmat, ring->map, ring->desc,
map 532 dev/ic/rt2560.c ring->physaddr = ring->map->dm_segs->ds_addr;
map 552 dev/ic/rt2560.c 0, BUS_DMA_NOWAIT, &data->map);
map 574 dev/ic/rt2560.c error = bus_dmamap_load(sc->sc_dmat, data->map,
map 583 dev/ic/rt2560.c desc->physaddr = htole32(data->map->dm_segs->ds_addr);
map 586 dev/ic/rt2560.c bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
map 605 dev/ic/rt2560.c bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
map 618 dev/ic/rt2560.c bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
map 619 dev/ic/rt2560.c ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
map 620 dev/ic/rt2560.c bus_dmamap_unload(sc->sc_dmat, ring->map);
map 631 dev/ic/rt2560.c bus_dmamap_sync(sc->sc_dmat, data->map, 0,
map 632 dev/ic/rt2560.c data->map->dm_mapsize,
map 634 dev/ic/rt2560.c bus_dmamap_unload(sc->sc_dmat, data->map);
map 638 dev/ic/rt2560.c if (data->map != NULL)
map 639 dev/ic/rt2560.c bus_dmamap_destroy(sc->sc_dmat, data->map);
map 893 dev/ic/rt2560.c bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
map 909 dev/ic/rt2560.c bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
map 935 dev/ic/rt2560.c bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
map 977 dev/ic/rt2560.c bus_dmamap_sync(sc->sc_dmat, data->map, 0,
map 978 dev/ic/rt2560.c data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
map 979 dev/ic/rt2560.c bus_dmamap_unload(sc->sc_dmat, data->map);
map 988 dev/ic/rt2560.c bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
map 1013 dev/ic/rt2560.c bus_dmamap_sync(sc->sc_dmat, sc->prioq.map,
map 1043 dev/ic/rt2560.c bus_dmamap_sync(sc->sc_dmat, data->map, 0,
map 1044 dev/ic/rt2560.c data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
map 1045 dev/ic/rt2560.c bus_dmamap_unload(sc->sc_dmat, data->map);
map 1054 dev/ic/rt2560.c bus_dmamap_sync(sc->sc_dmat, sc->prioq.map,
map 1093 dev/ic/rt2560.c bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
map 1131 dev/ic/rt2560.c bus_dmamap_sync(sc->sc_dmat, data->map, 0,
map 1132 dev/ic/rt2560.c data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
map 1133 dev/ic/rt2560.c bus_dmamap_unload(sc->sc_dmat, data->map);
map 1135 dev/ic/rt2560.c error = bus_dmamap_load(sc->sc_dmat, data->map,
map 1141 dev/ic/rt2560.c error = bus_dmamap_load(sc->sc_dmat, data->map,
map 1159 dev/ic/rt2560.c desc->physaddr = htole32(data->map->dm_segs->ds_addr);
map 1206 dev/ic/rt2560.c bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
map 1235 dev/ic/rt2560.c bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
map 1264 dev/ic/rt2560.c bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
map 1304 dev/ic/rt2560.c bus_dmamap_sync(sc->sc_dmat, data->map, 0,
map 1305 dev/ic/rt2560.c data->map->dm_mapsize, BUS_DMASYNC_PREWRITE);
map 1553 dev/ic/rt2560.c error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m0,
map 1567 dev/ic/rt2560.c data->map->dm_segs->ds_addr);
map 1569 dev/ic/rt2560.c bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
map 1571 dev/ic/rt2560.c bus_dmamap_sync(sc->sc_dmat, sc->bcnq.map,
map 1624 dev/ic/rt2560.c error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m0,
map 1674 dev/ic/rt2560.c data->map->dm_segs->ds_addr);
map 1676 dev/ic/rt2560.c bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
map 1678 dev/ic/rt2560.c bus_dmamap_sync(sc->sc_dmat, sc->prioq.map,
map 1785 dev/ic/rt2560.c error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, mprot,
map 1804 dev/ic/rt2560.c data->map->dm_segs->ds_addr);
map 1806 dev/ic/rt2560.c bus_dmamap_sync(sc->sc_dmat, data->map, 0,
map 1807 dev/ic/rt2560.c data->map->dm_mapsize, BUS_DMASYNC_PREWRITE);
map 1808 dev/ic/rt2560.c bus_dmamap_sync(sc->sc_dmat, txq->map,
map 1822 dev/ic/rt2560.c error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m0,
map 1853 dev/ic/rt2560.c error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m0,
map 1899 dev/ic/rt2560.c data->map->dm_segs->ds_addr);
map 1901 dev/ic/rt2560.c bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
map 1903 dev/ic/rt2560.c bus_dmamap_sync(sc->sc_dmat, txq->map,
map 55 dev/ic/rt2560var.h bus_dmamap_t map;
map 61 dev/ic/rt2560var.h bus_dmamap_t map;
map 75 dev/ic/rt2560var.h bus_dmamap_t map;
map 81 dev/ic/rt2560var.h bus_dmamap_t map;
map 386 dev/ic/rt2661.c count * RT2661_TX_DESC_SIZE, 0, BUS_DMA_NOWAIT, &ring->map);
map 410 dev/ic/rt2661.c error = bus_dmamap_load(sc->sc_dmat, ring->map, ring->desc,
map 419 dev/ic/rt2661.c ring->physaddr = ring->map->dm_segs->ds_addr;
map 434 dev/ic/rt2661.c &ring->data[i].map);
map 458 dev/ic/rt2661.c bus_dmamap_sync(sc->sc_dmat, data->map, 0,
map 459 dev/ic/rt2661.c data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
map 460 dev/ic/rt2661.c bus_dmamap_unload(sc->sc_dmat, data->map);
map 474 dev/ic/rt2661.c bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
map 487 dev/ic/rt2661.c bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
map 488 dev/ic/rt2661.c ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
map 489 dev/ic/rt2661.c bus_dmamap_unload(sc->sc_dmat, ring->map);
map 500 dev/ic/rt2661.c bus_dmamap_sync(sc->sc_dmat, data->map, 0,
map 501 dev/ic/rt2661.c data->map->dm_mapsize,
map 503 dev/ic/rt2661.c bus_dmamap_unload(sc->sc_dmat, data->map);
map 512 dev/ic/rt2661.c if (data->map != NULL)
map 513 dev/ic/rt2661.c bus_dmamap_destroy(sc->sc_dmat, data->map);
map 529 dev/ic/rt2661.c count * RT2661_RX_DESC_SIZE, 0, BUS_DMA_NOWAIT, &ring->map);
map 553 dev/ic/rt2661.c error = bus_dmamap_load(sc->sc_dmat, ring->map, ring->desc,
map 562 dev/ic/rt2661.c ring->physaddr = ring->map->dm_segs->ds_addr;
map 582 dev/ic/rt2661.c 0, BUS_DMA_NOWAIT, &data->map);
map 604 dev/ic/rt2661.c error = bus_dmamap_load(sc->sc_dmat, data->map,
map 613 dev/ic/rt2661.c desc->physaddr = htole32(data->map->dm_segs->ds_addr);
map 616 dev/ic/rt2661.c bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
map 633 dev/ic/rt2661.c bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
map 645 dev/ic/rt2661.c bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
map 646 dev/ic/rt2661.c ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
map 647 dev/ic/rt2661.c bus_dmamap_unload(sc->sc_dmat, ring->map);
map 658 dev/ic/rt2661.c bus_dmamap_sync(sc->sc_dmat, data->map, 0,
map 659 dev/ic/rt2661.c data->map->dm_mapsize,
map 661 dev/ic/rt2661.c bus_dmamap_unload(sc->sc_dmat, data->map);
map 665 dev/ic/rt2661.c if (data->map != NULL)
map 666 dev/ic/rt2661.c bus_dmamap_destroy(sc->sc_dmat, data->map);
map 968 dev/ic/rt2661.c bus_dmamap_sync(sc->sc_dmat, txq->map,
map 976 dev/ic/rt2661.c bus_dmamap_sync(sc->sc_dmat, data->map, 0,
map 977 dev/ic/rt2661.c data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
map 978 dev/ic/rt2661.c bus_dmamap_unload(sc->sc_dmat, data->map);
map 986 dev/ic/rt2661.c bus_dmamap_sync(sc->sc_dmat, txq->map,
map 1011 dev/ic/rt2661.c bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
map 1054 dev/ic/rt2661.c bus_dmamap_sync(sc->sc_dmat, data->map, 0,
map 1055 dev/ic/rt2661.c data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
map 1056 dev/ic/rt2661.c bus_dmamap_unload(sc->sc_dmat, data->map);
map 1058 dev/ic/rt2661.c error = bus_dmamap_load(sc->sc_dmat, data->map,
map 1064 dev/ic/rt2661.c error = bus_dmamap_load(sc->sc_dmat, data->map,
map 1082 dev/ic/rt2661.c desc->physaddr = htole32(data->map->dm_segs->ds_addr);
map 1136 dev/ic/rt2661.c bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
map 1474 dev/ic/rt2661.c error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m0,
map 1521 dev/ic/rt2661.c m0->m_pkthdr.len, rate, data->map->dm_segs, data->map->dm_nsegs,
map 1524 dev/ic/rt2661.c bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
map 1526 dev/ic/rt2661.c bus_dmamap_sync(sc->sc_dmat, sc->mgtq.map,
map 1631 dev/ic/rt2661.c error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, mprot,
map 1649 dev/ic/rt2661.c 0, mprot->m_pkthdr.len, protrate, data->map->dm_segs,
map 1650 dev/ic/rt2661.c data->map->dm_nsegs, ac);
map 1652 dev/ic/rt2661.c bus_dmamap_sync(sc->sc_dmat, data->map, 0,
map 1653 dev/ic/rt2661.c data->map->dm_mapsize, BUS_DMASYNC_PREWRITE);
map 1654 dev/ic/rt2661.c bus_dmamap_sync(sc->sc_dmat, txq->map,
map 1667 dev/ic/rt2661.c error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m0,
map 1698 dev/ic/rt2661.c error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m0,
map 1743 dev/ic/rt2661.c data->map->dm_segs, data->map->dm_nsegs, ac);
map 1745 dev/ic/rt2661.c bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
map 1747 dev/ic/rt2661.c bus_dmamap_sync(sc->sc_dmat, txq->map, txq->cur * RT2661_TX_DESC_SIZE,
map 51 dev/ic/rt2661var.h bus_dmamap_t map;
map 57 dev/ic/rt2661var.h bus_dmamap_t map;
map 70 dev/ic/rt2661var.h bus_dmamap_t map;
map 75 dev/ic/rt2661var.h bus_dmamap_t map;
map 516 dev/ic/vga.c vga_extended_attach(self, iot, memt, type, map)
map 520 dev/ic/vga.c paddr_t (*map)(void *, off_t, int);
map 541 dev/ic/vga.c vc->vc_mmap = map;
map 1100 dev/ic/xl.c bus_dmamap_t map;
map 1120 dev/ic/xl.c if (c->map->dm_nsegs != 0) {
map 1121 dev/ic/xl.c bus_dmamap_sync(sc->sc_dmat, c->map,
map 1122 dev/ic/xl.c 0, c->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
map 1123 dev/ic/xl.c bus_dmamap_unload(sc->sc_dmat, c->map);
map 1126 dev/ic/xl.c map = c->map;
map 1127 dev/ic/xl.c c->map = sc->sc_rx_sparemap;
map 1128 dev/ic/xl.c sc->sc_rx_sparemap = map;
map 1133 dev/ic/xl.c bus_dmamap_sync(sc->sc_dmat, c->map, 0, c->map->dm_mapsize,
map 1138 dev/ic/xl.c htole32(c->map->dm_segs[0].ds_addr + ETHER_ALIGN);
map 1140 dev/ic/xl.c htole32(c->map->dm_segs[0].ds_len | XL_LAST_FRAG);
map 1348 dev/ic/xl.c if (cur_tx->map->dm_nsegs != 0) {
map 1349 dev/ic/xl.c bus_dmamap_t map = cur_tx->map;
map 1351 dev/ic/xl.c bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
map 1353 dev/ic/xl.c bus_dmamap_unload(sc->sc_dmat, map);
map 1401 dev/ic/xl.c if (cur_tx->map->dm_nsegs != 0) {
map 1402 dev/ic/xl.c bus_dmamap_sync(sc->sc_dmat, cur_tx->map,
map 1403 dev/ic/xl.c 0, cur_tx->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
map 1404 dev/ic/xl.c bus_dmamap_unload(sc->sc_dmat, cur_tx->map);
map 1612 dev/ic/xl.c bus_dmamap_t map;
map 1614 dev/ic/xl.c map = sc->sc_tx_sparemap;
map 1617 dev/ic/xl.c error = bus_dmamap_load_mbuf(sc->sc_dmat, map,
map 1630 dev/ic/xl.c for (frag = 0, total_len = 0; frag < map->dm_nsegs; frag++) {
map 1633 dev/ic/xl.c total_len += map->dm_segs[frag].ds_len;
map 1635 dev/ic/xl.c htole32(map->dm_segs[frag].ds_addr);
map 1637 dev/ic/xl.c htole32(map->dm_segs[frag].ds_len);
map 1672 dev/ic/xl.c bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
map 1675 dev/ic/xl.c if (c->map->dm_nsegs != 0) {
map 1676 dev/ic/xl.c bus_dmamap_sync(sc->sc_dmat, c->map,
map 1677 dev/ic/xl.c 0, c->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
map 1678 dev/ic/xl.c bus_dmamap_unload(sc->sc_dmat, c->map);
map 1682 dev/ic/xl.c sc->sc_tx_sparemap = c->map;
map 1683 dev/ic/xl.c c->map = map;
map 2384 dev/ic/xl.c bus_dmamap_t map;
map 2391 dev/ic/xl.c if (sc->xl_cdata.xl_rx_chain[i].map->dm_nsegs != 0) {
map 2392 dev/ic/xl.c map = sc->xl_cdata.xl_rx_chain[i].map;
map 2394 dev/ic/xl.c bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
map 2396 dev/ic/xl.c bus_dmamap_unload(sc->sc_dmat, map);
map 2409 dev/ic/xl.c if (sc->xl_cdata.xl_tx_chain[i].map->dm_nsegs != 0) {
map 2410 dev/ic/xl.c map = sc->xl_cdata.xl_tx_chain[i].map;
map 2412 dev/ic/xl.c bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
map 2414 dev/ic/xl.c bus_dmamap_unload(sc->sc_dmat, map);
map 2520 dev/ic/xl.c &sc->xl_cdata.xl_rx_chain[i].map) != 0) {
map 2534 dev/ic/xl.c &sc->xl_cdata.xl_tx_chain[i].map) != 0) {
map 469 dev/ic/xlreg.h bus_dmamap_t map;
map 476 dev/ic/xlreg.h bus_dmamap_t map;
map 776 dev/isa/ad1848.c ad1848_mixer_find_dev(map, cnt, cp)
map 777 dev/isa/ad1848.c ad1848_devmap_t *map;
map 785 dev/isa/ad1848.c if (map[idx].id == cp->dev) {
map 786 dev/isa/ad1848.c return (&map[idx]);
map 793 dev/isa/ad1848.c ad1848_mixer_get_port(ac, map, cnt, cp)
map 795 dev/isa/ad1848.c struct ad1848_devmap *map;
map 804 dev/isa/ad1848.c if (!(entry = ad1848_mixer_find_dev(map, cnt, cp)))
map 867 dev/isa/ad1848.c ad1848_mixer_set_port(ac, map, cnt, cp)
map 869 dev/isa/ad1848.c struct ad1848_devmap *map;
map 878 dev/isa/ad1848.c if (!(entry = ad1848_mixer_find_dev(map, cnt, cp)))
map 727 dev/pci/agp.c agp_free_dmamem(bus_dma_tag_t tag, size_t size, bus_dmamap_t map,
map 731 dev/pci/agp.c bus_dmamap_unload(tag, map);
map 732 dev/pci/agp.c bus_dmamap_destroy(tag, map);
map 152 dev/pci/auich.c bus_dmamap_t map;
map 1093 dev/pci/auich.c p->size, 0, BUS_DMA_NOWAIT, &p->map)) != 0) {
map 1102 dev/pci/auich.c if ((error = bus_dmamap_load(sc->dmat, p->map, p->addr, p->size,
map 1106 dev/pci/auich.c bus_dmamap_destroy(sc->dmat, p->map);
map 1134 dev/pci/auich.c bus_dmamap_unload(sc->dmat, p->map);
map 1135 dev/pci/auich.c bus_dmamap_destroy(sc->dmat, p->map);
map 1488 dev/pci/auich.c sc->dmalist_pcmi[i].base = p->map->dm_segs[0].ds_addr;
map 90 dev/pci/auixp.c #define DMAADDR(p) ((p)->map->dm_segs[0].ds_addr)
map 1131 dev/pci/auixp.c BUS_DMA_NOWAIT, &dma->map);
map 1139 dev/pci/auixp.c error = bus_dmamap_load(sc->sc_dmat, dma->map, dma->addr, dma->size, NULL,
map 1147 dev/pci/auixp.c bus_dmamap_destroy(sc->sc_dmat, dma->map);
map 1162 dev/pci/auixp.c bus_dmamap_unload(sc->sc_dmat, p->map);
map 1163 dev/pci/auixp.c bus_dmamap_destroy(sc->sc_dmat, p->map);
map 53 dev/pci/auixpvar.h bus_dmamap_t map;
map 78 dev/pci/autri.c #define DMAADDR(p) ((p)->map->dm_segs[0].ds_addr)
map 887 dev/pci/autri.c 0, BUS_DMA_NOWAIT, &p->map);
map 891 dev/pci/autri.c error = bus_dmamap_load(sc->sc_dmatag, p->map, p->addr, p->size, NULL,
map 898 dev/pci/autri.c bus_dmamap_destroy(sc->sc_dmatag, p->map);
map 911 dev/pci/autri.c bus_dmamap_unload(sc->sc_dmatag, p->map);
map 912 dev/pci/autri.c bus_dmamap_destroy(sc->sc_dmatag, p->map);
map 35 dev/pci/autrivar.h bus_dmamap_t map;
map 69 dev/pci/auvia.c bus_dmamap_t map;
map 728 dev/pci/auvia.c BUS_DMA_NOWAIT, &p->map)) != 0) {
map 734 dev/pci/auvia.c if ((error = bus_dmamap_load(sc->sc_dmat, p->map, p->addr, size, NULL,
map 748 dev/pci/auvia.c bus_dmamap_destroy(sc->sc_dmat, p->map);
map 767 dev/pci/auvia.c bus_dmamap_unload(sc->sc_dmat, p->map);
map 768 dev/pci/auvia.c bus_dmamap_destroy(sc->sc_dmat, p->map);
map 818 dev/pci/auvia.c s = p->map->dm_segs[0].ds_addr;
map 883 dev/pci/auvia.c ch->sc_dma_ops_dma->map->dm_segs[0].ds_addr);
map 928 dev/pci/auvia.c ch->sc_dma_ops_dma->map->dm_segs[0].ds_addr);
map 145 dev/pci/azalia.c bus_dmamap_t map;
map 150 dev/pci/azalia.c #define AZALIA_DMA_DMAADDR(p) ((p)->map->dm_segs[0].ds_addr)
map 1007 dev/pci/azalia.c BUS_DMA_NOWAIT, &d->map);
map 1010 dev/pci/azalia.c err = bus_dmamap_load(az->dmat, d->map, d->addr, size,
map 1022 dev/pci/azalia.c bus_dmamap_destroy(az->dmat, d->map);
map 1036 dev/pci/azalia.c bus_dmamap_unload(az->dmat, d->map);
map 1037 dev/pci/azalia.c bus_dmamap_destroy(az->dmat, d->map);
map 93 dev/pci/cs4280.c bus_dmamap_t map;
map 101 dev/pci/cs4280.c #define DMAADDR(p) ((p)->map->dm_segs[0].ds_addr)
map 892 dev/pci/cs4280.c err = cs4280_download(sc, &BA1Struct->map[offset],
map 943 dev/pci/cs4280.c err = cs4280_checkimage(sc, &BA1Struct->map[offset],
map 1347 dev/pci/cs4280.c bus_dmamap_unload(sc->sc_dmatag, p->map);
map 1348 dev/pci/cs4280.c bus_dmamap_destroy(sc->sc_dmatag, p->map);
map 1383 dev/pci/cs4280.c 0, BUS_DMA_NOWAIT, &p->map);
map 1390 dev/pci/cs4280.c error = bus_dmamap_load(sc->sc_dmatag, p->map, p->addr, p->size, NULL,
map 1400 dev/pci/cs4280.c bus_dmamap_destroy(sc->sc_dmatag, p->map);
map 274 dev/pci/cs4280reg.h u_int32_t map[BA1_DWORD_SIZE];
map 69 dev/pci/cs4281.c bus_dmamap_t map;
map 77 dev/pci/cs4281.c #define DMAADDR(p) ((p)->map->dm_segs[0].ds_addr)
map 1386 dev/pci/cs4281.c bus_dmamap_unload(sc->sc_dmatag, p->map);
map 1387 dev/pci/cs4281.c bus_dmamap_destroy(sc->sc_dmatag, p->map);
map 1556 dev/pci/cs4281.c 0, BUS_DMA_NOWAIT, &p->map);
map 1563 dev/pci/cs4281.c error = bus_dmamap_load(sc->sc_dmatag, p->map, p->addr, p->size, NULL,
map 1573 dev/pci/cs4281.c bus_dmamap_destroy(sc->sc_dmatag, p->map);
map 102 dev/pci/eap.c bus_dmamap_t map;
map 110 dev/pci/eap.c #define DMAADDR(p) ((p)->map->dm_segs[0].ds_addr)
map 827 dev/pci/eap.c 0, BUS_DMA_NOWAIT, &p->map);
map 831 dev/pci/eap.c error = bus_dmamap_load(sc->sc_dmatag, p->map, p->addr, p->size, NULL,
map 838 dev/pci/eap.c bus_dmamap_destroy(sc->sc_dmatag, p->map);
map 849 dev/pci/eap.c bus_dmamap_unload(sc->sc_dmatag, p->map);
map 850 dev/pci/eap.c bus_dmamap_destroy(sc->sc_dmatag, p->map);
map 323 dev/pci/emuxki.c mem->bound, bus_dma_flags, &(mem->map))) {
map 330 dev/pci/emuxki.c if (bus_dmamap_load(dmat, mem->map, mem->kaddr,
map 332 dev/pci/emuxki.c bus_dmamap_destroy(dmat, mem->map);
map 345 dev/pci/emuxki.c bus_dmamap_unload(mem->dmat, mem->map);
map 346 dev/pci/emuxki.c bus_dmamap_destroy(mem->dmat, mem->map);
map 59 dev/pci/emuxkivar.h bus_dmamap_t map;
map 959 dev/pci/esa.c BUS_DMA_NOWAIT, &p->map);
map 963 dev/pci/esa.c error = bus_dmamap_load(sc->sc_dmat, p->map, p->addr, p->size, NULL,
map 971 dev/pci/esa.c bus_dmamap_destroy(sc->sc_dmat, p->map);
map 984 dev/pci/esa.c bus_dmamap_unload(sc->sc_dmat, p->map);
map 985 dev/pci/esa.c bus_dmamap_destroy(sc->sc_dmat, p->map);
map 51 dev/pci/esavar.h #define DMAADDR(p) ((p)->map->dm_segs[0].ds_addr)
map 64 dev/pci/esavar.h bus_dmamap_t map;
map 76 dev/pci/fms.c bus_dmamap_t map;
map 737 dev/pci/fms.c BUS_DMA_NOWAIT, &p->map)) != 0) {
map 743 dev/pci/fms.c if ((error = bus_dmamap_load(sc->sc_dmat, p->map, p->addr, size, NULL,
map 757 dev/pci/fms.c bus_dmamap_destroy(sc->sc_dmat, p->map);
map 778 dev/pci/fms.c bus_dmamap_unload(sc->sc_dmat, p->map);
map 779 dev/pci/fms.c bus_dmamap_destroy(sc->sc_dmat, p->map);
map 853 dev/pci/fms.c sc->sc_play_start = p->map->dm_segs[0].ds_addr;
map 891 dev/pci/fms.c sc->sc_rec_start = p->map->dm_segs[0].ds_addr;
map 1258 dev/pci/hifn7751.c hifn_dmamap_aligned(bus_dmamap_t map)
map 1262 dev/pci/hifn7751.c for (i = 0; i < map->dm_nsegs; i++) {
map 1263 dev/pci/hifn7751.c if (map->dm_segs[i].ds_addr & 3)
map 1265 dev/pci/hifn7751.c if ((i != (map->dm_nsegs - 1)) &&
map 1266 dev/pci/hifn7751.c (map->dm_segs[i].ds_len & 3))
map 1276 dev/pci/hifn7751.c bus_dmamap_t map = cmd->dst_map;
map 1281 dev/pci/hifn7751.c for (i = 0; i < map->dm_nsegs - 1; i++) {
map 1282 dev/pci/hifn7751.c dma->dstr[idx].p = htole32(map->dm_segs[i].ds_addr);
map 1284 dev/pci/hifn7751.c HIFN_D_MASKDONEIRQ | map->dm_segs[i].ds_len);
map 1299 dev/pci/hifn7751.c p = map->dm_segs[i].ds_addr;
map 1301 dev/pci/hifn7751.c map->dm_segs[i].ds_len;
map 1308 dev/pci/hifn7751.c if ((map->dm_segs[i].ds_len - cmd->sloplen) != 0) {
map 1309 dev/pci/hifn7751.c dma->dstr[idx].p = htole32(map->dm_segs[i].ds_addr);
map 1312 dev/pci/hifn7751.c (map->dm_segs[i].ds_len - cmd->sloplen));
map 1348 dev/pci/hifn7751.c bus_dmamap_t map = cmd->src_map;
map 1353 dev/pci/hifn7751.c for (i = 0; i < map->dm_nsegs; i++) {
map 1354 dev/pci/hifn7751.c if (i == map->dm_nsegs - 1)
map 1357 dev/pci/hifn7751.c dma->srcr[idx].p = htole32(map->dm_segs[i].ds_addr);
map 1358 dev/pci/hifn7751.c dma->srcr[idx].l = htole32(map->dm_segs[i].ds_len |
map 1372 dev/pci/hifn7751.c dma->srcu += map->dm_nsegs;
map 3243 dev/pci/if_bnx.c bus_dmamap_t map;
map 3310 dev/pci/if_bnx.c map = sc->rx_mbuf_map[*chain_prod];
map 3312 dev/pci/if_bnx.c if (bus_dmamap_load_mbuf(sc->bnx_dmatag, map, m_new, BUS_DMA_NOWAIT)) {
map 3335 dev/pci/if_bnx.c addr = (u_int32_t)(map->dm_segs[0].ds_addr);
map 3337 dev/pci/if_bnx.c addr = (u_int32_t)((u_int64_t)map->dm_segs[0].ds_addr >> 32);
map 3339 dev/pci/if_bnx.c rxbd->rx_bd_len = htole32(map->dm_segs[0].ds_len);
map 3341 dev/pci/if_bnx.c *prod_bseq += map->dm_segs[0].ds_len;
map 3343 dev/pci/if_bnx.c for (i = 1; i < map->dm_nsegs; i++) {
map 3350 dev/pci/if_bnx.c addr = (u_int32_t)(map->dm_segs[i].ds_addr);
map 3352 dev/pci/if_bnx.c addr = (u_int32_t)((u_int64_t)map->dm_segs[i].ds_addr >> 32);
map 3354 dev/pci/if_bnx.c rxbd->rx_bd_len = htole32(map->dm_segs[i].ds_len);
map 3356 dev/pci/if_bnx.c *prod_bseq += map->dm_segs[i].ds_len;
map 3368 dev/pci/if_bnx.c sc->rx_mbuf_map[*chain_prod] = map;
map 3369 dev/pci/if_bnx.c sc->free_rx_bd -= map->dm_nsegs;
map 3372 dev/pci/if_bnx.c map->dm_nsegs));
map 4309 dev/pci/if_bnx.c bus_dmamap_t map;
map 4343 dev/pci/if_bnx.c map = sc->tx_mbuf_map[chain_prod];
map 4346 dev/pci/if_bnx.c error = bus_dmamap_load_mbuf(sc->bnx_dmatag, map, m0, BUS_DMA_NOWAIT);
map 4361 dev/pci/if_bnx.c if (map->dm_nsegs > (USABLE_TX_BD - sc->used_tx_bd - BNX_TX_SLACK_SPACE)) {
map 4362 dev/pci/if_bnx.c bus_dmamap_unload(sc->bnx_dmatag, map);
map 4383 dev/pci/if_bnx.c for (i = 0; i < map->dm_nsegs ; i++) {
map 4387 dev/pci/if_bnx.c addr = (u_int32_t)(map->dm_segs[i].ds_addr);
map 4389 dev/pci/if_bnx.c addr = (u_int32_t)((u_int64_t)map->dm_segs[i].ds_addr >> 32);
map 4391 dev/pci/if_bnx.c txbd->tx_bd_mss_nbytes = htole16(map->dm_segs[i].ds_len);
map 4394 dev/pci/if_bnx.c prod_bseq += map->dm_segs[i].ds_len;
map 4420 dev/pci/if_bnx.c sc->used_tx_bd += map->dm_nsegs;
map 1843 dev/pci/if_cas.c bus_dmamap_t map;
map 1846 dev/pci/if_cas.c map = sc->sc_txd[cur].sd_map;
map 1848 dev/pci/if_cas.c if (bus_dmamap_load_mbuf(sc->sc_dmatag, map, mhead,
map 1853 dev/pci/if_cas.c if ((sc->sc_tx_cnt + map->dm_nsegs) > (CAS_NTXDESC - 2)) {
map 1854 dev/pci/if_cas.c bus_dmamap_unload(sc->sc_dmatag, map);
map 1858 dev/pci/if_cas.c bus_dmamap_sync(sc->sc_dmatag, map, 0, map->dm_mapsize,
map 1861 dev/pci/if_cas.c for (i = 0; i < map->dm_nsegs; i++) {
map 1863 dev/pci/if_cas.c CAS_DMA_WRITE(map->dm_segs[i].ds_addr);
map 1864 dev/pci/if_cas.c flags = (map->dm_segs[i].ds_len & CAS_TD_BUFSIZE) |
map 1866 dev/pci/if_cas.c ((i == (map->dm_nsegs - 1)) ? CAS_TD_END_OF_PACKET : 0);
map 1876 dev/pci/if_cas.c sc->sc_tx_cnt += map->dm_nsegs;
map 1878 dev/pci/if_cas.c sc->sc_txd[cur].sd_map = map;
map 3061 dev/pci/if_de.c bus_dmamap_t map;
map 3066 dev/pci/if_de.c map = TULIP_GETCTX(m, bus_dmamap_t);
map 3067 dev/pci/if_de.c bus_dmamap_unload(sc->tulip_dmatag, map);
map 3068 dev/pci/if_de.c sc->tulip_txmaps[sc->tulip_txmaps_free++] = map;
map 3099 dev/pci/if_de.c bus_dmamap_t map;
map 3104 dev/pci/if_de.c map = TULIP_GETCTX(m, bus_dmamap_t);
map 3105 dev/pci/if_de.c bus_dmamap_unload(sc->tulip_dmatag, map);
map 3106 dev/pci/if_de.c sc->tulip_rxmaps[sc->tulip_rxmaps_free++] = map;
map 3197 dev/pci/if_de.c bus_dmamap_t map;
map 3261 dev/pci/if_de.c map = TULIP_GETCTX(me, bus_dmamap_t);
map 3262 dev/pci/if_de.c TULIP_RXMAP_POSTSYNC(sc, map);
map 3263 dev/pci/if_de.c bus_dmamap_unload(sc->tulip_dmatag, map);
map 3264 dev/pci/if_de.c sc->tulip_rxmaps[sc->tulip_rxmaps_free++] = map;
map 3283 dev/pci/if_de.c map = TULIP_GETCTX(me, bus_dmamap_t);
map 3284 dev/pci/if_de.c bus_dmamap_sync(sc->tulip_dmatag, map, 0, me->m_len,
map 3286 dev/pci/if_de.c bus_dmamap_unload(sc->tulip_dmatag, map);
map 3287 dev/pci/if_de.c sc->tulip_rxmaps[sc->tulip_rxmaps_free++] = map;
map 3333 dev/pci/if_de.c map = TULIP_GETCTX(me, bus_dmamap_t);
map 3334 dev/pci/if_de.c bus_dmamap_unload(sc->tulip_dmatag, map);
map 3335 dev/pci/if_de.c sc->tulip_rxmaps[sc->tulip_rxmaps_free++] = map;
map 3417 dev/pci/if_de.c map = sc->tulip_rxmaps[--sc->tulip_rxmaps_free];
map 3426 dev/pci/if_de.c TULIP_SETCTX(ms, map);
map 3427 dev/pci/if_de.c error = bus_dmamap_load(sc->tulip_dmatag, map, mtod(ms, void *),
map 3434 dev/pci/if_de.c nextout->d_addr1 = map->dm_segs[0].ds_addr;
map 3435 dev/pci/if_de.c nextout->d_length1 = map->dm_segs[0].ds_len;
map 3436 dev/pci/if_de.c if (map->dm_nsegs == 2) {
map 3437 dev/pci/if_de.c nextout->d_addr2 = map->dm_segs[1].ds_addr;
map 3438 dev/pci/if_de.c nextout->d_length2 = map->dm_segs[1].ds_len;
map 3509 dev/pci/if_de.c bus_dmamap_t map = TULIP_GETCTX(m, bus_dmamap_t);
map 3510 dev/pci/if_de.c TULIP_TXMAP_POSTSYNC(sc, map);
map 3511 dev/pci/if_de.c sc->tulip_txmaps[sc->tulip_txmaps_free++] = map;
map 3816 dev/pci/if_de.c bus_dmamap_t map;
map 3865 dev/pci/if_de.c map = sc->tulip_txmaps[sc->tulip_txmaps_free-1];
map 3873 dev/pci/if_de.c error = bus_dmamap_load_mbuf(sc->tulip_dmatag, map, m, BUS_DMA_NOWAIT);
map 3908 dev/pci/if_de.c error = bus_dmamap_load_mbuf(sc->tulip_dmatag, map, m, BUS_DMA_NOWAIT);
map 3919 dev/pci/if_de.c if ((freedescs -= (map->dm_nsegs + 1) / 2) <= 0
map 3934 dev/pci/if_de.c bus_dmamap_unload(sc->tulip_dmatag, map);
map 3937 dev/pci/if_de.c for (; map->dm_nsegs - segcnt > 1; segcnt += 2) {
map 3941 dev/pci/if_de.c eop->d_addr1 = map->dm_segs[segcnt].ds_addr;
map 3942 dev/pci/if_de.c eop->d_length1 = map->dm_segs[segcnt].ds_len;
map 3943 dev/pci/if_de.c eop->d_addr2 = map->dm_segs[segcnt+1].ds_addr;
map 3944 dev/pci/if_de.c eop->d_length2 = map->dm_segs[segcnt+1].ds_len;
map 3949 dev/pci/if_de.c if (segcnt < map->dm_nsegs) {
map 3953 dev/pci/if_de.c eop->d_addr1 = map->dm_segs[segcnt].ds_addr;
map 3954 dev/pci/if_de.c eop->d_length1 = map->dm_segs[segcnt].ds_len;
map 3960 dev/pci/if_de.c TULIP_TXMAP_PRESYNC(sc, map);
map 3961 dev/pci/if_de.c TULIP_SETCTX(m, map);
map 3962 dev/pci/if_de.c map = NULL;
map 4400 dev/pci/if_de.c bus_dmamap_t map;
map 4402 dev/pci/if_de.c BUS_DMA_NOWAIT, &map);
map 4404 dev/pci/if_de.c error = bus_dmamap_load(sc->tulip_dmatag, map, desc,
map 4407 dev/pci/if_de.c bus_dmamap_destroy(sc->tulip_dmatag, map);
map 4409 dev/pci/if_de.c *map_p = map;
map 4454 dev/pci/if_de.c bus_dmamap_t map;
map 4455 dev/pci/if_de.c if ((error = TULIP_TXMAP_CREATE(sc, &map)) == 0)
map 4456 dev/pci/if_de.c sc->tulip_txmaps[sc->tulip_txmaps_free++] = map;
map 4479 dev/pci/if_de.c bus_dmamap_t map;
map 4480 dev/pci/if_de.c if ((error = TULIP_RXMAP_CREATE(sc, &map)) == 0)
map 4481 dev/pci/if_de.c sc->tulip_rxmaps[sc->tulip_rxmaps_free++] = map;
map 764 dev/pci/if_devar.h #define TULIP_RXMAP_PRESYNC(sc, map) \
map 765 dev/pci/if_devar.h bus_dmamap_sync((sc)->tulip_dmatag, (map), 0, (map)->dm_mapsize, \
map 767 dev/pci/if_devar.h #define TULIP_RXMAP_POSTSYNC(sc, map) \
map 768 dev/pci/if_devar.h bus_dmamap_sync((sc)->tulip_dmatag, (map), 0, (map)->dm_mapsize, \
map 783 dev/pci/if_devar.h #define TULIP_TXMAP_PRESYNC(sc, map) \
map 784 dev/pci/if_devar.h bus_dmamap_sync((sc)->tulip_dmatag, (map), 0, (map)->dm_mapsize, \
map 786 dev/pci/if_devar.h #define TULIP_TXMAP_POSTSYNC(sc, map) \
map 787 dev/pci/if_devar.h bus_dmamap_sync((sc)->tulip_dmatag, (map), 0, (map)->dm_mapsize, \
map 938 dev/pci/if_em.c bus_dmamap_t map;
map 972 dev/pci/if_em.c map = tx_buffer->map;
map 974 dev/pci/if_em.c error = bus_dmamap_load_mbuf(sc->txtag, map, m_head, BUS_DMA_NOWAIT);
map 979 dev/pci/if_em.c EM_KASSERT(map->dm_nsegs!= 0, ("em_encap: empty packet"));
map 981 dev/pci/if_em.c if (map->dm_nsegs > sc->num_tx_desc_avail - 2)
map 997 dev/pci/if_em.c for (j = 0; j < map->dm_nsegs; j++) {
map 1004 dev/pci/if_em.c array_elements = em_fill_descriptors(map->dm_segs[j].ds_addr,
map 1005 dev/pci/if_em.c map->dm_segs[j].ds_len,
map 1032 dev/pci/if_em.c current_tx_desc->buffer_addr = htole64(map->dm_segs[j].ds_addr);
map 1034 dev/pci/if_em.c sc->txd_cmd | txd_lower | map->dm_segs[j].ds_len);
map 1049 dev/pci/if_em.c sc->num_tx_desc_avail -= map->dm_nsegs;
map 1052 dev/pci/if_em.c tx_buffer_mapped->map = tx_buffer->map;
map 1053 dev/pci/if_em.c tx_buffer->map = map;
map 1054 dev/pci/if_em.c bus_dmamap_sync(sc->txtag, map, 0, map->dm_mapsize,
map 1093 dev/pci/if_em.c bus_dmamap_unload(sc->txtag, map);
map 1882 dev/pci/if_em.c BUS_DMA_NOWAIT, &tx_buffer->map);
map 2000 dev/pci/if_em.c if (tx_buffer->map != NULL &&
map 2001 dev/pci/if_em.c tx_buffer->map->dm_nsegs > 0) {
map 2002 dev/pci/if_em.c bus_dmamap_sync(sc->txtag, tx_buffer->map,
map 2003 dev/pci/if_em.c 0, tx_buffer->map->dm_mapsize,
map 2006 dev/pci/if_em.c tx_buffer->map);
map 2012 dev/pci/if_em.c if (tx_buffer->map != NULL) {
map 2014 dev/pci/if_em.c tx_buffer->map);
map 2015 dev/pci/if_em.c tx_buffer->map = NULL;
map 2156 dev/pci/if_em.c if (tx_buffer->map->dm_nsegs > 0) {
map 2158 dev/pci/if_em.c tx_buffer->map, 0,
map 2159 dev/pci/if_em.c tx_buffer->map->dm_mapsize,
map 2162 dev/pci/if_em.c tx_buffer->map);
map 2219 dev/pci/if_em.c bus_dmamap_t map;
map 2252 dev/pci/if_em.c bus_dmamap_unload(sc->rxtag, rx_buffer->map);
map 2254 dev/pci/if_em.c map = rx_buffer->map;
map 2255 dev/pci/if_em.c rx_buffer->map = sc->rx_sparemap;
map 2256 dev/pci/if_em.c sc->rx_sparemap = map;
map 2258 dev/pci/if_em.c bus_dmamap_sync(sc->rxtag, rx_buffer->map, 0,
map 2259 dev/pci/if_em.c rx_buffer->map->dm_mapsize, BUS_DMASYNC_PREREAD);
map 2263 dev/pci/if_em.c sc->rx_desc_base[i].buffer_addr = htole64(rx_buffer->map->dm_segs[0].ds_addr);
map 2309 dev/pci/if_em.c &rx_buffer->map);
map 2453 dev/pci/if_em.c if (rx_buffer->map != NULL &&
map 2454 dev/pci/if_em.c rx_buffer->map->dm_nsegs > 0) {
map 2455 dev/pci/if_em.c bus_dmamap_sync(sc->rxtag, rx_buffer->map,
map 2456 dev/pci/if_em.c 0, rx_buffer->map->dm_mapsize,
map 2459 dev/pci/if_em.c rx_buffer->map);
map 2465 dev/pci/if_em.c if (rx_buffer->map != NULL) {
map 2467 dev/pci/if_em.c rx_buffer->map);
map 2468 dev/pci/if_em.c rx_buffer->map = NULL;
map 2523 dev/pci/if_em.c bus_dmamap_sync(sc->rxtag, sc->rx_buffer_area[i].map,
map 2524 dev/pci/if_em.c 0, sc->rx_buffer_area[i].map->dm_mapsize,
map 277 dev/pci/if_em.h bus_dmamap_t map; /* bus_dma map for packet */
map 451 dev/pci/if_ipw.c 1, sizeof (struct ipw_hdr), 0, BUS_DMA_NOWAIT, &shdr->map);
map 467 dev/pci/if_ipw.c MCLBYTES, 0, BUS_DMA_NOWAIT, &sbuf->map);
map 511 dev/pci/if_ipw.c 0, BUS_DMA_NOWAIT, &sbuf->map);
map 518 dev/pci/if_ipw.c error = bus_dmamap_load(sc->sc_dmat, sbuf->map,
map 528 dev/pci/if_ipw.c sbd->bd->physaddr = htole32(sbuf->map->dm_segs[0].ds_addr);
map 581 dev/pci/if_ipw.c bus_dmamap_destroy(sc->sc_dmat, sc->shdr_list[i].map);
map 584 dev/pci/if_ipw.c bus_dmamap_destroy(sc->sc_dmat, sc->tx_sbuf_list[i].map);
map 588 dev/pci/if_ipw.c if (sbuf->map != NULL) {
map 590 dev/pci/if_ipw.c bus_dmamap_unload(sc->sc_dmat, sbuf->map);
map 593 dev/pci/if_ipw.c bus_dmamap_destroy(sc->sc_dmat, sbuf->map);
map 761 dev/pci/if_ipw.c bus_dmamap_sync(sc->sc_dmat, sbuf->map, 0, sizeof (struct ipw_cmd),
map 780 dev/pci/if_ipw.c bus_dmamap_sync(sc->sc_dmat, sbuf->map, 0, sizeof state,
map 847 dev/pci/if_ipw.c bus_dmamap_sync(sc->sc_dmat, sbuf->map, 0, letoh32(status->len),
map 849 dev/pci/if_ipw.c bus_dmamap_unload(sc->sc_dmat, sbuf->map);
map 851 dev/pci/if_ipw.c error = bus_dmamap_load(sc->sc_dmat, sbuf->map, mtod(mnew, void *),
map 857 dev/pci/if_ipw.c error = bus_dmamap_load(sc->sc_dmat, sbuf->map,
map 870 dev/pci/if_ipw.c sbd->bd->physaddr = htole32(sbuf->map->dm_segs[0].ds_addr);
map 984 dev/pci/if_ipw.c bus_dmamap_unload(sc->sc_dmat, shdr->map);
map 990 dev/pci/if_ipw.c bus_dmamap_unload(sc->sc_dmat, sbuf->map);
map 1180 dev/pci/if_ipw.c error = bus_dmamap_load_mbuf(sc->sc_dmat, sbuf->map, m, BUS_DMA_NOWAIT);
map 1211 dev/pci/if_ipw.c error = bus_dmamap_load_mbuf(sc->sc_dmat, sbuf->map, m,
map 1221 dev/pci/if_ipw.c error = bus_dmamap_load(sc->sc_dmat, shdr->map, &shdr->hdr,
map 1226 dev/pci/if_ipw.c bus_dmamap_unload(sc->sc_dmat, sbuf->map);
map 1237 dev/pci/if_ipw.c sbd->bd->physaddr = htole32(shdr->map->dm_segs[0].ds_addr);
map 1239 dev/pci/if_ipw.c sbd->bd->nfrag = 1 + sbuf->map->dm_nsegs;
map 1258 dev/pci/if_ipw.c for (i = 0; i < sbuf->map->dm_nsegs; i++) {
map 1260 dev/pci/if_ipw.c sbd->bd->physaddr = htole32(sbuf->map->dm_segs[i].ds_addr);
map 1261 dev/pci/if_ipw.c sbd->bd->len = htole32(sbuf->map->dm_segs[i].ds_len);
map 1264 dev/pci/if_ipw.c if (i == sbuf->map->dm_nsegs - 1) {
map 1274 dev/pci/if_ipw.c sbuf->map->dm_segs[i].ds_len));
map 1284 dev/pci/if_ipw.c bus_dmamap_sync(sc->sc_dmat, sbuf->map, 0, sbuf->map->dm_mapsize,
map 1286 dev/pci/if_ipw.c bus_dmamap_sync(sc->sc_dmat, shdr->map, 0, sizeof (struct ipw_hdr),
map 50 dev/pci/if_ipwvar.h bus_dmamap_t map;
map 57 dev/pci/if_ipwvar.h bus_dmamap_t map;
map 400 dev/pci/if_iwi.c BUS_DMA_NOWAIT, &ring->map);
map 425 dev/pci/if_iwi.c error = bus_dmamap_load(sc->sc_dmat, ring->map, ring->desc,
map 451 dev/pci/if_iwi.c if (ring->map != NULL) {
map 453 dev/pci/if_iwi.c bus_dmamap_unload(sc->sc_dmat, ring->map);
map 458 dev/pci/if_iwi.c bus_dmamap_destroy(sc->sc_dmat, ring->map);
map 477 dev/pci/if_iwi.c &ring->map);
map 502 dev/pci/if_iwi.c error = bus_dmamap_load(sc->sc_dmat, ring->map, ring->desc,
map 517 dev/pci/if_iwi.c IWI_MAX_SCATTER, MCLBYTES, 0, BUS_DMA_NOWAIT, &data->map);
map 541 dev/pci/if_iwi.c bus_dmamap_unload(sc->sc_dmat, data->map);
map 557 dev/pci/if_iwi.c if (ring->map != NULL) {
map 559 dev/pci/if_iwi.c bus_dmamap_unload(sc->sc_dmat, ring->map);
map 564 dev/pci/if_iwi.c bus_dmamap_destroy(sc->sc_dmat, ring->map);
map 571 dev/pci/if_iwi.c bus_dmamap_unload(sc->sc_dmat, data->map);
map 574 dev/pci/if_iwi.c bus_dmamap_destroy(sc->sc_dmat, data->map);
map 590 dev/pci/if_iwi.c 0, BUS_DMA_NOWAIT, &data->map);
map 615 dev/pci/if_iwi.c error = bus_dmamap_load(sc->sc_dmat, data->map,
map 648 dev/pci/if_iwi.c bus_dmamap_unload(sc->sc_dmat, data->map);
map 651 dev/pci/if_iwi.c bus_dmamap_destroy(sc->sc_dmat, data->map);
map 904 dev/pci/if_iwi.c bus_dmamap_unload(sc->sc_dmat, data->map);
map 906 dev/pci/if_iwi.c error = bus_dmamap_load(sc->sc_dmat, data->map, mtod(mnew, void *),
map 912 dev/pci/if_iwi.c error = bus_dmamap_load(sc->sc_dmat, data->map,
map 925 dev/pci/if_iwi.c CSR_WRITE_4(sc, data->reg, data->map->dm_segs[0].ds_addr);
map 1104 dev/pci/if_iwi.c bus_dmamap_sync(sc->sc_dmat, data->map, 0, MCLBYTES,
map 1146 dev/pci/if_iwi.c bus_dmamap_unload(sc->sc_dmat, data->map);
map 1238 dev/pci/if_iwi.c bus_dmamap_sync(sc->sc_dmat, sc->cmdq.map,
map 1303 dev/pci/if_iwi.c error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m0,
map 1335 dev/pci/if_iwi.c error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m0,
map 1374 dev/pci/if_iwi.c desc->nseg = htole32(data->map->dm_nsegs);
map 1375 dev/pci/if_iwi.c for (i = 0; i < data->map->dm_nsegs; i++) {
map 1376 dev/pci/if_iwi.c desc->seg_addr[i] = htole32(data->map->dm_segs[i].ds_addr);
map 1377 dev/pci/if_iwi.c desc->seg_len[i] = htole16(data->map->dm_segs[i].ds_len);
map 1380 dev/pci/if_iwi.c bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
map 1382 dev/pci/if_iwi.c bus_dmamap_sync(sc->sc_dmat, txq->map,
map 1387 dev/pci/if_iwi.c letoh16(desc->len), data->map->dm_nsegs));
map 1668 dev/pci/if_iwi.c bus_dmamap_t map;
map 1677 dev/pci/if_iwi.c BUS_DMA_NOWAIT, &map);
map 1700 dev/pci/if_iwi.c error = bus_dmamap_load(sc->sc_dmat, map, virtaddr, size, NULL,
map 1712 dev/pci/if_iwi.c bus_dmamap_sync(sc->sc_dmat, map, 0, size, BUS_DMASYNC_PREWRITE);
map 1722 dev/pci/if_iwi.c src = map->dm_segs[0].ds_addr;
map 1791 dev/pci/if_iwi.c fail5: bus_dmamap_sync(sc->sc_dmat, map, 0, size, BUS_DMASYNC_POSTWRITE);
map 1792 dev/pci/if_iwi.c bus_dmamap_unload(sc->sc_dmat, map);
map 1795 dev/pci/if_iwi.c fail2: bus_dmamap_destroy(sc->sc_dmat, map);
map 2200 dev/pci/if_iwi.c CSR_WRITE_4(sc, IWI_CSR_CMD_BASE, sc->cmdq.map->dm_segs[0].ds_addr);
map 2204 dev/pci/if_iwi.c CSR_WRITE_4(sc, IWI_CSR_TX1_BASE, sc->txq[0].map->dm_segs[0].ds_addr);
map 2208 dev/pci/if_iwi.c CSR_WRITE_4(sc, IWI_CSR_TX2_BASE, sc->txq[1].map->dm_segs[0].ds_addr);
map 2212 dev/pci/if_iwi.c CSR_WRITE_4(sc, IWI_CSR_TX3_BASE, sc->txq[2].map->dm_segs[0].ds_addr);
map 2216 dev/pci/if_iwi.c CSR_WRITE_4(sc, IWI_CSR_TX4_BASE, sc->txq[3].map->dm_segs[0].ds_addr);
map 2222 dev/pci/if_iwi.c CSR_WRITE_4(sc, data->reg, data->map->dm_segs[0].ds_addr);
map 60 dev/pci/if_iwivar.h bus_dmamap_t map;
map 69 dev/pci/if_iwivar.h bus_dmamap_t map;
map 75 dev/pci/if_iwivar.h bus_dmamap_t map;
map 87 dev/pci/if_iwivar.h bus_dmamap_t map;
map 658 dev/pci/if_ixgb.c bus_dmamap_t map;
map 680 dev/pci/if_ixgb.c map = tx_buffer->map;
map 682 dev/pci/if_ixgb.c error = bus_dmamap_load_mbuf(sc->txtag, map,
map 688 dev/pci/if_ixgb.c IXGB_KASSERT(map->dm_nsegs != 0, ("ixgb_encap: empty packet"));
map 690 dev/pci/if_ixgb.c if (map->dm_nsegs > sc->num_tx_desc_avail)
map 700 dev/pci/if_ixgb.c for (j = 0; j < map->dm_nsegs; j++) {
map 704 dev/pci/if_ixgb.c current_tx_desc->buff_addr = htole64(map->dm_segs[j].ds_addr);
map 705 dev/pci/if_ixgb.c current_tx_desc->cmd_type_len = htole32((sc->txd_cmd | map->dm_segs[j].ds_len));
map 713 dev/pci/if_ixgb.c sc->num_tx_desc_avail -= map->dm_nsegs;
map 717 dev/pci/if_ixgb.c bus_dmamap_sync(sc->txtag, map, 0, map->dm_mapsize,
map 738 dev/pci/if_ixgb.c bus_dmamap_unload(sc->txtag, map);
map 1194 dev/pci/if_ixgb.c BUS_DMA_NOWAIT, &tx_buffer->map);
map 1278 dev/pci/if_ixgb.c if (tx_buffer->map != NULL &&
map 1279 dev/pci/if_ixgb.c tx_buffer->map->dm_nsegs > 0) {
map 1280 dev/pci/if_ixgb.c bus_dmamap_sync(sc->txtag, tx_buffer->map,
map 1281 dev/pci/if_ixgb.c 0, tx_buffer->map->dm_mapsize,
map 1284 dev/pci/if_ixgb.c tx_buffer->map);
map 1291 dev/pci/if_ixgb.c if (tx_buffer->map != NULL) {
map 1293 dev/pci/if_ixgb.c tx_buffer->map);
map 1294 dev/pci/if_ixgb.c tx_buffer->map = NULL;
map 1415 dev/pci/if_ixgb.c if (tx_buffer->map->dm_nsegs > 0) {
map 1416 dev/pci/if_ixgb.c bus_dmamap_sync(sc->txtag, tx_buffer->map,
map 1417 dev/pci/if_ixgb.c 0, tx_buffer->map->dm_mapsize,
map 1419 dev/pci/if_ixgb.c bus_dmamap_unload(sc->txtag, tx_buffer->map);
map 1500 dev/pci/if_ixgb.c error = bus_dmamap_load_mbuf(sc->rxtag, rx_buffer->map,
map 1507 dev/pci/if_ixgb.c sc->rx_desc_base[i].buff_addr = htole64(rx_buffer->map->dm_segs[0].ds_addr);
map 1508 dev/pci/if_ixgb.c bus_dmamap_sync(sc->rxtag, rx_buffer->map, 0,
map 1509 dev/pci/if_ixgb.c rx_buffer->map->dm_mapsize, BUS_DMASYNC_PREREAD);
map 1546 dev/pci/if_ixgb.c &rx_buffer->map);
map 1701 dev/pci/if_ixgb.c if (rx_buffer->map != NULL &&
map 1702 dev/pci/if_ixgb.c rx_buffer->map->dm_nsegs > 0) {
map 1703 dev/pci/if_ixgb.c bus_dmamap_sync(sc->rxtag, rx_buffer->map,
map 1704 dev/pci/if_ixgb.c 0, rx_buffer->map->dm_mapsize,
map 1707 dev/pci/if_ixgb.c rx_buffer->map);
map 1713 dev/pci/if_ixgb.c if (rx_buffer->map != NULL) {
map 1715 dev/pci/if_ixgb.c rx_buffer->map);
map 1716 dev/pci/if_ixgb.c rx_buffer->map = NULL;
map 1769 dev/pci/if_ixgb.c bus_dmamap_sync(sc->rxtag, sc->rx_buffer_area[i].map,
map 1770 dev/pci/if_ixgb.c 0, sc->rx_buffer_area[i].map->dm_mapsize,
map 1772 dev/pci/if_ixgb.c bus_dmamap_unload(sc->rxtag, sc->rx_buffer_area[i].map);
map 207 dev/pci/if_ixgb.h bus_dmamap_t map; /* bus_dma map for packet */
map 510 dev/pci/if_lmc.c bus_dmamap_t map;
map 575 dev/pci/if_lmc.c map = LMC_GETCTX(me, bus_dmamap_t);
map 576 dev/pci/if_lmc.c LMC_RXMAP_POSTSYNC(sc, map);
map 577 dev/pci/if_lmc.c bus_dmamap_unload(sc->lmc_dmatag, map);
map 578 dev/pci/if_lmc.c sc->lmc_rxmaps[sc->lmc_rxmaps_free++] = map;
map 606 dev/pci/if_lmc.c map = LMC_GETCTX(me, bus_dmamap_t);
map 607 dev/pci/if_lmc.c bus_dmamap_sync(sc->lmc_dmatag, map, 0, me->m_len,
map 609 dev/pci/if_lmc.c bus_dmamap_unload(sc->lmc_dmatag, map);
map 610 dev/pci/if_lmc.c sc->lmc_rxmaps[sc->lmc_rxmaps_free++] = map;
map 632 dev/pci/if_lmc.c map = LMC_GETCTX(me, bus_dmamap_t);
map 633 dev/pci/if_lmc.c bus_dmamap_unload(sc->lmc_dmatag, map);
map 634 dev/pci/if_lmc.c sc->lmc_rxmaps[sc->lmc_rxmaps_free++] = map;
map 694 dev/pci/if_lmc.c map = sc->lmc_rxmaps[--sc->lmc_rxmaps_free];
map 703 dev/pci/if_lmc.c LMC_SETCTX(ms, map);
map 704 dev/pci/if_lmc.c error = bus_dmamap_load(sc->lmc_dmatag, map,
map 720 dev/pci/if_lmc.c nextout->d_addr1 = htole32(map->dm_segs[0].ds_addr);
map 721 dev/pci/if_lmc.c if (map->dm_nsegs == 2) {
map 722 dev/pci/if_lmc.c nextout->d_addr2 = htole32(map->dm_segs[1].ds_addr);
map 725 dev/pci/if_lmc.c map->dm_segs[0].ds_len,
map 726 dev/pci/if_lmc.c map->dm_segs[1].ds_len));
map 731 dev/pci/if_lmc.c map->dm_segs[0].ds_len, 0));
map 771 dev/pci/if_lmc.c bus_dmamap_t map = LMC_GETCTX(m, bus_dmamap_t);
map 772 dev/pci/if_lmc.c LMC_TXMAP_POSTSYNC(sc, map);
map 773 dev/pci/if_lmc.c sc->lmc_txmaps[sc->lmc_txmaps_free++] = map;
map 991 dev/pci/if_lmc.c bus_dmamap_t map;
map 1034 dev/pci/if_lmc.c map = sc->lmc_txmaps[sc->lmc_txmaps_free-1];
map 1042 dev/pci/if_lmc.c error = bus_dmamap_load_mbuf(sc->lmc_dmatag, map, m, BUS_DMA_NOWAIT);
map 1057 dev/pci/if_lmc.c error = bus_dmamap_load_mbuf(sc->lmc_dmatag, map, m,
map 1069 dev/pci/if_lmc.c if ((free -= (map->dm_nsegs + 1) / 2) <= 0
map 1084 dev/pci/if_lmc.c bus_dmamap_unload(sc->lmc_dmatag, map);
map 1087 dev/pci/if_lmc.c for (; map->dm_nsegs - segcnt > 1; segcnt += 2) {
map 1097 dev/pci/if_lmc.c eop->d_addr1 = htole32(map->dm_segs[segcnt].ds_addr);
map 1098 dev/pci/if_lmc.c eop->d_addr2 = htole32(map->dm_segs[segcnt+1].ds_addr);
map 1100 dev/pci/if_lmc.c map->dm_segs[segcnt].ds_len,
map 1101 dev/pci/if_lmc.c map->dm_segs[segcnt+1].ds_len));
map 1106 dev/pci/if_lmc.c if (segcnt < map->dm_nsegs) {
map 1116 dev/pci/if_lmc.c eop->d_addr1 = htole32(map->dm_segs[segcnt].ds_addr);
map 1119 dev/pci/if_lmc.c map->dm_segs[segcnt].ds_len, 0));
map 1123 dev/pci/if_lmc.c LMC_TXMAP_PRESYNC(sc, map);
map 1124 dev/pci/if_lmc.c LMC_SETCTX(m, map);
map 1125 dev/pci/if_lmc.c map = NULL;
map 271 dev/pci/if_lmc_common.c bus_dmamap_t map;
map 277 dev/pci/if_lmc_common.c map = LMC_GETCTX(m, bus_dmamap_t);
map 278 dev/pci/if_lmc_common.c bus_dmamap_unload(sc->lmc_dmatag, map);
map 279 dev/pci/if_lmc_common.c sc->lmc_txmaps[sc->lmc_txmaps_free++] = map;
map 315 dev/pci/if_lmc_common.c bus_dmamap_t map;
map 320 dev/pci/if_lmc_common.c map = LMC_GETCTX(m, bus_dmamap_t);
map 321 dev/pci/if_lmc_common.c bus_dmamap_unload(sc->lmc_dmatag, map);
map 322 dev/pci/if_lmc_common.c sc->lmc_rxmaps[sc->lmc_rxmaps_free++] = map;
map 349 dev/pci/if_lmc_obsd.c bus_dmamap_t map;
map 351 dev/pci/if_lmc_obsd.c BUS_DMA_NOWAIT, &map);
map 353 dev/pci/if_lmc_obsd.c error = bus_dmamap_load(sc->lmc_dmatag, map, desc,
map 356 dev/pci/if_lmc_obsd.c bus_dmamap_destroy(sc->lmc_dmatag, map);
map 358 dev/pci/if_lmc_obsd.c *map_p = map;
map 391 dev/pci/if_lmc_obsd.c bus_dmamap_t map;
map 392 dev/pci/if_lmc_obsd.c if ((error = LMC_TXMAP_CREATE(sc, &map)) == 0)
map 393 dev/pci/if_lmc_obsd.c sc->lmc_txmaps[sc->lmc_txmaps_free++] = map;
map 416 dev/pci/if_lmc_obsd.c bus_dmamap_t map;
map 417 dev/pci/if_lmc_obsd.c if ((error = LMC_RXMAP_CREATE(sc, &map)) == 0)
map 418 dev/pci/if_lmc_obsd.c sc->lmc_rxmaps[sc->lmc_rxmaps_free++] = map;
map 463 dev/pci/if_lmcvar.h #define LMC_RXMAP_PRESYNC(sc, map) \
map 464 dev/pci/if_lmcvar.h bus_dmamap_sync((sc)->lmc_dmatag, (map), 0, (map)->dm_mapsize, \
map 466 dev/pci/if_lmcvar.h #define LMC_RXMAP_POSTSYNC(sc, map) \
map 467 dev/pci/if_lmcvar.h bus_dmamap_sync((sc)->lmc_dmatag, (map), 0, (map)->dm_mapsize, \
map 482 dev/pci/if_lmcvar.h #define LMC_TXMAP_PRESYNC(sc, map) \
map 483 dev/pci/if_lmcvar.h bus_dmamap_sync((sc)->lmc_dmatag, (map), 0, (map)->dm_mapsize, \
map 485 dev/pci/if_lmcvar.h #define LMC_TXMAP_POSTSYNC(sc, map) \
map 486 dev/pci/if_lmcvar.h bus_dmamap_sync((sc)->lmc_dmatag, (map), 0, (map)->dm_mapsize, \
map 554 dev/pci/if_myx.c bus_dmamap_t map = sc->sc_cmddma.mxm_map;
map 598 dev/pci/if_myx.c mc->mc_addr_high = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
map 599 dev/pci/if_myx.c mc->mc_addr_low = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
map 608 dev/pci/if_myx.c bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
map 633 dev/pci/if_myx.c bus_dmamap_t map = sc->sc_cmddma.mxm_map;
map 637 dev/pci/if_myx.c bc->bc_addr_high = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
map 638 dev/pci/if_myx.c bc->bc_addr_low = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
map 652 dev/pci/if_myx.c bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
map 672 dev/pci/if_myx.c bus_dmamap_t map = sc->sc_cmddma.mxm_map;
map 681 dev/pci/if_myx.c rc.rc_addr_high = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
map 682 dev/pci/if_myx.c rc.rc_addr_low = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr));
map 695 dev/pci/if_myx.c bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
map 963 dev/pci/if_myx.c bus_dmamap_t map = sc->sc_stsdma.mxm_map;
map 968 dev/pci/if_myx.c bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
map 1026 dev/pci/if_myx.c bus_dmamap_t map;
map 1077 dev/pci/if_myx.c map = sc->sc_rxdma.mxm_map;
map 1078 dev/pci/if_myx.c mc.mc_data0 = MYX_ADDRLOW(map->dm_segs[0].ds_addr);
map 1079 dev/pci/if_myx.c mc.mc_data1 = MYX_ADDRHIGH(map->dm_segs[0].ds_addr);
map 1132 dev/pci/if_myx.c map = mb->mb_dmamap;
map 1133 dev/pci/if_myx.c mb->mb_m = myx_getbuf(sc, map, 1);
map 1135 dev/pci/if_myx.c bus_dmamap_destroy(sc->sc_dmat, map);
map 1139 dev/pci/if_myx.c bus_dmamap_sync(sc->sc_dmat, map, 0,
map 1143 dev/pci/if_myx.c htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
map 1145 dev/pci/if_myx.c htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
map 1163 dev/pci/if_myx.c map = mb->mb_dmamap;
map 1164 dev/pci/if_myx.c mb->mb_m = myx_getbuf(sc, map, 1);
map 1166 dev/pci/if_myx.c bus_dmamap_destroy(sc->sc_dmat, map);
map 1170 dev/pci/if_myx.c bus_dmamap_sync(sc->sc_dmat, map, 0,
map 1174 dev/pci/if_myx.c htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
map 1176 dev/pci/if_myx.c htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr));
map 1199 dev/pci/if_myx.c map = sc->sc_stsdma.mxm_map;
map 1202 dev/pci/if_myx.c mc.mc_data0 = MYX_ADDRLOW(map->dm_segs[0].ds_addr);
map 1203 dev/pci/if_myx.c mc.mc_data1 = MYX_ADDRHIGH(map->dm_segs[0].ds_addr);
map 1210 dev/pci/if_myx.c bus_dmamap_sync(sc->sc_dmat, map, 0,
map 1211 dev/pci/if_myx.c map->dm_mapsize, BUS_DMASYNC_PREWRITE);
map 1250 dev/pci/if_myx.c myx_getbuf(struct myx_softc *sc, bus_dmamap_t map, int wait)
map 1263 dev/pci/if_myx.c if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
map 561 dev/pci/if_nfe.c bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
map 569 dev/pci/if_nfe.c bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
map 578 dev/pci/if_nfe.c bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
map 585 dev/pci/if_nfe.c bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
map 591 dev/pci/if_nfe.c bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
map 599 dev/pci/if_nfe.c bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
map 606 dev/pci/if_nfe.c bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
map 612 dev/pci/if_nfe.c bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
map 619 dev/pci/if_nfe.c bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
map 627 dev/pci/if_nfe.c bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
map 722 dev/pci/if_nfe.c bus_dmamap_sync(sc->sc_dmat, data->map, 0,
map 723 dev/pci/if_nfe.c data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
map 724 dev/pci/if_nfe.c bus_dmamap_unload(sc->sc_dmat, data->map);
map 726 dev/pci/if_nfe.c error = bus_dmamap_load(sc->sc_dmat, data->map,
map 733 dev/pci/if_nfe.c error = bus_dmamap_load(sc->sc_dmat, data->map,
map 744 dev/pci/if_nfe.c physaddr = data->map->dm_segs[0].ds_addr;
map 880 dev/pci/if_nfe.c bus_dmamap_t map;
map 887 dev/pci/if_nfe.c map = sc->txq.data[first].map;
map 889 dev/pci/if_nfe.c error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0, BUS_DMA_NOWAIT);
map 896 dev/pci/if_nfe.c if (sc->txq.queued + map->dm_nsegs >= NFE_TX_RING_COUNT - 1) {
map 897 dev/pci/if_nfe.c bus_dmamap_unload(sc->sc_dmat, map);
map 914 dev/pci/if_nfe.c for (i = 0; i < map->dm_nsegs; i++) {
map 921 dev/pci/if_nfe.c htole32(map->dm_segs[i].ds_addr >> 32);
map 924 dev/pci/if_nfe.c htole32(map->dm_segs[i].ds_addr & 0xffffffff);
map 925 dev/pci/if_nfe.c desc64->length = htole16(map->dm_segs[i].ds_len - 1);
map 933 dev/pci/if_nfe.c desc32->physaddr = htole32(map->dm_segs[i].ds_addr);
map 934 dev/pci/if_nfe.c desc32->length = htole16(map->dm_segs[i].ds_len - 1);
map 938 dev/pci/if_nfe.c if (map->dm_nsegs > 1) {
map 979 dev/pci/if_nfe.c data->active = map;
map 981 dev/pci/if_nfe.c bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
map 1201 dev/pci/if_nfe.c NFE_RX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
map 1224 dev/pci/if_nfe.c error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
map 1233 dev/pci/if_nfe.c ring->physaddr = ring->map->dm_segs[0].ds_addr;
map 1270 dev/pci/if_nfe.c MCLBYTES, 0, BUS_DMA_NOWAIT, &data->map);
map 1284 dev/pci/if_nfe.c error = bus_dmamap_load(sc->sc_dmat, data->map,
map 1292 dev/pci/if_nfe.c physaddr = data->map->dm_segs[0].ds_addr;
map 1311 dev/pci/if_nfe.c bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
map 1335 dev/pci/if_nfe.c bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
map 1357 dev/pci/if_nfe.c bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
map 1358 dev/pci/if_nfe.c ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
map 1359 dev/pci/if_nfe.c bus_dmamap_unload(sc->sc_dmat, ring->map);
map 1368 dev/pci/if_nfe.c if (data->map != NULL) {
map 1369 dev/pci/if_nfe.c bus_dmamap_sync(sc->sc_dmat, data->map, 0,
map 1370 dev/pci/if_nfe.c data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
map 1371 dev/pci/if_nfe.c bus_dmamap_unload(sc->sc_dmat, data->map);
map 1372 dev/pci/if_nfe.c bus_dmamap_destroy(sc->sc_dmat, data->map);
map 1519 dev/pci/if_nfe.c NFE_TX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
map 1543 dev/pci/if_nfe.c error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
map 1552 dev/pci/if_nfe.c ring->physaddr = ring->map->dm_segs[0].ds_addr;
map 1557 dev/pci/if_nfe.c &ring->data[i].map);
map 1594 dev/pci/if_nfe.c bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
map 1617 dev/pci/if_nfe.c bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
map 1618 dev/pci/if_nfe.c ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
map 1619 dev/pci/if_nfe.c bus_dmamap_unload(sc->sc_dmat, ring->map);
map 1639 dev/pci/if_nfe.c if (data->map == NULL)
map 1641 dev/pci/if_nfe.c bus_dmamap_destroy(sc->sc_dmat, data->map);
map 22 dev/pci/if_nfevar.h bus_dmamap_t map;
map 28 dev/pci/if_nfevar.h bus_dmamap_t map;
map 46 dev/pci/if_nfevar.h bus_dmamap_t map;
map 51 dev/pci/if_nfevar.h bus_dmamap_t map;
map 1110 dev/pci/if_sis.c BUS_DMA_NOWAIT, &sc->sis_ldata->sis_rx_list[i].map) != 0) {
map 1124 dev/pci/if_sis.c &sc->sis_ldata->sis_tx_list[i].map) != 0) {
map 1239 dev/pci/if_sis.c bus_dmamap_t map;
map 1267 dev/pci/if_sis.c map = c->map;
map 1268 dev/pci/if_sis.c c->map = sc->sc_rx_sparemap;
map 1269 dev/pci/if_sis.c sc->sc_rx_sparemap = map;
map 1271 dev/pci/if_sis.c bus_dmamap_sync(sc->sc_dmat, c->map, 0, c->map->dm_mapsize,
map 1277 dev/pci/if_sis.c c->sis_ptr = c->map->dm_segs[0].ds_addr + sizeof(u_int64_t);
map 1330 dev/pci/if_sis.c bus_dmamap_sync(sc->sc_dmat, cur_rx->map, 0,
map 1331 dev/pci/if_sis.c cur_rx->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
map 1425 dev/pci/if_sis.c if (cur_tx->map->dm_nsegs != 0) {
map 1426 dev/pci/if_sis.c bus_dmamap_t map = cur_tx->map;
map 1428 dev/pci/if_sis.c bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
map 1430 dev/pci/if_sis.c bus_dmamap_unload(sc->sc_dmat, map);
map 1547 dev/pci/if_sis.c bus_dmamap_t map;
map 1549 dev/pci/if_sis.c map = sc->sc_tx_sparemap;
map 1550 dev/pci/if_sis.c if (bus_dmamap_load_mbuf(sc->sc_dmat, map,
map 1561 dev/pci/if_sis.c for (i = 0; i < map->dm_nsegs; i++) {
map 1565 dev/pci/if_sis.c f->sis_ctl = SIS_CMDSTS_MORE | map->dm_segs[i].ds_len;
map 1566 dev/pci/if_sis.c f->sis_ptr = map->dm_segs[i].ds_addr;
map 1573 dev/pci/if_sis.c bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
map 2033 dev/pci/if_sis.c if (sc->sis_ldata->sis_rx_list[i].map->dm_nsegs != 0) {
map 2034 dev/pci/if_sis.c bus_dmamap_t map = sc->sis_ldata->sis_rx_list[i].map;
map 2036 dev/pci/if_sis.c bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
map 2038 dev/pci/if_sis.c bus_dmamap_unload(sc->sc_dmat, map);
map 2052 dev/pci/if_sis.c if (sc->sis_ldata->sis_tx_list[i].map->dm_nsegs != 0) {
map 2053 dev/pci/if_sis.c bus_dmamap_t map = sc->sis_ldata->sis_tx_list[i].map;
map 2055 dev/pci/if_sis.c bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
map 2057 dev/pci/if_sis.c bus_dmamap_unload(sc->sc_dmat, map);
map 333 dev/pci/if_sisreg.h bus_dmamap_t map;
map 1269 dev/pci/if_vic.c vic_alloc_mbuf(struct vic_softc *sc, bus_dmamap_t map)
map 1284 dev/pci/if_vic.c if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT) != 0) {
map 1636 dev/pci/if_vr.c bus_dmamap_t map;
map 1662 dev/pci/if_vr.c map = sc->vr_cdata.vr_rx_chain[i].vr_map;
map 1663 dev/pci/if_vr.c if (map != NULL) {
map 1664 dev/pci/if_vr.c if (map->dm_nsegs > 0)
map 1665 dev/pci/if_vr.c bus_dmamap_unload(sc->sc_dmat, map);
map 1666 dev/pci/if_vr.c bus_dmamap_destroy(sc->sc_dmat, map);
map 1677 dev/pci/if_vr.c bus_dmamap_t map;
map 1683 dev/pci/if_vr.c map = sc->vr_cdata.vr_tx_chain[i].vr_map;
map 1684 dev/pci/if_vr.c if (map != NULL) {
map 1685 dev/pci/if_vr.c if (map->dm_nsegs > 0)
map 1686 dev/pci/if_vr.c bus_dmamap_unload(sc->sc_dmat, map);
map 1687 dev/pci/if_vr.c bus_dmamap_destroy(sc->sc_dmat, map);
map 391 dev/pci/if_wpi.c error = bus_dmamap_create(tag, size, 1, size, 0, flags, &dma->map);
map 404 dev/pci/if_wpi.c error = bus_dmamap_load_raw(tag, dma->map, &dma->seg, 1, size, flags);
map 410 dev/pci/if_wpi.c dma->paddr = dma->map->dm_segs[0].ds_addr;
map 423 dev/pci/if_wpi.c if (dma->map != NULL) {
map 425 dev/pci/if_wpi.c bus_dmamap_unload(dma->tag, dma->map);
map 430 dev/pci/if_wpi.c bus_dmamap_destroy(dma->tag, dma->map);
map 431 dev/pci/if_wpi.c dma->map = NULL;
map 665 dev/pci/if_wpi.c &data->map);
map 704 dev/pci/if_wpi.c bus_dmamap_unload(sc->sc_dmat, data->map);
map 727 dev/pci/if_wpi.c bus_dmamap_unload(sc->sc_dmat, data->map);
map 1348 dev/pci/if_wpi.c bus_dmamap_unload(sc->sc_dmat, data->map);
map 1374 dev/pci/if_wpi.c bus_dmamap_unload(sc->sc_dmat, data->map);
map 1693 dev/pci/if_wpi.c error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m0,
map 1724 dev/pci/if_wpi.c error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m0,
map 1738 dev/pci/if_wpi.c ring->qid, ring->cur, m0->m_pkthdr.len, data->map->dm_nsegs));
map 1742 dev/pci/if_wpi.c (1 + data->map->dm_nsegs) << 24);
map 1746 dev/pci/if_wpi.c for (i = 1; i <= data->map->dm_nsegs; i++) {
map 1748 dev/pci/if_wpi.c htole32(data->map->dm_segs[i - 1].ds_addr);
map 1750 dev/pci/if_wpi.c htole32(data->map->dm_segs[i - 1].ds_len);
map 2317 dev/pci/if_wpi.c error = bus_dmamap_load(sc->sc_dmat, data->map, mtod(m0, void *),
map 2332 dev/pci/if_wpi.c desc->segs[1].addr = htole32(data->map->dm_segs[0].ds_addr);
map 2333 dev/pci/if_wpi.c desc->segs[1].len = htole32(data->map->dm_segs[0].ds_len);
map 2545 dev/pci/if_wpi.c error = bus_dmamap_load(sc->sc_dmat, data->map, cmd, pktlen, NULL,
map 2556 dev/pci/if_wpi.c desc->segs[0].addr = htole32(data->map->dm_segs[0].ds_addr);
map 2557 dev/pci/if_wpi.c desc->segs[0].len = htole32(data->map->dm_segs[0].ds_len);
map 58 dev/pci/if_wpivar.h bus_dmamap_t map;
map 66 dev/pci/if_wpivar.h bus_dmamap_t map;
map 315 dev/pci/musycc.c MCLBYTES, 0, BUS_DMA_NOWAIT, &dd->map)) {
map 858 dev/pci/musycc.c if (dd->map->dm_nsegs != 0) {
map 859 dev/pci/musycc.c bus_dmamap_t map = dd->map;
map 861 dev/pci/musycc.c bus_dmamap_unload(mg->mg_dmat, map);
map 889 dev/pci/musycc.c if (dd->map->dm_nsegs != 0) {
map 890 dev/pci/musycc.c bus_dmamap_t map = dd->map;
map 892 dev/pci/musycc.c bus_dmamap_unload(mg->mg_dmat, map);
map 936 dev/pci/musycc.c bus_dmamap_t map;
map 963 dev/pci/musycc.c map = c->map;
map 964 dev/pci/musycc.c c->map = mg->mg_rx_sparemap;
map 965 dev/pci/musycc.c mg->mg_rx_sparemap = map;
map 967 dev/pci/musycc.c bus_dmamap_sync(mg->mg_dmat, c->map, 0, c->map->dm_mapsize,
map 971 dev/pci/musycc.c c->data = htole32(c->map->dm_segs[0].ds_addr);
map 990 dev/pci/musycc.c bus_dmamap_t map;
map 997 dev/pci/musycc.c map = mg->mg_tx_sparemap;
map 998 dev/pci/musycc.c if (bus_dmamap_load_mbuf(mg->mg_dmat, map, m_head,
map 1008 dev/pci/musycc.c if (map->dm_nsegs + mg->mg_dma_d[c].tx_use >= mg->mg_dma_d[c].tx_cnt) {
map 1015 dev/pci/musycc.c while (i < map->dm_nsegs) {
map 1017 dev/pci/musycc.c MUSYCC_STATUS_LEN(map->dm_segs[i].ds_len);
map 1022 dev/pci/musycc.c cur->data = htole32(map->dm_segs[i].ds_addr);
map 1028 dev/pci/musycc.c if (++i >= map->dm_nsegs)
map 1033 dev/pci/musycc.c bus_dmamap_sync(mg->mg_dmat, map, 0, map->dm_mapsize,
map 1037 dev/pci/musycc.c mg->mg_tx_sparemap = cur->map;
map 1038 dev/pci/musycc.c cur->map = map;
map 1198 dev/pci/musycc.c bus_dmamap_sync(mg->mg_dmat, cur_rx->map, 0,
map 1199 dev/pci/musycc.c cur_rx->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
map 1273 dev/pci/musycc.c if (dd->map->dm_nsegs != 0) {
map 1274 dev/pci/musycc.c bus_dmamap_sync(mg->mg_dmat, dd->map, 0,
map 1275 dev/pci/musycc.c dd->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
map 1276 dev/pci/musycc.c bus_dmamap_unload(mg->mg_dmat, dd->map);
map 49 dev/pci/musyccvar.h bus_dmamap_t map;
map 1467 dev/pci/safe.c for (i = 0; i < op->map->dm_nsegs; i++) {
map 1468 dev/pci/safe.c if (op->map->dm_segs[i].ds_addr & 3)
map 1470 dev/pci/safe.c if (i != (op->map->dm_nsegs - 1) &&
map 1471 dev/pci/safe.c (op->map->dm_segs[i].ds_len & 3))
map 1567 dev/pci/safe.c if (op->map->dm_nsegs <= 0)
map 1570 dev/pci/safe.c for (i = 0; i < op->map->dm_nsegs-1; i++) {
map 1571 dev/pci/safe.c if (op->map->dm_segs[i].ds_len % SAFE_MAX_DSIZE)
map 1573 dev/pci/safe.c if (op->map->dm_segs[i].ds_len != SAFE_MAX_DSIZE)
map 86 dev/pci/safevar.h bus_dmamap_t map;
map 122 dev/pci/safevar.h #define re_src_map re_src.map
map 123 dev/pci/safevar.h #define re_src_nsegs re_src.map->dm_nsegs
map 124 dev/pci/safevar.h #define re_src_segs re_src.map->dm_segs
map 125 dev/pci/safevar.h #define re_src_mapsize re_src.map->dm_mapsize
map 129 dev/pci/safevar.h #define re_dst_map re_dst.map
map 130 dev/pci/safevar.h #define re_dst_nsegs re_dst.map->dm_nsegs
map 131 dev/pci/safevar.h #define re_dst_segs re_dst.map->dm_segs
map 132 dev/pci/safevar.h #define re_dst_mapsize re_dst.map->dm_mapsize
map 80 dev/pci/sv.c bus_dmamap_t map;
map 87 dev/pci/sv.c #define DMAADDR(map) ((map)->segs[0].ds_addr)
map 88 dev/pci/sv.c #define KERNADDR(map) ((void *)((map)->addr))
map 473 dev/pci/sv.c 0, BUS_DMA_NOWAIT, &p->map);
map 477 dev/pci/sv.c error = bus_dmamap_load(sc->sc_dmatag, p->map, p->addr, p->size, NULL,
map 484 dev/pci/sv.c bus_dmamap_destroy(sc->sc_dmatag, p->map);
map 497 dev/pci/sv.c bus_dmamap_unload(sc->sc_dmatag, p->map);
map 498 dev/pci/sv.c bus_dmamap_destroy(sc->sc_dmatag, p->map);
map 1693 dev/pci/ubsec.c ubsec_dmamap_aligned(bus_dmamap_t map)
map 1697 dev/pci/ubsec.c for (i = 0; i < map->dm_nsegs; i++) {
map 1698 dev/pci/ubsec.c if (map->dm_segs[i].ds_addr & 3)
map 1700 dev/pci/ubsec.c if ((i != (map->dm_nsegs - 1)) &&
map 1701 dev/pci/ubsec.c (map->dm_segs[i].ds_len & 3))
map 96 dev/pci/yds.c #define DMAADDR(p) ((p)->map->dm_segs[0].ds_addr)
map 470 dev/pci/yds.c mp, (void *) sc->sc_ctrldata.map->dm_segs[0].ds_addr));
map 509 dev/pci/yds.c bus_dmamap_sync(sc->sc_dmatag, p->map,
map 963 dev/pci/yds.c bus_dmamap_sync(sc->sc_dmatag, sc->sc_ctrldata.map,
map 980 dev/pci/yds.c sc->sc_play.dma->map,
map 994 dev/pci/yds.c sc->sc_play.dma->map,
map 1003 dev/pci/yds.c bus_dmamap_sync(sc->sc_dmatag, sc->sc_ctrldata.map,
map 1020 dev/pci/yds.c sc->sc_rec.dma->map,
map 1034 dev/pci/yds.c sc->sc_rec.dma->map,
map 1066 dev/pci/yds.c 0, BUS_DMA_NOWAIT, &p->map);
map 1070 dev/pci/yds.c error = bus_dmamap_load(sc->sc_dmatag, p->map, p->addr, p->size, NULL,
map 1077 dev/pci/yds.c bus_dmamap_destroy(sc->sc_dmatag, p->map);
map 1090 dev/pci/yds.c bus_dmamap_unload(sc->sc_dmatag, p->map);
map 1091 dev/pci/yds.c bus_dmamap_destroy(sc->sc_dmatag, p->map);
map 1456 dev/pci/yds.c bus_dmamap_sync(sc->sc_dmatag, sc->sc_ctrldata.map,
map 1462 dev/pci/yds.c bus_dmamap_sync(sc->sc_dmatag, p->map, 0, blksize,
map 1548 dev/pci/yds.c bus_dmamap_sync(sc->sc_dmatag, sc->sc_ctrldata.map,
map 1555 dev/pci/yds.c bus_dmamap_sync(sc->sc_dmatag, p->map, 0, blksize,
map 1595 dev/pci/yds.c bus_dmamap_sync(sc->sc_dmatag, sc->sc_ctrldata.map,
map 1606 dev/pci/yds.c bus_dmamap_sync(sc->sc_dmatag, sc->sc_play.dma->map,
map 1625 dev/pci/yds.c bus_dmamap_sync(sc->sc_dmatag, sc->sc_ctrldata.map,
map 1631 dev/pci/yds.c bus_dmamap_sync(sc->sc_dmatag, sc->sc_rec.dma->map,
map 324 dev/pci/ydsreg.h bus_dmamap_t map;
map 982 dev/pckbc/wskbdmap_mfii.c #define KBD_MAP(name, base, map) \
map 983 dev/pckbc/wskbdmap_mfii.c { name, base, sizeof(map)/sizeof(keysym_t), map }
map 263 dev/raidframe/rf_aselect.c (raidPtr->Layout.map->SelectionFunc) (raidPtr, type, asm_p,
map 322 dev/raidframe/rf_aselect.c (raidPtr->Layout.map->SelectionFunc) (raidPtr,
map 401 dev/raidframe/rf_aselect.c (raidPtr->Layout.map->
map 320 dev/raidframe/rf_copyback.c (raidPtr->Layout.map->MapSector) (raidPtr, addr,
map 341 dev/raidframe/rf_copyback.c (raidPtr->Layout.map->MapParity) (raidPtr, stripeAddr,
map 374 dev/raidframe/rf_copyback.c if (raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE) {
map 376 dev/raidframe/rf_copyback.c raidPtr->Layout.map->MapSector(raidPtr, addr, &spRow,
map 379 dev/raidframe/rf_copyback.c raidPtr->Layout.map->MapParity(raidPtr, addr, &spRow,
map 467 dev/raidframe/rf_copyback.c if (raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE) {
map 468 dev/raidframe/rf_copyback.c RF_ASSERT(raidPtr->Layout.map->parityConfig == 'D');
map 851 dev/raidframe/rf_dagdegrd.c (raidPtr->Layout.map->MapSector) (raidPtr, pda_p->raidAddress,
map 881 dev/raidframe/rf_dagdegrd.c (raidPtr->Layout.map->MapSector) (raidPtr,
map 915 dev/raidframe/rf_dagdegrd.c (raidPtr->Layout.map->MapSector) (raidPtr,
map 932 dev/raidframe/rf_dagdegrd.c (raidPtr->Layout.map->MapSector) (raidPtr,
map 948 dev/raidframe/rf_dagdegrd.c (raidPtr->Layout.map->MapSector) (raidPtr,
map 961 dev/raidframe/rf_dagdegrd.c (raidPtr->Layout.map->MapSector)
map 984 dev/raidframe/rf_dagdegrd.c (raidPtr->Layout.map->MapSector)
map 1004 dev/raidframe/rf_dagdegrd.c (raidPtr->Layout.map->MapSector)
map 1026 dev/raidframe/rf_dagdegrd.c (raidPtr->Layout.map->MapSector)
map 1046 dev/raidframe/rf_dagdegrd.c (raidPtr->Layout.map->MapSector)
map 1069 dev/raidframe/rf_dagdegrd.c (raidPtr->Layout.map->MapSector) (raidPtr, pda_p->raidAddress,
map 1099 dev/raidframe/rf_dagdegrd.c (raidPtr->Layout.map->MapSector) (raidPtr,
map 659 dev/raidframe/rf_dagdegwr.c (raidPtr->Layout.map->MapSector) (raidPtr, pda_p->raidAddress,
map 686 dev/raidframe/rf_dagdegwr.c (raidPtr->Layout.map->MapSector) (raidPtr,
map 759 dev/raidframe/rf_dagutils.c int ds = (raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE) ? 1 : 0;
map 778 dev/raidframe/rf_dagutils.c raidPtr->Layout.map->MapSector(raidPtr,
map 798 dev/raidframe/rf_dagutils.c (raidPtr->Layout.map->MapParity) (raidPtr,
map 100 dev/raidframe/rf_decluster.c if (layoutPtr->map->flags & RF_DISTRIBUTE_SPARE) {
map 123 dev/raidframe/rf_decluster.c if ((layoutPtr->map->flags & RF_DISTRIBUTE_SPARE) &&
map 154 dev/raidframe/rf_decluster.c if ((raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE)) {
map 182 dev/raidframe/rf_decluster.c if (raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE) {
map 273 dev/raidframe/rf_decluster.c if (raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE) {
map 412 dev/raidframe/rf_decluster.c if (raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE) {
map 494 dev/raidframe/rf_decluster.c if ((raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE)) {
map 102 dev/raidframe/rf_declusterPQ.c if ((raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE) &&
map 132 dev/raidframe/rf_declusterPQ.c if ((raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE)) {
map 159 dev/raidframe/rf_declusterPQ.c if ((raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE)) {
map 248 dev/raidframe/rf_declusterPQ.c if ((raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE)) {
map 360 dev/raidframe/rf_declusterPQ.c if ((raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE)) {
map 443 dev/raidframe/rf_declusterPQ.c if ((raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE)) {
map 501 dev/raidframe/rf_declusterPQ.c if ((raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE)) {
map 741 dev/raidframe/rf_driver.c flags, cbF, cbA, raidPtr->Layout.map->states);
map 760 dev/raidframe/rf_driver.c if (!(raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE)) {
map 774 dev/raidframe/rf_driver.c if (raidPtr->Layout.map->flags & RF_BD_DECLUSTERED)
map 795 dev/raidframe/rf_evenodd_dagfuncs.c (raidPtr->Layout.map->MapSector) (raidPtr,
map 1017 dev/raidframe/rf_evenodd_dagfuncs.c (raidPtr->Layout.map->MapSector) (raidPtr, npda.raidAddress,
map 466 dev/raidframe/rf_layout.c layoutPtr->map = p;
map 496 dev/raidframe/rf_layout.c " with head sep limit %ld.\n", layoutPtr->map->configName,
map 501 dev/raidframe/rf_layout.c " with no head sep limit.\n", layoutPtr->map->configName,
map 524 dev/raidframe/rf_layout.c if ((layoutPtr->SUsPerPU == 1) || !layoutPtr->map->MapSIDToPSID) {
map 528 dev/raidframe/rf_layout.c (layoutPtr->map->MapSIDToPSID) (layoutPtr, stripeID,
map 220 dev/raidframe/rf_layout.h RF_LayoutSW_t *map; /*
map 96 dev/raidframe/rf_map.c int faultsTolerated = layoutPtr->map->faultsTolerated;
map 179 dev/raidframe/rf_map.c (layoutPtr->map->MapSector) (raidPtr, raidAddress,
map 230 dev/raidframe/rf_map.c (layoutPtr->map->MapParity) (raidPtr,
map 259 dev/raidframe/rf_map.c (layoutPtr->map->MapParity) (raidPtr,
map 263 dev/raidframe/rf_map.c (layoutPtr->map->MapQ) (raidPtr,
map 692 dev/raidframe/rf_map.c (layoutPtr->map->IdentifyStripe) (raidPtr, asmap->raidAddress,
map 694 dev/raidframe/rf_map.c (layoutPtr->map->MapParity) (raidPtr, asmap->raidAddress,
map 713 dev/raidframe/rf_map.c layoutPtr->map->MapSector(raidPtr,
map 756 dev/raidframe/rf_map.c (layoutPtr->map->MapSector) (raidPtr, sosAddr + i *
map 968 dev/raidframe/rf_map.c raidPtr->Layout.map->MapSector(raidPtr,
map 972 dev/raidframe/rf_map.c raidPtr->Layout.map->MapParity(raidPtr,
map 1176 dev/raidframe/rf_openbsdkintf.c if (raidPtr->Layout.map->faultsTolerated == 0) {
map 1232 dev/raidframe/rf_openbsdkintf.c if (raidPtr->Layout.map->faultsTolerated == 0) {
map 1357 dev/raidframe/rf_openbsdkintf.c if (raidPtr->Layout.map->faultsTolerated == 0) {
map 1374 dev/raidframe/rf_openbsdkintf.c if (raidPtr->Layout.map->faultsTolerated == 0) {
map 1411 dev/raidframe/rf_openbsdkintf.c if (raidPtr->Layout.map->faultsTolerated == 0) {
map 1447 dev/raidframe/rf_openbsdkintf.c if (raidPtr->Layout.map->faultsTolerated == 0) {
map 3404 dev/raidframe/rf_openbsdkintf.c clabel->parityConfig = raidPtr->Layout.map->parityConfig;
map 3502 dev/raidframe/rf_openbsdkintf.c (raidPtrs[raidID]->Layout).map->configName,
map 988 dev/raidframe/rf_paritylogging.c if (layoutPtr->map->flags &
map 995 dev/raidframe/rf_paritylogging.c (layoutPtr->map->MapParity)
map 1036 dev/raidframe/rf_paritylogging.c (layoutPtr->map
map 71 dev/raidframe/rf_parityscan.c if (raidPtr->Layout.map->faultsTolerated == 0) {
map 149 dev/raidframe/rf_parityscan.c lp = raidPtr->Layout.map;
map 337 dev/raidframe/rf_parityscan.c if (raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE) {
map 341 dev/raidframe/rf_parityscan.c (raidPtr->Layout.map->MapParity)
map 353 dev/raidframe/rf_parityscan.c (raidPtr->Layout.map->MapSector)
map 271 dev/raidframe/rf_pqdegdags.c (raidPtr->Layout.map->MapSector) (raidPtr,
map 374 dev/raidframe/rf_pqdegdags.c (raidPtr->Layout.map->MapSector) (raidPtr, npda.raidAddress,
map 222 dev/raidframe/rf_raid5.c if (layoutPtr->map->flags &
map 229 dev/raidframe/rf_raid5.c (layoutPtr->map->MapParity)
map 271 dev/raidframe/rf_raid5.c (layoutPtr->map
map 112 dev/raidframe/rf_reconbuffer.c lp = rbuf->raidPtr->Layout.map;
map 338 dev/raidframe/rf_reconstruct.c lp = raidPtr->Layout.map;
map 386 dev/raidframe/rf_reconstruct.c if (raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE) {
map 481 dev/raidframe/rf_reconstruct.c lp = raidPtr->Layout.map;
map 546 dev/raidframe/rf_reconstruct.c if (raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE) {
map 881 dev/raidframe/rf_reconstruct.c ds = (raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE);
map 1337 dev/raidframe/rf_reconstruct.c (layoutPtr->map->IdentifyStripe) (raidPtr, sosRaidAddress, &diskids,
map 1361 dev/raidframe/rf_reconstruct.c (layoutPtr->map->MapParity) (raidPtr, sosRaidAddress, &prow, &pcol,
map 1403 dev/raidframe/rf_reconstruct.c layoutPtr->map->MapParity(raidPtr, sosRaidAddress + i_offset *
map 1407 dev/raidframe/rf_reconstruct.c layoutPtr->map->MapSector(raidPtr, sosRaidAddress + i_offset *
map 1414 dev/raidframe/rf_reconstruct.c layoutPtr->map->MapParity(raidPtr, sosRaidAddress + j_offset *
map 1418 dev/raidframe/rf_reconstruct.c layoutPtr->map->MapSector(raidPtr, sosRaidAddress + j_offset *
map 1424 dev/raidframe/rf_reconstruct.c if (layoutPtr->map->flags & RF_DISTRIBUTE_SPARE) {
map 1426 dev/raidframe/rf_reconstruct.c layoutPtr->map->MapParity(raidPtr, sosRaidAddress +
map 1430 dev/raidframe/rf_reconstruct.c layoutPtr->map->MapSector(raidPtr, sosRaidAddress +
map 1811 dev/raidframe/rf_reconstruct.c (raidPtr->Layout.map->IdentifyStripe) (raidPtr,
map 70 dev/raidframe/rf_reconutil.c lp = raidPtr->Layout.map;
map 212 dev/raidframe/rf_reconutil.c lp = raidPtr->Layout.map;
map 229 dev/raidframe/rf_reconutil.c lp = raidPtr->Layout.map;
map 354 dev/raidframe/rf_states.c if (!(raidPtr->Layout.map->flags & RF_NO_STRIPE_LOCKS)) {
map 676 dev/raidframe/rf_states.c if (!(raidPtr->Layout.map->flags & RF_NO_STRIPE_LOCKS)) {
map 862 dev/sun/sunkbdmap.c #define KBD_MAP(name, base, map) \
map 863 dev/sun/sunkbdmap.c { name, base, sizeof(map)/sizeof(keysym_t), map }
map 1018 dev/usb/ukbdmap.c #define KBD_MAP(name, base, map) \
map 1019 dev/usb/ukbdmap.c { name, base, sizeof(map)/sizeof(keysym_t), map }
map 157 dev/usb/usb_mem.c 0, BUS_DMA_NOWAIT, &p->map);
map 161 dev/usb/usb_mem.c error = bus_dmamap_load(tag, p->map, p->kaddr, p->size, NULL,
map 170 dev/usb/usb_mem.c bus_dmamap_destroy(tag, p->map);
map 190 dev/usb/usb_mem.c bus_dmamap_unload(p->tag, p->map);
map 191 dev/usb/usb_mem.c bus_dmamap_destroy(p->tag, p->map);
map 44 dev/usb/usb_mem.h bus_dmamap_t map;
map 54 dev/usb/usb_mem.h #define DMAADDR(dma, o) ((dma)->block->map->dm_segs[0].ds_addr + (dma)->offs + (o))
map 175 dev/wscons/wsconsio.h struct wscons_keymap *map; /* map to get or set */
map 1085 dev/wscons/wskbd.c error = copyin(umdp->map, buf, len);
map 1102 dev/wscons/wskbd.c error = copyout(sc->sc_map, umdp->map,
map 363 dev/wscons/wskbdutil.c for (kp = mp->map; kp < mp->map + mp->map_size; kp++) {
map 373 dev/wscons/wskbdutil.c for (l = 0; kp + l < mp->map + mp->map_size;
map 392 dev/wscons/wskbdutil.c wskbd_init_keymap(newlen, map, maplen)
map 394 dev/wscons/wskbdutil.c struct wscons_keymap **map;
map 401 dev/wscons/wskbdutil.c free(*map, M_TEMP);
map 403 dev/wscons/wskbdutil.c *map = malloc(newlen*sizeof(struct wscons_keymap),
map 408 dev/wscons/wskbdutil.c (*map)[i].command = KS_voidSymbol;
map 409 dev/wscons/wskbdutil.c (*map)[i].group1[0] = KS_voidSymbol;
map 410 dev/wscons/wskbdutil.c (*map)[i].group1[1] = KS_voidSymbol;
map 411 dev/wscons/wskbdutil.c (*map)[i].group2[0] = KS_voidSymbol;
map 412 dev/wscons/wskbdutil.c (*map)[i].group2[1] = KS_voidSymbol;
map 417 dev/wscons/wskbdutil.c wskbd_load_keymap(mapdata, map, maplen)
map 419 dev/wscons/wskbdutil.c struct wscons_keymap **map;
map 450 dev/wscons/wskbdutil.c for (kp = mp->map; kp < mp->map + mp->map_size; kp++) {
map 457 dev/wscons/wskbdutil.c wskbd_init_keymap(i + 1, map, maplen);
map 461 dev/wscons/wskbdutil.c for (kp = mp->map; kp < mp->map + mp->map_size; ) {
map 472 dev/wscons/wskbdutil.c (*map)[kc].command = *kp;
map 476 dev/wscons/wskbdutil.c for (i = 0; kp + i < mp->map + mp->map_size; i++) {
map 486 dev/wscons/wskbdutil.c fillmapentry(kp, i, &(*map)[kc]);
map 60 dev/wscons/wsksymvar.h const keysym_t *map; /* the map itself */
map 271 isofs/udf/ecma167-udf.h uint32_t map;
map 1325 isofs/udf/udf_vnops.c letoh32(ump->um_stbl->entries[i].map) +
map 693 msdosfs/msdosfs_fat.c u_int map;
map 699 msdosfs/msdosfs_fat.c map = pmp->pm_inusemap[idx];
map 700 msdosfs/msdosfs_fat.c map &= ~((1 << start) - 1);
map 701 msdosfs/msdosfs_fat.c if (map) {
map 702 msdosfs/msdosfs_fat.c len = ffs(map) - 1 - start;
map 711 msdosfs/msdosfs_fat.c if ((map = pmp->pm_inusemap[idx]) != 0) {
map 712 msdosfs/msdosfs_fat.c len += ffs(map) - 1;
map 781 msdosfs/msdosfs_fat.c u_int map;
map 809 msdosfs/msdosfs_fat.c map = pmp->pm_inusemap[idx];
map 810 msdosfs/msdosfs_fat.c map |= (1 << (cn % N_INUSEBITS)) - 1;
map 811 msdosfs/msdosfs_fat.c if (map != (u_int)-1) {
map 812 msdosfs/msdosfs_fat.c cn = idx * N_INUSEBITS + ffs(map^(u_int)-1) - 1;
map 826 msdosfs/msdosfs_fat.c map = pmp->pm_inusemap[idx];
map 827 msdosfs/msdosfs_fat.c map |= (1 << (cn % N_INUSEBITS)) - 1;
map 828 msdosfs/msdosfs_fat.c if (map != (u_int)-1) {
map 829 msdosfs/msdosfs_fat.c cn = idx * N_INUSEBITS + ffs(map^(u_int)-1) - 1;
map 1534 netbt/hci.h uint8_t map[10]; /* AFH Channel Map */
map 404 ufs/ext2fs/ext2fs_alloc.c int error, start, len, loc, map, i;
map 438 ufs/ext2fs/ext2fs_alloc.c map = ibp[i];
map 441 ufs/ext2fs/ext2fs_alloc.c if ((map & i) == 0) {
map 558 ufs/ext2fs/ext2fs_alloc.c int start, len, loc, i, map;
map 582 ufs/ext2fs/ext2fs_alloc.c map = bbp[i];
map 585 ufs/ext2fs/ext2fs_alloc.c if ((map & i) == 0)
map 1420 ufs/ffs/ffs_alloc.c int i, got, run, bno, bit, map;
map 1472 ufs/ffs/ffs_alloc.c map = *mapp++;
map 1475 ufs/ffs/ffs_alloc.c if ((map & bit) == 0) {
map 1485 ufs/ffs/ffs_alloc.c map = *mapp++;
map 1526 ufs/ffs/ffs_alloc.c int error, start, len, loc, map, i;
map 1605 ufs/ffs/ffs_alloc.c map = cg_inosused(cgp)[i];
map 1608 ufs/ffs/ffs_alloc.c if ((map & i) == 0) {
map 1984 ufs/ffs/ffs_alloc.c int i, start, end, forw, back, map, bit;
map 2005 ufs/ffs/ffs_alloc.c map = *mapp++;
map 2008 ufs/ffs/ffs_alloc.c if ((map & bit) == 0)
map 2013 ufs/ffs/ffs_alloc.c map = *mapp++;
map 2026 ufs/ffs/ffs_alloc.c map = *mapp--;
map 2029 ufs/ffs/ffs_alloc.c if ((map & bit) == 0)
map 2034 ufs/ffs/ffs_alloc.c map = *mapp--;
map 497 ufs/ffs/fs.h #define blkmap(fs, map, loc) \
map 498 ufs/ffs/fs.h (((map)[(loc) / NBBY] >> ((loc) % NBBY)) & (0xff >> (NBBY - (fs)->fs_frag)))
map 581 uvm/uvm_amap.c amap_copy(struct vm_map *map, struct vm_map_entry *entry, int waitf,
map 589 uvm/uvm_amap.c map, entry, waitf, 0);
map 612 uvm/uvm_amap.c UVM_MAP_CLIP_START(map, entry, startva);
map 615 uvm/uvm_amap.c UVM_MAP_CLIP_END(map, entry, endva);
map 750 uvm/uvm_amap.c amap_cow_now(struct vm_map *map, struct vm_map_entry *entry)
map 255 uvm/uvm_fault.c amap_copy(ufi->map, ufi->entry, M_NOWAIT, TRUE,
map 615 uvm/uvm_fault.c if ((ufi.map->flags & VM_MAP_PAGEABLE) == 0)
map 617 uvm/uvm_fault.c ufi.map, vaddr);
map 1771 uvm/uvm_fault.c uvm_fault_wire(map, start, end, access_type)
map 1772 uvm/uvm_fault.c vm_map_t map;
map 1780 uvm/uvm_fault.c pmap = vm_map_pmap(map);
map 1789 uvm/uvm_fault.c rv = uvm_fault(map, va, VM_FAULT_WIRE, access_type);
map 1792 uvm/uvm_fault.c uvm_fault_unwire(map, start, va);
map 1806 uvm/uvm_fault.c uvm_fault_unwire(map, start, end)
map 1807 uvm/uvm_fault.c vm_map_t map;
map 1811 uvm/uvm_fault.c vm_map_lock_read(map);
map 1812 uvm/uvm_fault.c uvm_fault_unwire_locked(map, start, end);
map 1813 uvm/uvm_fault.c vm_map_unlock_read(map);
map 1823 uvm/uvm_fault.c uvm_fault_unwire_locked(map, start, end)
map 1824 uvm/uvm_fault.c vm_map_t map;
map 1828 uvm/uvm_fault.c pmap_t pmap = vm_map_pmap(map);
map 1833 uvm/uvm_fault.c KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);
map 1847 uvm/uvm_fault.c KASSERT(start >= vm_map_min(map) && end <= vm_map_max(map));
map 1848 uvm/uvm_fault.c if (uvm_map_lookup_entry(map, start, &entry) == FALSE)
map 1860 uvm/uvm_fault.c KASSERT(entry->next != &map->header &&
map 63 uvm/uvm_fault.h vm_map_t map; /* map (could be a submap) */
map 69 uvm/uvm_fault_i.h vm_map_unlock(ufi->map);
map 71 uvm/uvm_fault_i.h vm_map_unlock_read(ufi->map);
map 122 uvm/uvm_fault_i.h ufi->map = ufi->orig_map;
map 136 uvm/uvm_fault_i.h vm_map_lock(ufi->map);
map 138 uvm/uvm_fault_i.h vm_map_lock_read(ufi->map);
map 144 uvm/uvm_fault_i.h if (!uvm_map_lookup_entry(ufi->map, ufi->orig_rvaddr,
map 163 uvm/uvm_fault_i.h vm_map_unlock(ufi->map);
map 165 uvm/uvm_fault_i.h vm_map_unlock_read(ufi->map);
map 167 uvm/uvm_fault_i.h ufi->map = tmpmap;
map 175 uvm/uvm_fault_i.h ufi->mapv = ufi->map->timestamp;
map 210 uvm/uvm_fault_i.h vm_map_lock_read(ufi->map);
map 211 uvm/uvm_fault_i.h if (ufi->mapv != ufi->map->timestamp) {
map 212 uvm/uvm_fault_i.h vm_map_unlock_read(ufi->map);
map 176 uvm/uvm_glue.c vm_map_t map;
map 180 uvm/uvm_glue.c map = &p->p_vmspace->vm_map;
map 186 uvm/uvm_glue.c rv = uvm_fault_wire(map, start, end, access_type);
map 63 uvm/uvm_io.c uvm_io(vm_map_t map, struct uio *uio, int flags)
map 110 uvm/uvm_io.c error = uvm_map_extract(map, baseva, chunksz, kernel_map, &kva,
map 208 uvm/uvm_km.c uvm_km_suballoc(struct vm_map *map, vaddr_t *min, vaddr_t *max, vsize_t size,
map 219 uvm/uvm_km.c if (uvm_map(map, min, size, NULL, UVM_UNKNOWN_OFFSET, 0,
map 235 uvm/uvm_km.c pmap_reference(vm_map_pmap(map));
map 237 uvm/uvm_km.c submap = uvm_map_create(vm_map_pmap(map), *min, *max, flags);
map 242 uvm/uvm_km.c submap->pmap = vm_map_pmap(map);
map 249 uvm/uvm_km.c if (uvm_map_submap(map, *min, *max, submap) != 0)
map 339 uvm/uvm_km.c uvm_km_kmemalloc(struct vm_map *map, struct uvm_object *obj, vsize_t size,
map 348 uvm/uvm_km.c map, obj, size, flags);
map 349 uvm/uvm_km.c KASSERT(vm_map_pmap(map) == pmap_kernel());
map 356 uvm/uvm_km.c kva = vm_map_min(map); /* hint */
map 362 uvm/uvm_km.c if (__predict_false(uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET,
map 407 uvm/uvm_km.c uvm_unmap(map, kva, kva + size);
map 424 uvm/uvm_km.c pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg),
map 442 uvm/uvm_km.c uvm_km_free(struct vm_map *map, vaddr_t addr, vsize_t size)
map 444 uvm/uvm_km.c uvm_unmap(map, trunc_page(addr), round_page(addr+size));
map 455 uvm/uvm_km.c uvm_km_free_wakeup(struct vm_map *map, vaddr_t addr, vsize_t size)
map 459 uvm/uvm_km.c vm_map_lock(map);
map 460 uvm/uvm_km.c uvm_unmap_remove(map, trunc_page(addr), round_page(addr+size),
map 462 uvm/uvm_km.c wakeup(map);
map 463 uvm/uvm_km.c vm_map_unlock(map);
map 476 uvm/uvm_km.c uvm_km_alloc1(struct vm_map *map, vsize_t size, vsize_t align, boolean_t zeroit)
map 483 uvm/uvm_km.c UVMHIST_LOG(maphist,"(map=%p, size=0x%lx)", map, size,0,0);
map 484 uvm/uvm_km.c KASSERT(vm_map_pmap(map) == pmap_kernel());
map 487 uvm/uvm_km.c kva = vm_map_min(map); /* hint */
map 493 uvm/uvm_km.c if (__predict_false(uvm_map(map, &kva, size, uvm.kernel_object,
map 543 uvm/uvm_km.c uvm_unmap(map, kva, loopva - kva);
map 555 uvm/uvm_km.c pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg),
map 562 uvm/uvm_km.c pmap_update(map->pmap);
map 583 uvm/uvm_km.c uvm_km_valloc(struct vm_map *map, vsize_t size)
map 585 uvm/uvm_km.c return(uvm_km_valloc_align(map, size, 0));
map 589 uvm/uvm_km.c uvm_km_valloc_align(struct vm_map *map, vsize_t size, vsize_t align)
map 594 uvm/uvm_km.c UVMHIST_LOG(maphist, "(map=%p, size=0x%lx)", map, size, 0,0);
map 595 uvm/uvm_km.c KASSERT(vm_map_pmap(map) == pmap_kernel());
map 598 uvm/uvm_km.c kva = vm_map_min(map); /* hint */
map 604 uvm/uvm_km.c if (__predict_false(uvm_map(map, &kva, size, uvm.kernel_object,
map 624 uvm/uvm_km.c uvm_km_valloc_prefer_wait(struct vm_map *map, vsize_t size, voff_t prefer)
map 629 uvm/uvm_km.c UVMHIST_LOG(maphist, "(map=%p, size=0x%lx)", map, size, 0,0);
map 630 uvm/uvm_km.c KASSERT(vm_map_pmap(map) == pmap_kernel());
map 633 uvm/uvm_km.c if (size > vm_map_max(map) - vm_map_min(map))
map 637 uvm/uvm_km.c kva = vm_map_min(map); /* hint */
map 644 uvm/uvm_km.c if (__predict_true(uvm_map(map, &kva, size, uvm.kernel_object,
map 656 uvm/uvm_km.c tsleep((caddr_t)map, PVM, "vallocwait", 0);
map 662 uvm/uvm_km.c uvm_km_valloc_wait(struct vm_map *map, vsize_t size)
map 664 uvm/uvm_km.c return uvm_km_valloc_prefer_wait(map, size, UVM_UNKNOWN_OFFSET);
map 675 uvm/uvm_km.c uvm_km_alloc_poolpage1(struct vm_map *map, struct uvm_object *obj,
map 710 uvm/uvm_km.c va = uvm_km_kmemalloc(map, obj, PAGE_SIZE, waitok ? 0 : UVM_KMF_NOWAIT);
map 724 uvm/uvm_km.c uvm_km_free_poolpage1(struct vm_map *map, vaddr_t addr)
map 742 uvm/uvm_km.c uvm_km_free(map, addr, PAGE_SIZE);
map 212 uvm/uvm_loan.c uvm_loan(map, start, len, result, flags)
map 213 uvm/uvm_loan.c struct vm_map *map;
map 224 uvm/uvm_loan.c if (map->flags & VM_MAP_INTRSAFE)
map 254 uvm/uvm_loan.c ufi.orig_map = map;
map 143 uvm/uvm_map.c #define uvm_map_entry_link(map, after_where, entry) do { \
map 144 uvm/uvm_map.c (map)->nentries++; \
map 149 uvm/uvm_map.c uvm_rb_insert(map, entry); \
map 157 uvm/uvm_map.c #define uvm_map_entry_unlink(map, entry) do { \
map 158 uvm/uvm_map.c (map)->nentries--; \
map 161 uvm/uvm_map.c uvm_rb_remove(map, entry); \
map 169 uvm/uvm_map.c #define SAVE_HINT(map,check,value) do { \
map 170 uvm/uvm_map.c simple_lock(&(map)->hint_lock); \
map 171 uvm/uvm_map.c if ((map)->hint == (check)) \
map 172 uvm/uvm_map.c (map)->hint = (value); \
map 173 uvm/uvm_map.c simple_unlock(&(map)->hint_lock); \
map 182 uvm/uvm_map.c #define VM_MAP_RANGE_CHECK(map, start, end) do { \
map 183 uvm/uvm_map.c if (start < vm_map_min(map)) \
map 184 uvm/uvm_map.c start = vm_map_min(map); \
map 185 uvm/uvm_map.c if (end > vm_map_max(map)) \
map 186 uvm/uvm_map.c end = vm_map_max(map); \
map 214 uvm/uvm_map.c int _uvm_tree_sanity(struct vm_map *map, const char *name);
map 242 uvm/uvm_map.c uvm_rb_space(struct vm_map *map, struct vm_map_entry *entry)
map 247 uvm/uvm_map.c if ((next = entry->next) == &map->header)
map 248 uvm/uvm_map.c space = map->max_offset - entry->end;
map 278 uvm/uvm_map.c uvm_rb_fixup(struct vm_map *map, struct vm_map_entry *entry)
map 282 uvm/uvm_map.c entry->ownspace = uvm_rb_space(map, entry);
map 288 uvm/uvm_map.c uvm_rb_insert(struct vm_map *map, struct vm_map_entry *entry)
map 290 uvm/uvm_map.c vaddr_t space = uvm_rb_space(map, entry);
map 294 uvm/uvm_map.c tmp = RB_INSERT(uvm_tree, &(map)->rbhead, entry);
map 299 uvm/uvm_map.c uvm_rb_fixup(map, entry);
map 300 uvm/uvm_map.c if (entry->prev != &map->header)
map 301 uvm/uvm_map.c uvm_rb_fixup(map, entry->prev);
map 305 uvm/uvm_map.c uvm_rb_remove(struct vm_map *map, struct vm_map_entry *entry)
map 310 uvm/uvm_map.c RB_REMOVE(uvm_tree, &(map)->rbhead, entry);
map 311 uvm/uvm_map.c if (entry->prev != &map->header)
map 312 uvm/uvm_map.c uvm_rb_fixup(map, entry->prev);
map 314 uvm/uvm_map.c uvm_rb_fixup(map, parent);
map 325 uvm/uvm_map.c _uvm_tree_sanity(struct vm_map *map, const char *name)
map 330 uvm/uvm_map.c RB_FOREACH(tmp, uvm_tree, &map->rbhead) {
map 331 uvm/uvm_map.c if (tmp->ownspace != uvm_rb_space(map, tmp)) {
map 333 uvm/uvm_map.c name, n + 1, map->nentries,
map 334 uvm/uvm_map.c tmp->ownspace, uvm_rb_space(map, tmp),
map 335 uvm/uvm_map.c tmp->next == &map->header ? "(last)" : "");
map 340 uvm/uvm_map.c RB_FOREACH(tmp, uvm_tree, &map->rbhead) {
map 356 uvm/uvm_map.c if (n != map->nentries) {
map 358 uvm/uvm_map.c name, n, map->nentries);
map 362 uvm/uvm_map.c for (tmp = map->header.next; tmp && tmp != &map->header;
map 364 uvm/uvm_map.c trtmp = RB_FIND(uvm_tree, &map->rbhead, tmp);
map 388 uvm/uvm_map.c uvm_mapent_alloc(struct vm_map *map)
map 394 uvm/uvm_map.c if (map->flags & VM_MAP_INTRSAFE || cold) {
map 419 uvm/uvm_map.c } else if (map == kernel_map) {
map 430 uvm/uvm_map.c ((map->flags & VM_MAP_INTRSAFE) != 0 || map == kernel_map), 0, 0);
map 482 uvm/uvm_map.c uvm_map_entry_unwire(struct vm_map *map, struct vm_map_entry *entry)
map 486 uvm/uvm_map.c uvm_fault_unwire_locked(map, entry->start, entry->end);
map 581 uvm/uvm_map.c uvm_map_clip_start(struct vm_map *map, struct vm_map_entry *entry,
map 589 uvm/uvm_map.c uvm_tree_sanity(map, "clip_start entry");
map 597 uvm/uvm_map.c new_entry = uvm_mapent_alloc(map);
map 612 uvm/uvm_map.c uvm_map_entry_link(map, entry->prev, new_entry);
map 625 uvm/uvm_map.c uvm_tree_sanity(map, "clip_start leave");
map 638 uvm/uvm_map.c uvm_map_clip_end(struct vm_map *map, struct vm_map_entry *entry, vaddr_t end)
map 643 uvm/uvm_map.c uvm_tree_sanity(map, "clip_end entry");
map 649 uvm/uvm_map.c new_entry = uvm_mapent_alloc(map);
map 660 uvm/uvm_map.c uvm_rb_fixup(map, entry);
map 662 uvm/uvm_map.c uvm_map_entry_link(map, entry, new_entry);
map 674 uvm/uvm_map.c uvm_tree_sanity(map, "clip_end leave");
map 708 uvm/uvm_map.c uvm_map_p(struct vm_map *map, vaddr_t *startp, vsize_t size,
map 722 uvm/uvm_map.c map, *startp, size, flags);
map 725 uvm/uvm_map.c uvm_tree_sanity(map, "map entry");
map 727 uvm/uvm_map.c if ((map->flags & VM_MAP_INTRSAFE) == 0)
map 744 uvm/uvm_map.c if (vm_map_lock_try(map) == FALSE) {
map 747 uvm/uvm_map.c vm_map_lock(map); /* could sleep here */
map 749 uvm/uvm_map.c if ((prev_entry = uvm_map_findspace(map, *startp, size, startp,
map 752 uvm/uvm_map.c vm_map_unlock(map);
map 762 uvm/uvm_map.c if (map == kernel_map && uvm_maxkaddr < (*startp + size))
map 802 uvm/uvm_map.c prev_entry->end == *startp && prev_entry != &map->header &&
map 838 uvm/uvm_map.c vm_map_unlock(map);
map 855 uvm/uvm_map.c uvm_rb_fixup(map, prev_entry);
map 856 uvm/uvm_map.c map->size += size;
map 860 uvm/uvm_map.c uvm_tree_sanity(map, "map leave 2");
map 863 uvm/uvm_map.c vm_map_unlock(map);
map 876 uvm/uvm_map.c prev_entry->next != &map->header &&
map 884 uvm/uvm_map.c new_entry = uvm_mapent_alloc(map);
map 921 uvm/uvm_map.c uvm_map_entry_link(map, prev_entry, new_entry);
map 923 uvm/uvm_map.c map->size += size;
map 932 uvm/uvm_map.c if ((map->first_free == prev_entry) &&
map 934 uvm/uvm_map.c map->first_free = new_entry;
map 936 uvm/uvm_map.c uvm_tree_sanity(map, "map leave");
map 939 uvm/uvm_map.c vm_map_unlock(map);
map 952 uvm/uvm_map.c uvm_map_lookup_entry(struct vm_map *map, vaddr_t address,
map 962 uvm/uvm_map.c map, address, entry, 0);
map 969 uvm/uvm_map.c simple_lock(&map->hint_lock);
map 970 uvm/uvm_map.c cur = map->hint;
map 971 uvm/uvm_map.c simple_unlock(&map->hint_lock);
map 973 uvm/uvm_map.c if (cur == &map->header)
map 989 uvm/uvm_map.c last = &map->header;
map 998 uvm/uvm_map.c if (map->nentries > 30)
map 1005 uvm/uvm_map.c cur = map->header.next;
map 1009 uvm/uvm_map.c uvm_tree_sanity(map, __func__);
map 1012 uvm/uvm_map.c struct vm_map_entry *prev = &map->header;
map 1013 uvm/uvm_map.c cur = RB_ROOT(&map->rbhead);
map 1023 uvm/uvm_map.c SAVE_HINT(map, map->hint, cur);
map 1049 uvm/uvm_map.c SAVE_HINT(map, map->hint, cur);
map 1060 uvm/uvm_map.c SAVE_HINT(map, map->hint, *entry);
map 1072 uvm/uvm_map.c uvm_map_spacefits(struct vm_map *map, vaddr_t *phint, vsize_t length,
map 1092 uvm/uvm_map.c if (end > map->max_offset || end < hint)
map 1094 uvm/uvm_map.c if (after != NULL && after != &map->header && after->start < end)
map 1145 uvm/uvm_map.c uvm_map_findspace(struct vm_map *map, vaddr_t hint, vsize_t length,
map 1157 uvm/uvm_map.c map, hint, length, flags);
map 1161 uvm/uvm_map.c uvm_tree_sanity(map, "map_findspace entry");
map 1170 uvm/uvm_map.c if (hint < map->min_offset) { /* check ranges ... */
map 1175 uvm/uvm_map.c hint = map->min_offset;
map 1177 uvm/uvm_map.c if (hint > map->max_offset) {
map 1179 uvm/uvm_map.c hint, map->min_offset, map->max_offset, 0);
map 1188 uvm/uvm_map.c if ((flags & UVM_FLAG_FIXED) == 0 && hint == map->min_offset) {
map 1189 uvm/uvm_map.c if ((entry = map->first_free) != &map->header)
map 1192 uvm/uvm_map.c if (uvm_map_lookup_entry(map, hint, &tmp)) {
map 1206 uvm/uvm_map.c if (end > map->max_offset || end < hint) {
map 1211 uvm/uvm_map.c if (next == &map->header || next->start >= end)
map 1220 uvm/uvm_map.c if (uvm_map_spacefits(map, &hint, length, entry->next, uoffset, align))
map 1224 uvm/uvm_map.c tmp = RB_ROOT(&map->rbhead);
map 1259 uvm/uvm_map.c if (uvm_map_spacefits(map, &hint, length, tmp->next, uoffset,
map 1270 uvm/uvm_map.c if (uvm_map_spacefits(map, &hint, length, prev->next, uoffset,
map 1291 uvm/uvm_map.c if (uvm_map_spacefits(map, &hint, length, tmp->next, uoffset, align)) {
map 1332 uvm/uvm_map.c if (end > map->max_offset || end < hint) {
map 1337 uvm/uvm_map.c if (next == &map->header || next->start >= end)
map 1341 uvm/uvm_map.c SAVE_HINT(map, map->hint, entry);
map 1351 uvm/uvm_map.c return (uvm_map_findspace(map, orig_hint,
map 1371 uvm/uvm_map.c uvm_unmap_remove(struct vm_map *map, vaddr_t start, vaddr_t end,
map 1380 uvm/uvm_map.c map, start, end, 0);
map 1382 uvm/uvm_map.c VM_MAP_RANGE_CHECK(map, start, end);
map 1384 uvm/uvm_map.c uvm_tree_sanity(map, "unmap_remove entry");
map 1386 uvm/uvm_map.c if ((map->flags & VM_MAP_INTRSAFE) == 0)
map 1392 uvm/uvm_map.c if (uvm_map_lookup_entry(map, start, &first_entry) == TRUE) {
map 1395 uvm/uvm_map.c UVM_MAP_CLIP_START(map, entry, start);
map 1397 uvm/uvm_map.c SAVE_HINT(map, entry, entry->prev);
map 1407 uvm/uvm_map.c if (map->first_free->start >= start)
map 1408 uvm/uvm_map.c map->first_free = entry->prev;
map 1434 uvm/uvm_map.c while ((entry != &map->header) && (entry->start < end)) {
map 1436 uvm/uvm_map.c UVM_MAP_CLIP_END(map, entry, end);
map 1448 uvm/uvm_map.c uvm_map_entry_unwire(map, entry);
map 1454 uvm/uvm_map.c if (map->flags & VM_MAP_INTRSAFE) {
map 1459 uvm/uvm_map.c KASSERT(vm_map_pmap(map) == pmap_kernel());
map 1512 uvm/uvm_map.c pmap_remove(map->pmap, entry->start, entry->end);
map 1522 uvm/uvm_map.c SAVE_HINT(map, entry, entry->prev);
map 1524 uvm/uvm_map.c uvm_map_entry_unlink(map, entry);
map 1525 uvm/uvm_map.c map->size -= len;
map 1531 uvm/uvm_map.c pmap_update(vm_map_pmap(map));
map 1535 uvm/uvm_map.c uvm_tree_sanity(map, "unmap_remove leave");
map 1609 uvm/uvm_map.c uvm_map_reserve(struct vm_map *map, vsize_t size, vaddr_t offset,
map 1615 uvm/uvm_map.c map,size,offset,raddr);
map 1618 uvm/uvm_map.c if (*raddr < vm_map_min(map))
map 1619 uvm/uvm_map.c *raddr = vm_map_min(map); /* hint */
map 1625 uvm/uvm_map.c if (uvm_map(map, raddr, size, NULL, offset, 0,
map 1648 uvm/uvm_map.c uvm_map_replace(struct vm_map *map, vaddr_t start, vaddr_t end,
map 1653 uvm/uvm_map.c uvm_tree_sanity(map, "map_replace entry");
map 1659 uvm/uvm_map.c if (!uvm_map_lookup_entry(map, start, &oldent)) {
map 1714 uvm/uvm_map.c SAVE_HINT(map, map->hint, newents);
map 1715 uvm/uvm_map.c if (map->first_free == oldent)
map 1716 uvm/uvm_map.c map->first_free = last;
map 1722 uvm/uvm_map.c uvm_rb_remove(map, oldent);
map 1726 uvm/uvm_map.c map->nentries = map->nentries + (nnewents - 1);
map 1735 uvm/uvm_map.c uvm_rb_insert(map, tmp);
map 1742 uvm/uvm_map.c SAVE_HINT(map, map->hint, oldent->prev);
map 1743 uvm/uvm_map.c if (map->first_free == oldent)
map 1744 uvm/uvm_map.c map->first_free = oldent->prev;
map 1747 uvm/uvm_map.c uvm_map_entry_unlink(map, oldent);
map 1751 uvm/uvm_map.c uvm_tree_sanity(map, "map_replace leave");
map 2119 uvm/uvm_map.c uvm_map_submap(struct vm_map *map, vaddr_t start, vaddr_t end,
map 2125 uvm/uvm_map.c vm_map_lock(map);
map 2127 uvm/uvm_map.c VM_MAP_RANGE_CHECK(map, start, end);
map 2129 uvm/uvm_map.c if (uvm_map_lookup_entry(map, start, &entry)) {
map 2130 uvm/uvm_map.c UVM_MAP_CLIP_START(map, entry, start);
map 2131 uvm/uvm_map.c UVM_MAP_CLIP_END(map, entry, end); /* to be safe */
map 2148 uvm/uvm_map.c vm_map_unlock(map);
map 2165 uvm/uvm_map.c uvm_map_protect(struct vm_map *map, vaddr_t start, vaddr_t end,
map 2172 uvm/uvm_map.c map, start, end, new_prot);
map 2174 uvm/uvm_map.c vm_map_lock(map);
map 2176 uvm/uvm_map.c VM_MAP_RANGE_CHECK(map, start, end);
map 2178 uvm/uvm_map.c if (uvm_map_lookup_entry(map, start, &entry)) {
map 2179 uvm/uvm_map.c UVM_MAP_CLIP_START(map, entry, start);
map 2189 uvm/uvm_map.c while ((current != &map->header) && (current->start < end)) {
map 2205 uvm/uvm_map.c while ((current != &map->header) && (current->start < end)) {
map 2208 uvm/uvm_map.c UVM_MAP_CLIP_END(map, current, end);
map 2227 uvm/uvm_map.c pmap_protect(map->pmap, current->start, current->end,
map 2237 uvm/uvm_map.c if ((map->flags & VM_MAP_WIREFUTURE) != 0 &&
map 2241 uvm/uvm_map.c if (uvm_map_pageable(map, entry->start, entry->end,
map 2262 uvm/uvm_map.c pmap_update(map->pmap);
map 2265 uvm/uvm_map.c vm_map_unlock(map);
map 2282 uvm/uvm_map.c uvm_map_inherit(struct vm_map *map, vaddr_t start, vaddr_t end,
map 2288 uvm/uvm_map.c map, start, end, new_inheritance);
map 2300 uvm/uvm_map.c vm_map_lock(map);
map 2302 uvm/uvm_map.c VM_MAP_RANGE_CHECK(map, start, end);
map 2304 uvm/uvm_map.c if (uvm_map_lookup_entry(map, start, &temp_entry)) {
map 2306 uvm/uvm_map.c UVM_MAP_CLIP_START(map, entry, start);
map 2311 uvm/uvm_map.c while ((entry != &map->header) && (entry->start < end)) {
map 2312 uvm/uvm_map.c UVM_MAP_CLIP_END(map, entry, end);
map 2317 uvm/uvm_map.c vm_map_unlock(map);
map 2329 uvm/uvm_map.c uvm_map_advice(struct vm_map *map, vaddr_t start, vaddr_t end, int new_advice)
map 2334 uvm/uvm_map.c map, start, end, new_advice);
map 2336 uvm/uvm_map.c vm_map_lock(map);
map 2337 uvm/uvm_map.c VM_MAP_RANGE_CHECK(map, start, end);
map 2338 uvm/uvm_map.c if (uvm_map_lookup_entry(map, start, &temp_entry)) {
map 2340 uvm/uvm_map.c UVM_MAP_CLIP_START(map, entry, start);
map 2349 uvm/uvm_map.c while ((entry != &map->header) && (entry->start < end)) {
map 2350 uvm/uvm_map.c UVM_MAP_CLIP_END(map, entry, end);
map 2360 uvm/uvm_map.c vm_map_unlock(map);
map 2368 uvm/uvm_map.c vm_map_unlock(map);
map 2388 uvm/uvm_map.c uvm_map_pageable(struct vm_map *map, vaddr_t start, vaddr_t end,
map 2398 uvm/uvm_map.c map, start, end, new_pageable);
map 2399 uvm/uvm_map.c KASSERT(map->flags & VM_MAP_PAGEABLE);
map 2402 uvm/uvm_map.c vm_map_lock(map);
map 2404 uvm/uvm_map.c VM_MAP_RANGE_CHECK(map, start, end);
map 2414 uvm/uvm_map.c if (uvm_map_lookup_entry(map, start, &start_entry) == FALSE) {
map 2416 uvm/uvm_map.c vm_map_unlock(map);
map 2428 uvm/uvm_map.c UVM_MAP_CLIP_START(map, entry, start);
map 2435 uvm/uvm_map.c while ((entry != &map->header) && (entry->start < end)) {
map 2438 uvm/uvm_map.c (entry->next == &map->header ||
map 2441 uvm/uvm_map.c vm_map_unlock(map);
map 2456 uvm/uvm_map.c while ((entry != &map->header) && (entry->start < end)) {
map 2457 uvm/uvm_map.c UVM_MAP_CLIP_END(map, entry, end);
map 2459 uvm/uvm_map.c uvm_map_entry_unwire(map, entry);
map 2463 uvm/uvm_map.c vm_map_unlock(map);
map 2489 uvm/uvm_map.c while ((entry != &map->header) && (entry->start < end)) {
map 2504 uvm/uvm_map.c amap_copy(map, entry, M_WAITOK, TRUE,
map 2510 uvm/uvm_map.c UVM_MAP_CLIP_START(map, entry, start);
map 2511 uvm/uvm_map.c UVM_MAP_CLIP_END(map, entry, end);
map 2520 uvm/uvm_map.c (entry->next == &map->header ||
map 2528 uvm/uvm_map.c while (entry != &map->header && entry->end > start) {
map 2533 uvm/uvm_map.c vm_map_unlock(map);
map 2545 uvm/uvm_map.c timestamp_save = map->timestamp;
map 2547 uvm/uvm_map.c vm_map_busy(map);
map 2548 uvm/uvm_map.c vm_map_downgrade(map);
map 2552 uvm/uvm_map.c while (entry != &map->header && entry->start < end) {
map 2554 uvm/uvm_map.c rv = uvm_fault_wire(map, entry->start, entry->end,
map 2574 uvm/uvm_map.c vm_map_upgrade(map);
map 2575 uvm/uvm_map.c vm_map_unbusy(map);
map 2578 uvm/uvm_map.c if (timestamp_save != map->timestamp)
map 2588 uvm/uvm_map.c while (entry != &map->header && entry->start < end) {
map 2602 uvm/uvm_map.c uvm_map_entry_unwire(map, entry);
map 2606 uvm/uvm_map.c vm_map_unlock(map);
map 2613 uvm/uvm_map.c vm_map_unbusy(map);
map 2614 uvm/uvm_map.c vm_map_unlock_read(map);
map 2621 uvm/uvm_map.c vm_map_upgrade(map);
map 2622 uvm/uvm_map.c vm_map_unbusy(map);
map 2639 uvm/uvm_map.c uvm_map_pageable_all(struct vm_map *map, int flags, vsize_t limit)
map 2648 uvm/uvm_map.c UVMHIST_LOG(maphist,"(map=%p,flags=0x%lx)", map, flags, 0, 0);
map 2650 uvm/uvm_map.c KASSERT(map->flags & VM_MAP_PAGEABLE);
map 2652 uvm/uvm_map.c vm_map_lock(map);
map 2663 uvm/uvm_map.c for (entry = map->header.next; entry != &map->header;
map 2666 uvm/uvm_map.c uvm_map_entry_unwire(map, entry);
map 2668 uvm/uvm_map.c vm_map_modflags(map, 0, VM_MAP_WIREFUTURE);
map 2669 uvm/uvm_map.c vm_map_unlock(map);
map 2682 uvm/uvm_map.c vm_map_modflags(map, VM_MAP_WIREFUTURE, 0);
map 2690 uvm/uvm_map.c vm_map_unlock(map);
map 2717 uvm/uvm_map.c for (size = 0, entry = map->header.next; entry != &map->header;
map 2726 uvm/uvm_map.c vm_map_unlock(map);
map 2733 uvm/uvm_map.c (size + ptoa(pmap_wired_count(vm_map_pmap(map))) > limit)) {
map 2734 uvm/uvm_map.c vm_map_unlock(map);
map 2743 uvm/uvm_map.c for (entry = map->header.next; entry != &map->header;
map 2759 uvm/uvm_map.c amap_copy(map, entry, M_WAITOK, TRUE,
map 2773 uvm/uvm_map.c timestamp_save = map->timestamp;
map 2775 uvm/uvm_map.c vm_map_busy(map);
map 2776 uvm/uvm_map.c vm_map_downgrade(map);
map 2778 uvm/uvm_map.c for (error = 0, entry = map->header.next;
map 2779 uvm/uvm_map.c entry != &map->header && error == 0;
map 2782 uvm/uvm_map.c error = uvm_fault_wire(map, entry->start, entry->end,
map 2791 uvm/uvm_map.c vm_map_upgrade(map);
map 2792 uvm/uvm_map.c vm_map_unbusy(map);
map 2795 uvm/uvm_map.c if (timestamp_save != map->timestamp)
map 2806 uvm/uvm_map.c for (/* nothing */; entry != &map->header;
map 2819 uvm/uvm_map.c for (entry = map->header.next; entry != failed_entry;
map 2825 uvm/uvm_map.c uvm_map_entry_unwire(map, entry);
map 2827 uvm/uvm_map.c vm_map_unlock(map);
map 2833 uvm/uvm_map.c vm_map_unbusy(map);
map 2834 uvm/uvm_map.c vm_map_unlock_read(map);
map 2859 uvm/uvm_map.c uvm_map_clean(struct vm_map *map, vaddr_t start, vaddr_t end, int flags)
map 2872 uvm/uvm_map.c map, start, end, flags);
map 2876 uvm/uvm_map.c vm_map_lock_read(map);
map 2877 uvm/uvm_map.c VM_MAP_RANGE_CHECK(map, start, end);
map 2878 uvm/uvm_map.c if (uvm_map_lookup_entry(map, start, &entry) == FALSE) {
map 2879 uvm/uvm_map.c vm_map_unlock_read(map);
map 2889 uvm/uvm_map.c vm_map_unlock_read(map);
map 2892 uvm/uvm_map.c if (end > current->end && (current->next == &map->header ||
map 2894 uvm/uvm_map.c vm_map_unlock_read(map);
map 3039 uvm/uvm_map.c vm_map_unlock_read(map);
map 3052 uvm/uvm_map.c uvm_map_checkprot(struct vm_map *map, vaddr_t start, vaddr_t end,
map 3058 uvm/uvm_map.c if (!uvm_map_lookup_entry(map, start, &tmp_entry)) {
map 3063 uvm/uvm_map.c if (entry == &map->header) {
map 3190 uvm/uvm_map.c struct vm_map *map = &ovm->vm_map;
map 3217 uvm/uvm_map.c vm_map_lock(map);
map 3218 uvm/uvm_map.c vm_map_modflags(map, 0, VM_MAP_WIREFUTURE);
map 3219 uvm/uvm_map.c vm_map_unlock(map);
map 3224 uvm/uvm_map.c uvm_unmap(map, map->min_offset, map->max_offset);
map 3229 uvm/uvm_map.c vm_map_lock(map);
map 3230 uvm/uvm_map.c map->min_offset = start;
map 3231 uvm/uvm_map.c uvm_tree_sanity(map, "resize enter");
map 3232 uvm/uvm_map.c map->max_offset = end;
map 3233 uvm/uvm_map.c if (map->header.prev != &map->header)
map 3234 uvm/uvm_map.c uvm_rb_fixup(map, map->header.prev);
map 3235 uvm/uvm_map.c uvm_tree_sanity(map, "resize leave");
map 3236 uvm/uvm_map.c vm_map_unlock(map);
map 3247 uvm/uvm_map.c (map->flags & VM_MAP_PAGEABLE) ? TRUE : FALSE);
map 3615 uvm/uvm_map.c uvm_map_printit(struct vm_map *map, boolean_t full,
map 3620 uvm/uvm_map.c (*pr)("MAP %p: [0x%lx->0x%lx]\n", map, map->min_offset,map->max_offset);
map 3622 uvm/uvm_map.c map->nentries, map->size, map->ref_count, map->timestamp,
map 3623 uvm/uvm_map.c map->flags);
map 3625 uvm/uvm_map.c (*pr)("\tpmap=%p(resident=%d)\n", map->pmap,
map 3626 uvm/uvm_map.c pmap_resident_count(map->pmap));
map 3629 uvm/uvm_map.c (*pr)("\tpmap=%p(resident=<<NOT SUPPORTED!!!>>)\n", map->pmap);
map 3633 uvm/uvm_map.c for (entry = map->header.next; entry != &map->header;
map 246 uvm/uvm_map.h #define vm_map_modflags(map, set, clear) \
map 248 uvm/uvm_map.h (map)->flags = ((map)->flags | (set)) & ~(clear); \
map 358 uvm/uvm_map.h vm_map_lock_try(struct vm_map *map)
map 362 uvm/uvm_map.h if (map->flags & VM_MAP_INTRSAFE) {
map 365 uvm/uvm_map.h if (map->flags & VM_MAP_BUSY) {
map 368 uvm/uvm_map.h rv = (rw_enter(&map->lock, RW_WRITE|RW_NOSLEEP) == 0);
map 372 uvm/uvm_map.h map->timestamp++;
map 378 uvm/uvm_map.h vm_map_lock(struct vm_map *map)
map 380 uvm/uvm_map.h if (map->flags & VM_MAP_INTRSAFE)
map 384 uvm/uvm_map.h while (map->flags & VM_MAP_BUSY) {
map 385 uvm/uvm_map.h map->flags |= VM_MAP_WANTLOCK;
map 386 uvm/uvm_map.h tsleep(&map->flags, PVM, (char *)vmmapbsy, 0);
map 388 uvm/uvm_map.h } while (rw_enter(&map->lock, RW_WRITE|RW_SLEEPFAIL) != 0);
map 390 uvm/uvm_map.h map->timestamp++;
map 393 uvm/uvm_map.h #define vm_map_lock_read(map) rw_enter_read(&(map)->lock)
map 395 uvm/uvm_map.h #define vm_map_unlock(map) \
map 397 uvm/uvm_map.h if (((map)->flags & VM_MAP_INTRSAFE) == 0) \
map 398 uvm/uvm_map.h rw_exit(&(map)->lock); \
map 401 uvm/uvm_map.h #define vm_map_unlock_read(map) rw_exit_read(&(map)->lock)
map 403 uvm/uvm_map.h #define vm_map_downgrade(map) rw_enter(&(map)->lock, RW_DOWNGRADE)
map 405 uvm/uvm_map.h #define vm_map_upgrade(map) \
map 407 uvm/uvm_map.h rw_exit_read(&(map)->lock); \
map 408 uvm/uvm_map.h rw_enter_write(&(map)->lock); \
map 411 uvm/uvm_map.h #define vm_map_busy(map) \
map 413 uvm/uvm_map.h (map)->flags |= VM_MAP_BUSY; \
map 416 uvm/uvm_map.h #define vm_map_unbusy(map) \
map 420 uvm/uvm_map.h oflags = (map)->flags; \
map 421 uvm/uvm_map.h (map)->flags &= ~(VM_MAP_BUSY|VM_MAP_WANTLOCK); \
map 423 uvm/uvm_map.h wakeup(&(map)->flags); \
map 430 uvm/uvm_map.h #define vm_map_min(map) ((map)->min_offset)
map 431 uvm/uvm_map.h #define vm_map_max(map) ((map)->max_offset)
map 432 uvm/uvm_map.h #define vm_map_pmap(map) ((map)->pmap)
map 108 uvm/uvm_map_i.h uvm_map_setup(map, min, max, flags)
map 109 uvm/uvm_map_i.h vm_map_t map;
map 114 uvm/uvm_map_i.h RB_INIT(&map->rbhead);
map 115 uvm/uvm_map_i.h map->header.next = map->header.prev = &map->header;
map 116 uvm/uvm_map_i.h map->nentries = 0;
map 117 uvm/uvm_map_i.h map->size = 0;
map 118 uvm/uvm_map_i.h map->ref_count = 1;
map 119 uvm/uvm_map_i.h map->min_offset = min;
map 120 uvm/uvm_map_i.h map->max_offset = max;
map 121 uvm/uvm_map_i.h map->flags = flags;
map 122 uvm/uvm_map_i.h map->first_free = &map->header;
map 123 uvm/uvm_map_i.h map->hint = &map->header;
map 124 uvm/uvm_map_i.h map->timestamp = 0;
map 125 uvm/uvm_map_i.h rw_init(&map->lock, "vmmaplk");
map 126 uvm/uvm_map_i.h simple_lock_init(&map->ref_lock);
map 127 uvm/uvm_map_i.h simple_lock_init(&map->hint_lock);
map 143 uvm/uvm_map_i.h uvm_unmap_p(map, start, end, p)
map 144 uvm/uvm_map_i.h vm_map_t map;
map 152 uvm/uvm_map_i.h map, start, end, 0);
map 157 uvm/uvm_map_i.h vm_map_lock(map);
map 158 uvm/uvm_map_i.h uvm_unmap_remove(map, start, end, &dead_entries, p);
map 159 uvm/uvm_map_i.h vm_map_unlock(map);
map 175 uvm/uvm_map_i.h uvm_map_reference(map)
map 176 uvm/uvm_map_i.h vm_map_t map;
map 178 uvm/uvm_map_i.h simple_lock(&map->ref_lock);
map 179 uvm/uvm_map_i.h map->ref_count++;
map 180 uvm/uvm_map_i.h simple_unlock(&map->ref_lock);
map 191 uvm/uvm_map_i.h uvm_map_deallocate(map)
map 192 uvm/uvm_map_i.h vm_map_t map;
map 196 uvm/uvm_map_i.h simple_lock(&map->ref_lock);
map 197 uvm/uvm_map_i.h c = --map->ref_count;
map 198 uvm/uvm_map_i.h simple_unlock(&map->ref_lock);
map 207 uvm/uvm_map_i.h uvm_unmap(map, map->min_offset, map->max_offset);
map 208 uvm/uvm_map_i.h pmap_destroy(map->pmap);
map 209 uvm/uvm_map_i.h FREE(map, M_VMMAP);
map 235 uvm/uvm_meter.c struct vm_map *map;
map 279 uvm/uvm_meter.c vm_map_lock(map);
map 280 uvm/uvm_meter.c for (map = &p->p_vmspace->vm_map, entry = map->header.next;
map 281 uvm/uvm_meter.c entry != &map->header; entry = entry->next) {
map 287 uvm/uvm_meter.c vm_map_unlock(map);
map 256 uvm/uvm_mmap.c vm_map_t map;
map 260 uvm/uvm_mmap.c map = &p->p_vmspace->vm_map;
map 282 uvm/uvm_mmap.c vm_map_lock_read(map);
map 284 uvm/uvm_mmap.c if (uvm_map_lookup_entry(map, start, &entry) == FALSE) {
map 290 uvm/uvm_mmap.c entry != &map->header && entry->start < end;
map 297 uvm/uvm_mmap.c (entry->next == &map->header ||
map 364 uvm/uvm_mmap.c vm_map_unlock_read(map);
map 635 uvm/uvm_mmap.c vm_map_t map;
map 665 uvm/uvm_mmap.c map = &p->p_vmspace->vm_map;
map 680 uvm/uvm_mmap.c vm_map_lock_read(map);
map 681 uvm/uvm_mmap.c rv = uvm_map_lookup_entry(map, addr, &entry);
map 686 uvm/uvm_mmap.c vm_map_unlock_read(map);
map 702 uvm/uvm_mmap.c return (uvm_map_clean(map, addr, addr+size, uvmflags));
map 721 uvm/uvm_mmap.c vm_map_t map;
map 747 uvm/uvm_mmap.c map = &p->p_vmspace->vm_map;
map 750 uvm/uvm_mmap.c vm_map_lock(map); /* lock map so we can checkprot */
map 757 uvm/uvm_mmap.c if (!uvm_map_checkprot(map, addr, addr + size, VM_PROT_NONE)) {
map 758 uvm/uvm_mmap.c vm_map_unlock(map);
map 765 uvm/uvm_mmap.c uvm_unmap_remove(map, addr, addr + size, &dead_entries, p);
map 767 uvm/uvm_mmap.c vm_map_unlock(map); /* and unlock */
map 1089 uvm/uvm_mmap.c uvm_mmap(map, addr, size, prot, maxprot, flags, handle, foff, locklimit, p)
map 1090 uvm/uvm_mmap.c vm_map_t map;
map 1129 uvm/uvm_mmap.c uvm_unmap_p(map, *addr, *addr + size, p); /* zap! */
map 1223 uvm/uvm_mmap.c error = uvm_map_p(map, addr, size, uobj, foff, align, uvmflag, p);
map 1237 uvm/uvm_mmap.c vm_map_lock(map);
map 1239 uvm/uvm_mmap.c if (map->flags & VM_MAP_WIREFUTURE) {
map 1243 uvm/uvm_mmap.c ptoa(pmap_wired_count(vm_map_pmap(map)))) >
map 1248 uvm/uvm_mmap.c vm_map_unlock(map);
map 1250 uvm/uvm_mmap.c uvm_unmap(map, *addr, *addr + size);
map 1257 uvm/uvm_mmap.c error = uvm_map_pageable(map, *addr, *addr + size,
map 1261 uvm/uvm_mmap.c uvm_unmap(map, *addr, *addr + size);
map 1267 uvm/uvm_mmap.c vm_map_unlock(map);
map 187 uvm/uvm_unix.c vm_map_t map = &vm->vm_map;
map 196 uvm/uvm_unix.c for (entry = map->header.next; entry != &map->header;
map 54 uvm/uvm_user.c uvm_deallocate(struct vm_map *map, vaddr_t start, vsize_t size)
map 57 uvm/uvm_user.c if (map == NULL)
map 63 uvm/uvm_user.c uvm_unmap(map, trunc_page(start), round_page(start+size));