avail_end  230 arch/i386/i386/machdep.c       paddr_t avail_end;
avail_end  393 arch/i386/i386/machdep.c       pa = avail_end;
avail_end  555 arch/i386/i386/machdep.c       bufpages = btoc(avail_end) * bufcachepercent / 100;
avail_end 2966 arch/i386/i386/machdep.c       avail_end = 0;
avail_end 3023 arch/i386/i386/machdep.c       avail_end = max(avail_end, e);
avail_end 3027 arch/i386/i386/machdep.c       avail_end -= round_page(MSGBUFSIZE);
avail_end 3030 arch/i386/i386/machdep.c       printf(": %lx\n", avail_end);
avail_end 3058 arch/i386/i386/machdep.c       if (e > atop(avail_end))
avail_end 3059 arch/i386/i386/machdep.c       e = atop(avail_end);
avail_end 3887 arch/i386/i386/machdep.c       segs, nsegs, rsegs, flags, 0, trunc_page(avail_end)));
avail_end  759 arch/i386/i386/pmap.c          extern paddr_t avail_end;
avail_end  781 arch/i386/i386/pmap.c          avail_end -= round_page(MSGBUFSIZE);
avail_end  785 arch/i386/i386/pmap.c          avail_end -= round_page(bootargc);
avail_end  775 arch/i386/i386/pmapae.c        extern paddr_t avail_end, avail_end2;
avail_end  784 arch/i386/i386/pmapae.c        if (!cpu_pae || avail_end >= avail_end2 || !(cpu_feature & CPUID_PAE)){
avail_end  785 arch/i386/i386/pmapae.c        avail_end2 = avail_end;
avail_end  859 arch/i386/i386/pmapae.c        vm_physmem[i].avail_end = vm_physmem[i].end;
avail_end  108 arch/i386/isa/isa_machdep.c    extern paddr_t avail_end;
avail_end  702 arch/i386/isa/isa_machdep.c    if ((avail_end > ISA_DMA_BOUNCE_THRESHOLD &&
avail_end  955 arch/i386/isa/isa_machdep.c    if (avail_end > ISA_DMA_BOUNCE_THRESHOLD)
avail_end  958 arch/i386/isa/isa_machdep.c    high = trunc_page(avail_end);
avail_end   83 arch/i386/pci/pci_addr_fixup.c extern paddr_t avail_end;
avail_end  134 arch/i386/pci/pci_addr_fixup.c start = round_page(avail_end + 1);
avail_end  140 arch/i386/pci/pci_addr_fixup.c "space start: 0x%08x\n", avail_end, sc->mem_alloc_start));
avail_end  327 uvm/uvm_page.c                 atop(paddr) <= vm_physmem[lcv].avail_end) {
avail_end  523 uvm/uvm_page.c                 vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) {
avail_end  541 uvm/uvm_page.c                 if (vm_physmem[lcv].avail_end == vm_physmem[lcv].end &&
avail_end  542 uvm/uvm_page.c                 vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) {
avail_end  543 uvm/uvm_page.c                 *paddrp = ptoa(vm_physmem[lcv].avail_end - 1);
avail_end  544 uvm/uvm_page.c                 vm_physmem[lcv].avail_end--;
avail_end  547 uvm/uvm_page.c                 if (vm_physmem[lcv].avail_end ==
avail_end  570 uvm/uvm_page.c                 if (vm_physmem[lcv].avail_start >= vm_physmem[lcv].avail_end)
avail_end  618 uvm/uvm_page.c                 uvm_page_physload(start, end, avail_start, avail_end, free_list)
avail_end  619 uvm/uvm_page.c                 paddr_t start, end, avail_start, avail_end;
avail_end  683 uvm/uvm_page.c                 atop(paddr) <= avail_end)
avail_end  746 uvm/uvm_page.c                 ps->avail_end = avail_end;
avail_end  785 uvm/uvm_page.c                 (vm_physmem[lcv].avail_end - vm_physmem[lcv].avail_start);
avail_end  870 uvm/uvm_page.c                 (long long)vm_physmem[lcv].avail_end);
avail_end  210 uvm/uvm_page.h                 paddr_t avail_end; /* (PF# of last free page in segment) +1 */
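
The uvm_page.c entries around lines 541-547 show the pattern this field exists for: during boot-time allocation a page can be stolen from the top of a physical segment by decrementing avail_end, as long as the tail of the segment is still untouched. The following is a minimal, self-contained sketch of that pattern only, not the actual OpenBSD code: the cut-down vm_physseg struct, the PAGE_SHIFT value, the segment-table size, and the function name steal_page_from_top are all assumptions made for illustration.

/*
 * Minimal sketch (assumed simplifications, see above): avail_start and
 * avail_end bracket the pages of a segment not yet stolen by boot-time
 * allocations; stealing from the top decrements avail_end, mirroring
 * the uvm_page.c lines 541-547 shown in the listing.
 */
#include <stdio.h>
#include <stdint.h>

typedef uint64_t paddr_t;

#define PAGE_SHIFT	12			/* assumed 4 KB pages */
#define ptoa(x)		((paddr_t)(x) << PAGE_SHIFT)
#define atop(x)		((paddr_t)(x) >> PAGE_SHIFT)

struct vm_physseg {
	paddr_t start;		/* PF# of first page in segment */
	paddr_t end;		/* (PF# of last page in segment) + 1 */
	paddr_t avail_start;	/* PF# of first free page in segment */
	paddr_t avail_end;	/* (PF# of last free page in segment) + 1 */
};

static struct vm_physseg vm_physmem[4];	/* table size chosen arbitrarily */
static int vm_nphysseg;

/*
 * Hand out one page from the top of the first segment whose tail is
 * still untouched (avail_end == end).  Returns 1 and stores the
 * physical address in *paddrp on success, 0 if no segment qualifies.
 */
static int
steal_page_from_top(paddr_t *paddrp)
{
	int lcv;

	for (lcv = 0; lcv < vm_nphysseg; lcv++) {
		struct vm_physseg *seg = &vm_physmem[lcv];

		if (seg->avail_end == seg->end &&
		    seg->avail_start < seg->avail_end) {
			*paddrp = ptoa(seg->avail_end - 1);
			seg->avail_end--;
			seg->end--;
			return 1;
		}
	}
	return 0;
}

int
main(void)
{
	paddr_t pa;

	/* One fake 16 MB segment starting at 1 MB, entirely free. */
	vm_physmem[0].start = atop(0x100000);
	vm_physmem[0].end = atop(0x1100000);
	vm_physmem[0].avail_start = vm_physmem[0].start;
	vm_physmem[0].avail_end = vm_physmem[0].end;
	vm_nphysseg = 1;

	if (steal_page_from_top(&pa))
		printf("stole page at 0x%llx, avail_end now pf# %llu\n",
		    (unsigned long long)pa,
		    (unsigned long long)vm_physmem[0].avail_end);
	return 0;
}

The global avail_end in machdep.c plays a related but distinct role: it is a byte address marking the top of usable RAM (trimmed by round_page(MSGBUFSIZE) and similar reservations in the listing), whereas the vm_physseg field is a page frame number bound per segment.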