kernel_object 2028 kern/subr_pool.c	return ((void *)uvm_km_alloc_poolpage1(kernel_map, uvm.kernel_object,
kernel_object  120 uvm/uvm.h		struct uvm_object *kernel_object;
kernel_object  175 uvm/uvm_km.c		uvm.kernel_object = uao_create(VM_MAX_KERNEL_ADDRESS -
kernel_object  493 uvm/uvm_km.c		if (__predict_false(uvm_map(map, &kva, size, uvm.kernel_object,
kernel_object  513 uvm/uvm_km.c		simple_lock(&uvm.kernel_object->vmobjlock);
kernel_object  514 uvm/uvm_km.c		pg = uvm_pagelookup(uvm.kernel_object, offset);
kernel_object  524 uvm/uvm_km.c		UVM_UNLOCK_AND_WAIT(pg, &uvm.kernel_object->vmobjlock,
kernel_object  530 uvm/uvm_km.c		pg = uvm_pagealloc(uvm.kernel_object, offset, NULL, 0);
kernel_object  535 uvm/uvm_km.c		simple_unlock(&uvm.kernel_object->vmobjlock);
kernel_object  604 uvm/uvm_km.c		if (__predict_false(uvm_map(map, &kva, size, uvm.kernel_object,
kernel_object  644 uvm/uvm_km.c		if (__predict_true(uvm_map(map, &kva, size, uvm.kernel_object,
kernel_object  259 uvm/uvm_stat.c	(*pr)(" objs(kern)=%p\n", uvm.kernel_object);
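
Taken together, the uvm_km.c hits at lines 513-535 outline how kernel_object gets populated with pages: lock the object's vmobjlock, look up the page at the target offset, sleep on it if a stale page is still present, otherwise allocate a fresh page, then unlock. The fragment below is a sketch only, reconstructed from those listed lines; the function name kernel_object_populate(), the loop bounds, and the omitted pmap_enter/allocation-failure handling are illustrative assumptions, not the kernel's exact code.

#include <sys/param.h>
#include <uvm/uvm.h>

/* Sketch of the kernel_object page loop suggested by uvm_km.c:513-535. */
static void
kernel_object_populate(vaddr_t kva, vsize_t size, voff_t offset)
{
	struct vm_page *pg;
	vaddr_t loopva = kva;

	while (loopva != kva + size) {
		simple_lock(&uvm.kernel_object->vmobjlock);	/* uvm_km.c:513 */
		pg = uvm_pagelookup(uvm.kernel_object, offset);	/* uvm_km.c:514 */

		if (pg != NULL) {
			/*
			 * A page still occupies this offset: mark it wanted,
			 * drop the object lock while sleeping, then retry.
			 */
			pg->flags |= PG_WANTED;
			UVM_UNLOCK_AND_WAIT(pg,
			    &uvm.kernel_object->vmobjlock,	/* uvm_km.c:524 */
			    FALSE, "km_alloc", 0);
			continue;
		}

		/* Allocate a fresh page at this offset in kernel_object. */
		pg = uvm_pagealloc(uvm.kernel_object, offset, NULL, 0); /* :530 */
		simple_unlock(&uvm.kernel_object->vmobjlock);	/* uvm_km.c:535 */

		/*
		 * (Entering the mapping for pg at loopva and handling a
		 * failed uvm_pagealloc() are omitted from this sketch.)
		 */
		loopva += PAGE_SIZE;
		offset += PAGE_SIZE;
	}
}

The surrounding hits fit the same picture: uvm.kernel_object is created once in uvm_km.c:175 via uao_create(), handed to uvm_map() when reserving kernel virtual address ranges (uvm_km.c:493, 604, 644 and kern/subr_pool.c:2028), and printed by the statistics code in uvm_stat.c:259.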