ufi              1740 dev/usb/umass.c 				sc->sbl.ufi.asc, sc->sbl.ufi.ascq));
ufi              1742 dev/usb/umass.c 			if ((sc->sbl.ufi.asc == 0 && sc->sbl.ufi.ascq == 0) ||
ufi               125 dev/usb/umassvar.h 	} ufi;
ufi               374 uvm/uvm_device.c udv_fault(ufi, vaddr, pps, npages, centeridx, fault_type, access_type, flags)
ufi               375 uvm/uvm_device.c 	struct uvm_faultinfo *ufi;
ufi               382 uvm/uvm_device.c 	struct vm_map_entry *entry = ufi->entry;
ufi               403 uvm/uvm_device.c 		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj, NULL);
ufi               445 uvm/uvm_device.c 		mapprot = ufi->entry->protection;
ufi               448 uvm/uvm_device.c 		    ufi->orig_map->pmap, curr_va, (u_long)paddr, mapprot);
ufi               449 uvm/uvm_device.c 		if (pmap_enter(ufi->orig_map->pmap, curr_va, paddr,
ufi               461 uvm/uvm_device.c 			uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap,
ufi               465 uvm/uvm_device.c 			pmap_update(ufi->orig_map->pmap);      
ufi               471 uvm/uvm_device.c 	uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj, NULL);
ufi               472 uvm/uvm_device.c 	pmap_update(ufi->orig_map->pmap);
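
The uvm/uvm_device.c hits above all come from udv_fault(), the device pager's fault hook. A rough sketch of the mapping loop those lines suggest, in C and heavily trimmed; lcv, npages, curr_va, paddr, mapprot and uobj are the locals/parameters visible in the hits, while PMAP_CANFAIL and the VM_PAGER_OK / VM_PAGER_REFAULT return codes are assumed from the surrounding UVM code rather than shown here:

	/*
	 * Illustrative sketch only, not a verbatim copy of udv_fault():
	 * enter each device page, and on failure drop every lock taken
	 * by uvmfault_lookup() before asking the caller to refault.
	 */
	mapprot = ufi->entry->protection;
	for (lcv = 0; lcv < npages; lcv++, curr_va += PAGE_SIZE) {
		/* ... compute the device physical address paddr for curr_va ... */
		if (pmap_enter(ufi->orig_map->pmap, curr_va, paddr,
		    mapprot, PMAP_CANFAIL | mapprot) != 0) {
			/* resource shortage: unlock, flush, let the caller retry */
			uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap,
			    uobj, NULL);
			pmap_update(ufi->orig_map->pmap);
			return (VM_PAGER_REFAULT);	/* assumed return code */
		}
	}
	/* success: release everything and flush the pmap */
	uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj, NULL);
	pmap_update(ufi->orig_map->pmap);
	return (VM_PAGER_OK);			/* assumed return code */
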
ufi               233 uvm/uvm_fault.c uvmfault_amapcopy(ufi)
ufi               234 uvm/uvm_fault.c 	struct uvm_faultinfo *ufi;
ufi               247 uvm/uvm_fault.c 		if (uvmfault_lookup(ufi, TRUE) == FALSE)
ufi               254 uvm/uvm_fault.c 		if (UVM_ET_ISNEEDSCOPY(ufi->entry))
ufi               255 uvm/uvm_fault.c 			amap_copy(ufi->map, ufi->entry, M_NOWAIT, TRUE, 
ufi               256 uvm/uvm_fault.c 				ufi->orig_rvaddr, ufi->orig_rvaddr + 1);
ufi               262 uvm/uvm_fault.c 		if (UVM_ET_ISNEEDSCOPY(ufi->entry)) {
ufi               263 uvm/uvm_fault.c 			uvmfault_unlockmaps(ufi, TRUE);
ufi               272 uvm/uvm_fault.c 		uvmfault_unlockmaps(ufi, TRUE);
ufi               295 uvm/uvm_fault.c uvmfault_anonget(ufi, amap, anon)
ufi               296 uvm/uvm_fault.c 	struct uvm_faultinfo *ufi;
ufi               357 uvm/uvm_fault.c 				uvmfault_unlockall(ufi, amap, NULL, anon);
ufi               365 uvm/uvm_fault.c 				uvmfault_unlockall(ufi, amap, NULL, NULL);
ufi               382 uvm/uvm_fault.c 				uvmfault_unlockall(ufi, amap, NULL, anon);
ufi               393 uvm/uvm_fault.c 				uvmfault_unlockall(ufi, amap, NULL, anon);
ufi               418 uvm/uvm_fault.c 		locked = uvmfault_relock(ufi);
ufi               454 uvm/uvm_fault.c 					uvmfault_unlockall(ufi, amap, NULL,
ufi               486 uvm/uvm_fault.c 					uvmfault_unlockall(ufi, amap, NULL,
ufi               519 uvm/uvm_fault.c 		if (ufi != NULL &&
ufi               520 uvm/uvm_fault.c 		    amap_lookup(&ufi->entry->aref, 
ufi               521 uvm/uvm_fault.c 				ufi->orig_rvaddr - ufi->entry->start) != anon) {
ufi               523 uvm/uvm_fault.c 			uvmfault_unlockall(ufi, amap, NULL, anon);
ufi               565 uvm/uvm_fault.c 	struct uvm_faultinfo ufi;
ufi               590 uvm/uvm_fault.c 	ufi.orig_map = orig_map;
ufi               591 uvm/uvm_fault.c 	ufi.orig_rvaddr = trunc_page(vaddr);
ufi               592 uvm/uvm_fault.c 	ufi.orig_size = PAGE_SIZE;	/* can't get any smaller than this */
ufi               608 uvm/uvm_fault.c 	if (uvmfault_lookup(&ufi, FALSE) == FALSE) {
ufi               615 uvm/uvm_fault.c 	if ((ufi.map->flags & VM_MAP_PAGEABLE) == 0)
ufi               617 uvm/uvm_fault.c 		    ufi.map, vaddr);
ufi               624 uvm/uvm_fault.c 	if ((ufi.entry->protection & access_type) != access_type) {
ufi               627 uvm/uvm_fault.c 		    ufi.entry->protection, access_type, 0, 0);
ufi               628 uvm/uvm_fault.c 		uvmfault_unlockmaps(&ufi, FALSE);
ufi               639 uvm/uvm_fault.c 	enter_prot = ufi.entry->protection;
ufi               640 uvm/uvm_fault.c 	wired = VM_MAPENT_ISWIRED(ufi.entry) || (fault_type == VM_FAULT_WIRE);
ufi               651 uvm/uvm_fault.c 	if (UVM_ET_ISNEEDSCOPY(ufi.entry)) {
ufi               653 uvm/uvm_fault.c 		    (ufi.entry->object.uvm_obj == NULL)) {
ufi               657 uvm/uvm_fault.c 			uvmfault_unlockmaps(&ufi, FALSE);
ufi               658 uvm/uvm_fault.c 			uvmfault_amapcopy(&ufi);
ufi               677 uvm/uvm_fault.c 	amap = ufi.entry->aref.ar_amap;		/* top layer */
ufi               678 uvm/uvm_fault.c 	uobj = ufi.entry->object.uvm_obj;	/* bottom layer */
ufi               686 uvm/uvm_fault.c 		uvmfault_unlockmaps(&ufi, FALSE);
ufi               701 uvm/uvm_fault.c 		KASSERT(uvmadvice[ufi.entry->advice].advice ==
ufi               702 uvm/uvm_fault.c 			 ufi.entry->advice);
ufi               703 uvm/uvm_fault.c 		nback = min(uvmadvice[ufi.entry->advice].nback,
ufi               704 uvm/uvm_fault.c 			    (ufi.orig_rvaddr - ufi.entry->start) >> PAGE_SHIFT);
ufi               705 uvm/uvm_fault.c 		startva = ufi.orig_rvaddr - (nback << PAGE_SHIFT);
ufi               706 uvm/uvm_fault.c 		nforw = min(uvmadvice[ufi.entry->advice].nforw,
ufi               707 uvm/uvm_fault.c 			    ((ufi.entry->end - ufi.orig_rvaddr) >>
ufi               722 uvm/uvm_fault.c 		startva = ufi.orig_rvaddr;
ufi               731 uvm/uvm_fault.c 	UVMHIST_LOG(maphist, "  entry=%p, amap=%p, obj=%p", ufi.entry,
ufi               740 uvm/uvm_fault.c 		amap_lookups(&ufi.entry->aref, startva - ufi.entry->start,
ufi               753 uvm/uvm_fault.c 	if (ufi.entry->advice == MADV_SEQUENTIAL && nback != 0) {
ufi               763 uvm/uvm_fault.c 			uoff = (startva - ufi.entry->start) + ufi.entry->offset;
ufi               795 uvm/uvm_fault.c 		    pmap_extract(ufi.orig_map->pmap, currva, &pa)) {
ufi               827 uvm/uvm_fault.c 			    ufi.orig_map->pmap, currva, anon->an_page, 0);
ufi               836 uvm/uvm_fault.c 			(void) pmap_enter(ufi.orig_map->pmap, currva,
ufi               841 uvm/uvm_fault.c 			     (VM_MAPENT_ISWIRED(ufi.entry) ? PMAP_WIRED : 0));
ufi               844 uvm/uvm_fault.c 		pmap_update(ufi.orig_map->pmap);
ufi               872 uvm/uvm_fault.c 		result = uobj->pgops->pgo_fault(&ufi, startva, pages, npages,
ufi               906 uvm/uvm_fault.c 		(void) uobj->pgops->pgo_get(uobj, ufi.entry->offset +
ufi               907 uvm/uvm_fault.c 				(startva - ufi.entry->start),
ufi               909 uvm/uvm_fault.c 				access_type & MASK(ufi.entry),
ufi               910 uvm/uvm_fault.c 				ufi.entry->advice, PGO_LOCKED);
ufi               959 uvm/uvm_fault.c 				  ufi.orig_map->pmap, currva, pages[lcv], 0);
ufi               969 uvm/uvm_fault.c 				(void) pmap_enter(ufi.orig_map->pmap, currva,
ufi               971 uvm/uvm_fault.c 				    enter_prot & MASK(ufi.entry),
ufi               985 uvm/uvm_fault.c 			pmap_update(ufi.orig_map->pmap);
ufi              1044 uvm/uvm_fault.c 	result = uvmfault_anonget(&ufi, amap, anon);
ufi              1111 uvm/uvm_fault.c 					uvmfault_unlockall(&ufi, amap, uobj,
ufi              1182 uvm/uvm_fault.c 			uvmfault_unlockall(&ufi, amap, uobj, oanon);
ufi              1202 uvm/uvm_fault.c 		amap_add(&ufi.entry->aref, ufi.orig_rvaddr - ufi.entry->start,
ufi              1234 uvm/uvm_fault.c 	    ufi.orig_map->pmap, ufi.orig_rvaddr, pg, 0);
ufi              1235 uvm/uvm_fault.c 	if (pmap_enter(ufi.orig_map->pmap, ufi.orig_rvaddr, VM_PAGE_TO_PHYS(pg),
ufi              1245 uvm/uvm_fault.c 		uvmfault_unlockall(&ufi, amap, uobj, oanon);
ufi              1286 uvm/uvm_fault.c 	uvmfault_unlockall(&ufi, amap, uobj, oanon);
ufi              1287 uvm/uvm_fault.c 	pmap_update(ufi.orig_map->pmap);
ufi              1314 uvm/uvm_fault.c 		     UVM_ET_ISCOPYONWRITE(ufi.entry);
ufi              1336 uvm/uvm_fault.c 		uvmfault_unlockall(&ufi, amap, NULL, NULL);
ufi              1341 uvm/uvm_fault.c 		uoff = (ufi.orig_rvaddr - ufi.entry->start) + ufi.entry->offset;
ufi              1343 uvm/uvm_fault.c 		    0, access_type & MASK(ufi.entry), ufi.entry->advice,
ufi              1374 uvm/uvm_fault.c 		locked = uvmfault_relock(&ufi);
ufi              1388 uvm/uvm_fault.c 		    amap_lookup(&ufi.entry->aref,
ufi              1389 uvm/uvm_fault.c 		      ufi.orig_rvaddr - ufi.entry->start))) {
ufi              1391 uvm/uvm_fault.c 				uvmfault_unlockall(&ufi, amap, NULL, NULL);
ufi              1466 uvm/uvm_fault.c 		if (UVM_ET_ISCOPYONWRITE(ufi.entry))
ufi              1505 uvm/uvm_fault.c 					uvmfault_unlockall(&ufi, amap, uobj,
ufi              1602 uvm/uvm_fault.c 			uvmfault_unlockall(&ufi, amap, uobj, NULL);
ufi              1668 uvm/uvm_fault.c 		amap_add(&ufi.entry->aref, ufi.orig_rvaddr - ufi.entry->start,
ufi              1686 uvm/uvm_fault.c 	    ufi.orig_map->pmap, ufi.orig_rvaddr, pg, promote);
ufi              1687 uvm/uvm_fault.c 	if (pmap_enter(ufi.orig_map->pmap, ufi.orig_rvaddr, VM_PAGE_TO_PHYS(pg),
ufi              1709 uvm/uvm_fault.c 		uvmfault_unlockall(&ufi, amap, uobj, NULL);
ufi              1753 uvm/uvm_fault.c 	uvmfault_unlockall(&ufi, amap, uobj, NULL);
ufi              1754 uvm/uvm_fault.c 	pmap_update(ufi.orig_map->pmap);
ufi                55 uvm/uvm_fault_i.h uvmfault_unlockmaps(ufi, write_locked)
ufi                56 uvm/uvm_fault_i.h 	struct uvm_faultinfo *ufi;
ufi                64 uvm/uvm_fault_i.h 	if (ufi == NULL) {
ufi                69 uvm/uvm_fault_i.h 		vm_map_unlock(ufi->map);
ufi                71 uvm/uvm_fault_i.h 		vm_map_unlock_read(ufi->map);
ufi                82 uvm/uvm_fault_i.h uvmfault_unlockall(ufi, amap, uobj, anon)
ufi                83 uvm/uvm_fault_i.h 	struct uvm_faultinfo *ufi;
ufi                93 uvm/uvm_fault_i.h 	uvmfault_unlockmaps(ufi, FALSE);
ufi               112 uvm/uvm_fault_i.h uvmfault_lookup(ufi, write_lock)
ufi               113 uvm/uvm_fault_i.h 	struct uvm_faultinfo *ufi;
ufi               122 uvm/uvm_fault_i.h 	ufi->map = ufi->orig_map;
ufi               123 uvm/uvm_fault_i.h 	ufi->size = ufi->orig_size;
ufi               136 uvm/uvm_fault_i.h 			vm_map_lock(ufi->map);
ufi               138 uvm/uvm_fault_i.h 			vm_map_lock_read(ufi->map);
ufi               144 uvm/uvm_fault_i.h 		if (!uvm_map_lookup_entry(ufi->map, ufi->orig_rvaddr, 
ufi               145 uvm/uvm_fault_i.h 								&ufi->entry)) {
ufi               146 uvm/uvm_fault_i.h 			uvmfault_unlockmaps(ufi, write_lock);
ufi               153 uvm/uvm_fault_i.h 		if (ufi->entry->end - ufi->orig_rvaddr < ufi->size)
ufi               154 uvm/uvm_fault_i.h 			ufi->size = ufi->entry->end - ufi->orig_rvaddr;
ufi               160 uvm/uvm_fault_i.h 		if (UVM_ET_ISSUBMAP(ufi->entry)) {
ufi               161 uvm/uvm_fault_i.h 			tmpmap = ufi->entry->object.sub_map;
ufi               163 uvm/uvm_fault_i.h 				vm_map_unlock(ufi->map);
ufi               165 uvm/uvm_fault_i.h 				vm_map_unlock_read(ufi->map);
ufi               167 uvm/uvm_fault_i.h 			ufi->map = tmpmap;
ufi               175 uvm/uvm_fault_i.h 		ufi->mapv = ufi->map->timestamp;
ufi               191 uvm/uvm_fault_i.h uvmfault_relock(ufi)
ufi               192 uvm/uvm_fault_i.h 	struct uvm_faultinfo *ufi;
ufi               199 uvm/uvm_fault_i.h 	if (ufi == NULL) {
ufi               210 uvm/uvm_fault_i.h 	vm_map_lock_read(ufi->map);
ufi               211 uvm/uvm_fault_i.h 	if (ufi->mapv != ufi->map->timestamp) {
ufi               212 uvm/uvm_fault_i.h 		vm_map_unlock_read(ufi->map);
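
Taken together, the uvm/uvm_fault_i.h helpers above (uvmfault_lookup, uvmfault_unlockmaps, uvmfault_unlockall, uvmfault_relock) form the locking pattern that uvm_fault(), uvm_loan() and udv_fault() build on. A minimal caller sketch, assuming the standard uvm headers and only the struct uvm_faultinfo fields visible in the hits (orig_map, orig_rvaddr, orig_size, map, size, entry, mapv); example_entry_is_cow() itself is hypothetical and not part of the tree:

	#include <uvm/uvm.h>	/* assumed include for the uvmfault_* helpers */

	/*
	 * Hypothetical helper: look up the map entry backing "vaddr"
	 * and report whether it is copy-on-write.
	 */
	boolean_t
	example_entry_is_cow(struct vm_map *map, vaddr_t vaddr)
	{
		struct uvm_faultinfo ufi;
		boolean_t cow;

		/* seed the fault info exactly as uvm_fault() does */
		ufi.orig_map = map;
		ufi.orig_rvaddr = trunc_page(vaddr);
		ufi.orig_size = PAGE_SIZE;

		/* read-locks ufi.map and fills in ufi.entry, ufi.size, ufi.mapv */
		if (uvmfault_lookup(&ufi, FALSE) == FALSE)
			return (FALSE);		/* nothing mapped there */

		cow = UVM_ET_ISCOPYONWRITE(ufi.entry);

		/* release the read lock taken by uvmfault_lookup() */
		uvmfault_unlockmaps(&ufi, FALSE);
		return (cow);
	}
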
ufi               130 uvm/uvm_loan.c uvm_loanentry(ufi, output, flags)
ufi               131 uvm/uvm_loan.c 	struct uvm_faultinfo *ufi;
ufi               135 uvm/uvm_loan.c 	vaddr_t curaddr = ufi->orig_rvaddr;
ufi               136 uvm/uvm_loan.c 	vsize_t togo = ufi->size;
ufi               137 uvm/uvm_loan.c 	struct vm_aref *aref = &ufi->entry->aref;
ufi               138 uvm/uvm_loan.c 	struct uvm_object *uobj = ufi->entry->object.uvm_obj;
ufi               158 uvm/uvm_loan.c 			anon = amap_lookup(aref, curaddr - ufi->entry->start);
ufi               164 uvm/uvm_loan.c 			rv = uvm_loananon(ufi, output, flags, anon);
ufi               166 uvm/uvm_loan.c 			rv = uvm_loanuobj(ufi, output, flags, curaddr);
ufi               167 uvm/uvm_loan.c 		} else if (UVM_ET_ISCOPYONWRITE(ufi->entry)) {
ufi               168 uvm/uvm_loan.c 			rv = uvm_loanzero(ufi, output, flags);
ufi               192 uvm/uvm_loan.c 	uvmfault_unlockall(ufi, aref->ar_amap, uobj, NULL);
ufi               219 uvm/uvm_loan.c 	struct uvm_faultinfo ufi;
ufi               254 uvm/uvm_loan.c 		ufi.orig_map = map;
ufi               255 uvm/uvm_loan.c 		ufi.orig_rvaddr = start;
ufi               256 uvm/uvm_loan.c 		ufi.orig_size = len;
ufi               263 uvm/uvm_loan.c 		if (!uvmfault_lookup(&ufi, FALSE)) 
ufi               269 uvm/uvm_loan.c 		rv = uvm_loanentry(&ufi, &output, flags);
ufi               319 uvm/uvm_loan.c uvm_loananon(ufi, output, flags, anon)
ufi               320 uvm/uvm_loan.c 	struct uvm_faultinfo *ufi;
ufi               353 uvm/uvm_loan.c 	result = uvmfault_anonget(ufi, ufi->entry->aref.ar_amap, anon);
ufi               408 uvm/uvm_loan.c uvm_loanuobj(ufi, output, flags, va)
ufi               409 uvm/uvm_loan.c 	struct uvm_faultinfo *ufi;
ufi               414 uvm/uvm_loan.c 	struct vm_amap *amap = ufi->entry->aref.ar_amap;
ufi               415 uvm/uvm_loan.c 	struct uvm_object *uobj = ufi->entry->object.uvm_obj;
ufi               430 uvm/uvm_loan.c 		result = uobj->pgops->pgo_get(uobj, va - ufi->entry->start,
ufi               442 uvm/uvm_loan.c 		uvmfault_unlockall(ufi, amap, uobj, NULL);
ufi               451 uvm/uvm_loan.c 		uvmfault_unlockall(ufi, amap, NULL, NULL);
ufi               455 uvm/uvm_loan.c 		result = uobj->pgops->pgo_get(uobj, va - ufi->entry->start,
ufi               475 uvm/uvm_loan.c 		locked = uvmfault_relock(ufi);
ufi               485 uvm/uvm_loan.c 		    (locked && amap && amap_lookup(&ufi->entry->aref,
ufi               486 uvm/uvm_loan.c 		    ufi->orig_rvaddr - ufi->entry->start))) {
ufi               489 uvm/uvm_loan.c 				uvmfault_unlockall(ufi, amap, NULL, NULL);
ufi               580 uvm/uvm_loan.c 		uvmfault_unlockall(ufi, amap, uobj, NULL);
ufi               611 uvm/uvm_loan.c uvm_loanzero(ufi, output, flags)
ufi               612 uvm/uvm_loan.c 	struct uvm_faultinfo *ufi;
ufi               623 uvm/uvm_loan.c 			uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, 
ufi               624 uvm/uvm_loan.c 			    ufi->entry->object.uvm_obj, NULL);
ufi               626 uvm/uvm_loan.c 			if (!uvmfault_relock(ufi))
ufi               628 uvm/uvm_loan.c 			if (ufi->entry->object.uvm_obj)
ufi               630 uvm/uvm_loan.c 				    &ufi->entry->object.uvm_obj->vmobjlock);
ufi               652 uvm/uvm_loan.c 		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap,
ufi               653 uvm/uvm_loan.c 		       ufi->entry->object.uvm_obj, NULL);
ufi               662 uvm/uvm_loan.c 		if (!uvmfault_relock(ufi))
ufi               667 uvm/uvm_loan.c 		if (ufi->entry->object.uvm_obj)
ufi               668 uvm/uvm_loan.c 			simple_lock(&ufi->entry->object.uvm_obj->vmobjlock);
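
The uvm/uvm_loan.c hits show the same fault-info object driving page loaning: uvm_loan() seeds the ufi per request, uvmfault_lookup() resolves it, and uvm_loanentry() dispatches on whatever backs each address. A condensed view of that dispatch, using only the locals named in the hits (aref, uobj, curaddr, output, flags); loop bookkeeping, locking and error paths are omitted, so treat it as a sketch rather than the actual function body:

	/*
	 * Dispatch inside uvm_loanentry() as implied by the hits above:
	 * prefer the anon layer, then the backing object, then a zero page.
	 */
	anon = NULL;
	if (aref->ar_amap != NULL)
		anon = amap_lookup(aref, curaddr - ufi->entry->start);

	if (anon != NULL)			/* anon layer backs the page */
		rv = uvm_loananon(ufi, output, flags, anon);
	else if (uobj != NULL)			/* uvm_object (e.g. vnode) backs it */
		rv = uvm_loanuobj(ufi, output, flags, curaddr);
	else if (UVM_ET_ISCOPYONWRITE(ufi->entry))	/* nothing yet: loan zero */
		rv = uvm_loanzero(ufi, output, flags);
	/* otherwise the entry cannot be loaned; caller unlocks and fails */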