/*	$OpenBSD: uvm_device.c,v 1.27 2006/07/31 11:51:29 mickey Exp $	*/
/*	$NetBSD: uvm_device.c,v 1.30 2000/11/25 06:27:59 chs Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_device.c,v 1.1.2.9 1998/02/06 05:11:47 chs Exp
 */

/*
 * uvm_device.c: the device pager.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/vnode.h>

#include <uvm/uvm.h>
#include <uvm/uvm_device.h>

/*
 * private global data structure
 *
 * we keep a list of active device objects in the system.
 */

LIST_HEAD(udv_list_struct, uvm_device);
static struct udv_list_struct udv_list;
static simple_lock_data_t udv_lock;
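
/*
 * udv_lock protects udv_list and the UVM_DEVICE_HOLD/UVM_DEVICE_WANTED
 * handshake in u_flags; the per-object vmobjlock protects the reference
 * count (see udv_attach() and udv_detach() below).
 */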

/*
 * functions
 */

static void		udv_init(void);
static void		udv_reference(struct uvm_object *);
static void		udv_detach(struct uvm_object *);
static int		udv_fault(struct uvm_faultinfo *, vaddr_t,
			    vm_page_t *, int, int, vm_fault_t,
			    vm_prot_t, int);
static boolean_t	udv_flush(struct uvm_object *, voff_t, voff_t,
			    int);

/*
 * master pager structure
 */

struct uvm_pagerops uvm_deviceops = {
	udv_init,		/* pgo_init */
	udv_reference,		/* pgo_reference */
	udv_detach,		/* pgo_detach */
	udv_fault,		/* pgo_fault */
	udv_flush,		/* pgo_flush */
};
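
/*
 * note: no pgo_get routine here; device mappings are entered directly
 * by udv_fault() below, so the remaining pager ops (pgo_get, etc.) are
 * left NULL by the initializer.
 */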

/*
 * the ops!
 */

/*
 * udv_init
 *
 * init pager private data structures.
 */

void
udv_init()
{

	LIST_INIT(&udv_list);
	simple_lock_init(&udv_lock);
}

/*
 * udv_attach
 *
 * get a VM object that is associated with a device. allocate a new
 * one if needed.
 *
 * => caller must _not_ already be holding the lock on the uvm_object.
 * => in fact, nothing should be locked so that we can sleep here.
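 *
 * usage sketch (an assumption: this mirrors how uvm_mmap() attaches a
 * device object for a character device; see uvm_mmap.c):
 *
 *	uobj = udv_attach((void *) &vp->v_rdev, accessprot, foff, size);
 *	if (uobj == NULL)
 *		return (EINVAL);	(device does not support mmap)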
 */
struct uvm_object *
udv_attach(arg, accessprot, off, size)
	void *arg;
	vm_prot_t accessprot;
	voff_t off;		/* used only for access check */
	vsize_t size;		/* used only for access check */
{
	dev_t device = *((dev_t *)arg);
	struct uvm_device *udv, *lcv;
	paddr_t (*mapfn)(dev_t, off_t, int);
	UVMHIST_FUNC("udv_attach"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(device=0x%lx)", device,0,0,0);

	/*
	 * before we do anything, ensure this device supports mmap
	 */

	mapfn = cdevsw[major(device)].d_mmap;
	if (mapfn == NULL ||
	    mapfn == (paddr_t (*)(dev_t, off_t, int)) enodev ||
	    mapfn == (paddr_t (*)(dev_t, off_t, int)) nullop)
		return(NULL);

	/*
	 * Negative offsets on the object are not allowed.
	 */

	if (off < 0)
		return(NULL);

	/*
	 * Check that the specified range of the device allows the
	 * desired protection.
	 *
	 * XXX assumes VM_PROT_* == PROT_*
	 * XXX clobbers off and size, but nothing else here needs them.
	 */

	while (size != 0) {
		if ((*mapfn)(device, off, accessprot) == -1)
			return (NULL);
		off += PAGE_SIZE; size -= PAGE_SIZE;
	}

	/*
	 * keep looping until we get it
	 */

	for (;;) {

		/*
		 * first, attempt to find it on the main list
		 */

		simple_lock(&udv_lock);
		LIST_FOREACH(lcv, &udv_list, u_list) {
			if (device == lcv->u_device)
				break;
		}

		/*
		 * got it on main list. put a hold on it and unlock udv_lock.
		 */

		if (lcv) {

			/*
			 * if someone else has a hold on it, sleep and start
			 * over again.
			 */

			if (lcv->u_flags & UVM_DEVICE_HOLD) {
				lcv->u_flags |= UVM_DEVICE_WANTED;
				UVM_UNLOCK_AND_WAIT(lcv, &udv_lock, FALSE,
				    "udv_attach",0);
				continue;
			}

			/* we are now holding it */
			lcv->u_flags |= UVM_DEVICE_HOLD;
			simple_unlock(&udv_lock);

			/*
			 * bump reference count, unhold, return.
			 */

			simple_lock(&lcv->u_obj.vmobjlock);
			lcv->u_obj.uo_refs++;
			simple_unlock(&lcv->u_obj.vmobjlock);

			simple_lock(&udv_lock);
			if (lcv->u_flags & UVM_DEVICE_WANTED)
				wakeup(lcv);
			lcv->u_flags &= ~(UVM_DEVICE_WANTED|UVM_DEVICE_HOLD);
			simple_unlock(&udv_lock);
			return(&lcv->u_obj);
		}

		/*
		 * did not find it on main list. need to malloc a new one.
		 */

		simple_unlock(&udv_lock);
		/* NOTE: we could sleep in the following malloc() */
		MALLOC(udv, struct uvm_device *, sizeof(*udv), M_TEMP,
		    M_WAITOK);
		simple_lock(&udv_lock);

		/*
		 * now we have to double check to make sure no one added it
		 * to the list while we were sleeping...
		 */

		LIST_FOREACH(lcv, &udv_list, u_list) {
			if (device == lcv->u_device)
				break;
		}

		/*
		 * did we lose a race to someone else?
		 * free our memory and retry.
		 */

		if (lcv) {
			simple_unlock(&udv_lock);
			FREE(udv, M_TEMP);
			continue;
		}

		/*
		 * we have it! init the data structures, add to list
		 * and return.
		 */

		simple_lock_init(&udv->u_obj.vmobjlock);
		udv->u_obj.pgops = &uvm_deviceops;
		TAILQ_INIT(&udv->u_obj.memq);
		udv->u_obj.uo_npages = 0;
		udv->u_obj.uo_refs = 1;
		udv->u_flags = 0;
		udv->u_device = device;
		LIST_INSERT_HEAD(&udv_list, udv, u_list);
		simple_unlock(&udv_lock);
		return(&udv->u_obj);
	}
	/*NOTREACHED*/
}

/*
 * udv_reference
 *
 * add a reference to a VM object. Note that the reference count must
 * already be at least one (the passed-in reference) so there is no
 * chance of the udv being released or locked out here.
 *
 * => caller must call with object unlocked.
 */

static void
udv_reference(uobj)
	struct uvm_object *uobj;
{
	UVMHIST_FUNC("udv_reference"); UVMHIST_CALLED(maphist);

	simple_lock(&uobj->vmobjlock);
	uobj->uo_refs++;
	UVMHIST_LOG(maphist, "<- done (uobj=%p, ref = %ld)",
	    uobj, uobj->uo_refs,0,0);
	simple_unlock(&uobj->vmobjlock);
}

/*
 * udv_detach
 *
 * remove a reference to a VM object.
 *
 * => caller must call with object unlocked and map locked.
 */

static void
udv_detach(uobj)
	struct uvm_object *uobj;
{
	struct uvm_device *udv = (struct uvm_device *)uobj;
	UVMHIST_FUNC("udv_detach"); UVMHIST_CALLED(maphist);

	/*
	 * loop until done
	 */
again:
	simple_lock(&uobj->vmobjlock);
	if (uobj->uo_refs > 1) {
		uobj->uo_refs--;
		simple_unlock(&uobj->vmobjlock);
		UVMHIST_LOG(maphist," <- done, uobj=%p, ref=%ld",
		    uobj,uobj->uo_refs,0,0);
		return;
	}
	KASSERT(uobj->uo_npages == 0 && TAILQ_EMPTY(&uobj->memq));

	/*
	 * is it being held? if so, wait until others are done.
	 */

	simple_lock(&udv_lock);
	if (udv->u_flags & UVM_DEVICE_HOLD) {
		udv->u_flags |= UVM_DEVICE_WANTED;
		simple_unlock(&uobj->vmobjlock);
		UVM_UNLOCK_AND_WAIT(udv, &udv_lock, FALSE, "udv_detach",0);
		goto again;
	}

	/*
	 * got it! nuke it now.
	 */

	LIST_REMOVE(udv, u_list);
	if (udv->u_flags & UVM_DEVICE_WANTED)
		wakeup(udv);
	simple_unlock(&udv_lock);
	simple_unlock(&uobj->vmobjlock);
	FREE(udv, M_TEMP);
	UVMHIST_LOG(maphist," <- done, freed uobj=%p", uobj,0,0,0);
}


/*
 * udv_flush
 *
 * flush pages out of a uvm object. a no-op for devices.
 */

static boolean_t
udv_flush(uobj, start, stop, flags)
	struct uvm_object *uobj;
	voff_t start, stop;
	int flags;
{

	return(TRUE);
}

/*
 * udv_fault: non-standard fault routine for device "pages"
 *
 * => rather than having a "get" function, we have a fault routine:
 *	since we don't return vm_pages, we need full control over the
 *	pmap_enter() map-in
 * => all the usual fault data structures are locked by the caller
 *	(i.e. maps(read), amap (if any), uobj)
 * => on return, we unlock all fault data structures
 * => flags: PGO_ALLPAGES: get all of the pages
 *	     PGO_LOCKED: fault data structures are locked
 *    XXX: currently PGO_LOCKED is always required ... consider removing
 *	it as a flag
 * => NOTE: vaddr is the VA of pps[0] in ufi->entry, _NOT_ pps[centeridx]
 */

static int
udv_fault(ufi, vaddr, pps, npages, centeridx, fault_type, access_type, flags)
	struct uvm_faultinfo *ufi;
	vaddr_t vaddr;
	vm_page_t *pps;
	int npages, centeridx, flags;
	vm_fault_t fault_type;
	vm_prot_t access_type;
{
	struct vm_map_entry *entry = ufi->entry;
	struct uvm_object *uobj = entry->object.uvm_obj;
	struct uvm_device *udv = (struct uvm_device *)uobj;
	vaddr_t curr_va;
	off_t curr_offset;
	paddr_t paddr, mdpgno;
	int lcv, retval;
	dev_t device;
	paddr_t (*mapfn)(dev_t, off_t, int);
	vm_prot_t mapprot;
	UVMHIST_FUNC("udv_fault"); UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist,"  flags=%ld", flags,0,0,0);

	/*
	 * we do not allow device mappings to be mapped copy-on-write,
	 * so we kill any attempt to do so here.
	 */

	if (UVM_ET_ISCOPYONWRITE(entry)) {
		UVMHIST_LOG(maphist, "<- failed -- COW entry (etype=0x%lx)",
		    entry->etype, 0,0,0);
		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj, NULL);
		return(VM_PAGER_ERROR);
	}

	/*
	 * get device map function.
	 */

	device = udv->u_device;
	mapfn = cdevsw[major(device)].d_mmap;

	/*
	 * now we must determine the offset in udv to use and the VA to
	 * use for pmap_enter. note that we always use orig_map's pmap
	 * for pmap_enter (even if we have a submap). since virtual
	 * addresses in a submap must match the main map, this is ok.
	 */

	/* udv offset = (offset from start of entry) + entry's offset */
	curr_offset = entry->offset + (vaddr - entry->start);
	/* pmap va = vaddr (virtual address of pps[0]) */
	curr_va = vaddr;
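
	/*
	 * e.g. if pps[0] lies three pages into the entry, curr_offset
	 * starts at entry->offset + 3 * PAGE_SIZE; both curr_offset and
	 * curr_va then advance one page per iteration of the loop below.
	 */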

	/*
	 * loop over the page range entering in as needed
	 */

	retval = VM_PAGER_OK;
	for (lcv = 0 ; lcv < npages ; lcv++, curr_offset += PAGE_SIZE,
	    curr_va += PAGE_SIZE) {
		if ((flags & PGO_ALLPAGES) == 0 && lcv != centeridx)
			continue;

		if (pps[lcv] == PGO_DONTCARE)
			continue;

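		/*
		 * ask the device's d_mmap routine for the machine-
		 * dependent page cookie backing this offset; -1 means
		 * the offset is invalid. pmap_phys_address() turns a
		 * valid cookie into a physical address for pmap_enter().
		 */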
		mdpgno = (*mapfn)(device, curr_offset, access_type);
		if (mdpgno == -1) {
			retval = VM_PAGER_ERROR;
			break;
		}
		paddr = pmap_phys_address(mdpgno);
		mapprot = ufi->entry->protection;
		UVMHIST_LOG(maphist,
		    "  MAPPING: device: pm=%p, va=0x%lx, pa=0x%lx, at=%ld",
		    ufi->orig_map->pmap, curr_va, (u_long)paddr, mapprot);
		if (pmap_enter(ufi->orig_map->pmap, curr_va, paddr,
		    mapprot, PMAP_CANFAIL | mapprot) != 0) {
			/*
			 * pmap_enter() didn't have the resource to
			 * enter this mapping. Unlock everything,
			 * wait for the pagedaemon to free up some
			 * pages, and then tell uvm_fault() to start
			 * the fault again.
			 *
			 * XXX Needs some rethinking for the PGO_ALLPAGES
			 * XXX case.
			 */
			uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap,
			    uobj, NULL);

			/* sync what we have so far */
			pmap_update(ufi->orig_map->pmap);
			uvm_wait("udv_fault");
			return (VM_PAGER_REFAULT);
		}
	}

	uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj, NULL);
	pmap_update(ufi->orig_map->pmap);
	return (retval);
}