shmmap_s 163 kern/sysv_shm.c shm_delete_mapping(struct vmspace *vm, struct shmmap_state *shmmap_s)
shmmap_s 169 kern/sysv_shm.c segnum = IPCID_TO_IX(shmmap_s->shmid);
shmmap_s 174 kern/sysv_shm.c uvm_deallocate(&vm->vm_map, shmmap_s->va, size);
shmmap_s 175 kern/sysv_shm.c shmmap_s->shmid = -1;

shmmap_s 193 kern/sysv_shm.c struct shmmap_state *shmmap_s;
shmmap_s 200 kern/sysv_shm.c for (i = 0, shmmap_s = shmmap_h->state; i < shmmap_h->shmseg;
shmmap_s 201 kern/sysv_shm.c i++, shmmap_s++)
shmmap_s 202 kern/sysv_shm.c if (shmmap_s->shmid != -1 &&
shmmap_s 203 kern/sysv_shm.c shmmap_s->va == (vaddr_t)SCARG(uap, shmaddr))
shmmap_s 207 kern/sysv_shm.c return (shm_delete_mapping(p->p_vmspace, shmmap_s));

shmmap_s 222 kern/sysv_shm.c struct shmmap_state *shmmap_s;
shmmap_s 234 kern/sysv_shm.c for (i = 0, shmmap_s = shmmap_h->state; i < shmmap_h->shmseg;
shmmap_s 235 kern/sysv_shm.c i++, shmmap_s++)
shmmap_s 236 kern/sysv_shm.c shmmap_s->shmid = -1;
shmmap_s 246 kern/sysv_shm.c for (i = 0, shmmap_s = shmmap_h->state; i < shmmap_h->shmseg; i++) {
shmmap_s 247 kern/sysv_shm.c if (shmmap_s->shmid == -1)
shmmap_s 249 kern/sysv_shm.c shmmap_s++;
shmmap_s 281 kern/sysv_shm.c shmmap_s->va = attach_va;
shmmap_s 282 kern/sysv_shm.c shmmap_s->shmid = SCARG(uap, shmid);

shmmap_s 490 kern/sysv_shm.c struct shmmap_state *shmmap_s;
shmmap_s 504 kern/sysv_shm.c for (i = 0, shmmap_s = shmmap_h->state; i < shmmap_h->shmseg;
shmmap_s 505 kern/sysv_shm.c i++, shmmap_s++) {
shmmap_s 506 kern/sysv_shm.c if (shmmap_s->shmid != -1 &&
shmmap_s 507 kern/sysv_shm.c (shmseg = shmsegs[IPCID_TO_IX(shmmap_s->shmid)]) != NULL)

shmmap_s 516 kern/sysv_shm.c struct shmmap_state *shmmap_s;
shmmap_s 522 kern/sysv_shm.c for (i = 0, shmmap_s = shmmap_h->state; i < shmmap_h->shmseg;
shmmap_s 523 kern/sysv_shm.c i++, shmmap_s++)
shmmap_s 524 kern/sysv_shm.c if (shmmap_s->shmid != -1)
shmmap_s 525 kern/sysv_shm.c shm_delete_mapping(vm, shmmap_s);
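
The first four hits, file lines 163-175, fall inside shm_delete_mapping(), whose signature line 163 shows directly: translate the cached id back into a segment index, unmap the segment from the address space, and retire the per-process slot. Below is a minimal sketch of how those lines plausibly fit together, along with the two small structures every hit in this listing manipulates; the shmsegs[] lookup, the round_page() sizing and the attach-count bookkeeping are reconstructed assumptions, not part of the listing.

    /*
     * Assumed context: <sys/shm.h>, <sys/ipc.h>, <sys/malloc.h> and the
     * uvm headers are in scope, as they would be in kern/sysv_shm.c, and
     * shmsegs[] is the global table of segment pointers.
     */
    extern struct shmid_ds **shmsegs;

    /* Per-process attach bookkeeping, as implied by the listing: a
     * shmmap_head hangs off vm->vm_shm, one shmmap_state per attach slot. */
    struct shmmap_state {
        vaddr_t va;         /* where the segment is mapped */
        int     shmid;      /* IPC id of the segment, or -1 if the slot is free */
    };
    struct shmmap_head {
        int     shmseg;     /* number of slots in state[] */
        struct shmmap_state state[1];
    };

    int
    shm_delete_mapping(struct vmspace *vm, struct shmmap_state *shmmap_s)
    {
        struct shmid_ds *shmseg;
        size_t size;
        int segnum;

        /* 169: recover the segment index from the cached id. */
        segnum = IPCID_TO_IX(shmmap_s->shmid);
        if ((shmseg = shmsegs[segnum]) == NULL)     /* assumed sanity check */
            return (EINVAL);
        size = round_page(shmseg->shm_segsz);       /* assumed */

        /* 174: unmap the segment from this address space. */
        uvm_deallocate(&vm->vm_map, shmmap_s->va, size);

        /* Assumed: attach-count bookkeeping; the real routine likely also
         * reaps segments marked for removal once the count reaches zero. */
        shmseg->shm_nattch--;

        /* 175: the per-process slot is free again. */
        shmmap_s->shmid = -1;
        return (0);
    }

The shmid = -1 sentinel set here is what every other loop in the listing tests to tell a free slot from a live one.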
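
File lines 193-207 scan the same table for the slot whose va matches a user-supplied address and then hand it to shm_delete_mapping(), which is exactly what the shmdt(2) handler has to do. A sketch under that assumption; the sys_shmdt name, the argument plumbing and both EINVAL paths are guesses around the five lines the listing shows.

    int
    sys_shmdt(struct proc *p, void *v, register_t *retval)
    {
        struct sys_shmdt_args /* { syscallarg(const void *) shmaddr; } */ *uap = v;
        struct shmmap_head *shmmap_h;
        struct shmmap_state *shmmap_s;
        int i;

        /* Assumed: no table means this process never attached anything. */
        shmmap_h = (struct shmmap_head *)p->p_vmspace->vm_shm;
        if (shmmap_h == NULL)
            return (EINVAL);

        /* 200-203: find the slot that records the address being detached. */
        for (i = 0, shmmap_s = shmmap_h->state; i < shmmap_h->shmseg;
            i++, shmmap_s++)
            if (shmmap_s->shmid != -1 &&
                shmmap_s->va == (vaddr_t)SCARG(uap, shmaddr))
                break;
        if (i == shmmap_h->shmseg)                  /* assumed: no match */
            return (EINVAL);

        /* 207: reuse the common teardown path. */
        return (shm_delete_mapping(p->p_vmspace, shmmap_s));
    }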
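
File lines 222-282 belong to the attach path (shmat(2)): a first-time caller gets a freshly allocated table with every slot marked free (234-236), a free slot is claimed (246-249), and once the segment is mapped the slot records the address and id (281-282). A compressed sketch follows, with the segment lookup, permission checks and the actual uvm_map() call elided; shminfo.shmseg, the EMFILE return and the argument structure are assumptions.

    int
    sys_shmat(struct proc *p, void *v, register_t *retval)
    {
        struct sys_shmat_args /* {
            syscallarg(int) shmid;
            syscallarg(const void *) shmaddr;
            syscallarg(int) shmflg;
        } */ *uap = v;
        struct shmmap_head *shmmap_h;
        struct shmmap_state *shmmap_s;
        vaddr_t attach_va;
        size_t size;
        int i;

        /* 234-236 (context assumed): build the per-process table lazily
         * and mark every slot unused. */
        shmmap_h = (struct shmmap_head *)p->p_vmspace->vm_shm;
        if (shmmap_h == NULL) {
            size = sizeof(int) +
                shminfo.shmseg * sizeof(struct shmmap_state);
            shmmap_h = malloc(size, M_SHM, M_WAITOK);
            shmmap_h->shmseg = shminfo.shmseg;
            for (i = 0, shmmap_s = shmmap_h->state; i < shmmap_h->shmseg;
                i++, shmmap_s++)
                shmmap_s->shmid = -1;
            p->p_vmspace->vm_shm = (caddr_t)shmmap_h;
        }

        /* 246-249: claim the first free slot. */
        for (i = 0, shmmap_s = shmmap_h->state; i < shmmap_h->shmseg; i++) {
            if (shmmap_s->shmid == -1)
                break;                              /* assumed */
            shmmap_s++;
        }
        if (i >= shmmap_h->shmseg)                  /* assumed: table full */
            return (EMFILE);

        /*
         * ... elided: look up the segment named by SCARG(uap, shmid),
         * check permissions, choose attach_va (honouring SCARG(uap,
         * shmaddr) if given) and uvm_map() the segment there ...
         */

        /* 281-282: remember what was attached and where. */
        shmmap_s->va = attach_va;
        shmmap_s->shmid = SCARG(uap, shmid);
        *retval = attach_va;
        return (0);
    }

Note that the slot-claiming loop at 246-249 advances shmmap_s in the loop body rather than in the for header, unlike every other loop in the listing; the effect is the same, presumably because the loop breaks out as soon as a free slot is found.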
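
File lines 490-507 walk every live slot of a table and chase each id back into shmsegs[]. Given where they sit in the file, this is plausibly the fork hook, shmfork(), which copies the parent's attach table into the child and bumps each segment's attach count; that identification, and everything outside lines 504-507, is an assumption.

    void
    shmfork(struct vmspace *vm1, struct vmspace *vm2)
    {
        struct shmmap_head *shmmap_h;
        struct shmmap_state *shmmap_s;
        struct shmid_ds *shmseg;
        size_t size;
        int i;

        /* Assumed: a parent with no table gives the child no table. */
        if (vm1->vm_shm == NULL) {
            vm2->vm_shm = NULL;
            return;
        }

        /* Assumed: duplicate the parent's attach table verbatim. */
        shmmap_h = (struct shmmap_head *)vm1->vm_shm;
        size = sizeof(int) + shmmap_h->shmseg * sizeof(struct shmmap_state);
        vm2->vm_shm = malloc(size, M_SHM, M_WAITOK);
        bcopy(vm1->vm_shm, vm2->vm_shm, size);

        /* 504-507: every inherited mapping is one more attachment on the
         * underlying segment. */
        for (i = 0, shmmap_s = shmmap_h->state; i < shmmap_h->shmseg;
            i++, shmmap_s++) {
            if (shmmap_s->shmid != -1 &&
                (shmseg = shmsegs[IPCID_TO_IX(shmmap_s->shmid)]) != NULL)
                shmseg->shm_nattch++;
        }
    }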
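
The last group, file lines 516-525, runs shm_delete_mapping() over every live slot of an address space, which is what the exit-time hook (presumably shmexit()) needs to do. A sketch under that assumption; the table lookup, the final free() and the function signature are not in the listing.

    void
    shmexit(struct vmspace *vm)
    {
        struct shmmap_head *shmmap_h;
        struct shmmap_state *shmmap_s;
        int i;

        /* Assumed: nothing to do if the process never attached a segment. */
        shmmap_h = (struct shmmap_head *)vm->vm_shm;
        if (shmmap_h == NULL)
            return;

        /* 522-525: detach everything that is still attached. */
        for (i = 0, shmmap_s = shmmap_h->state; i < shmmap_h->shmseg;
            i++, shmmap_s++)
            if (shmmap_s->shmid != -1)
                shm_delete_mapping(vm, shmmap_s);

        /* Assumed: the attach table itself goes away with the address space. */
        free(vm->vm_shm, M_SHM);
        vm->vm_shm = NULL;
    }

Taken together, the listing traces the whole lifecycle of a shmmap_state slot: created free (shmid = -1) when the table is first built, filled in by the attach path, inherited across fork, and retired at detach or exit through the common shm_delete_mapping() path.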