pp                402 arch/i386/i386/ioapic.c 	struct ioapic_pin *pp;
pp                405 arch/i386/i386/ioapic.c 	pp = &sc->sc_pins[pin];
pp                406 arch/i386/i386/ioapic.c 	map = pp->ip_map;
pp                413 arch/i386/i386/ioapic.c 	else if (pp->ip_handler == NULL) {
pp                416 arch/i386/i386/ioapic.c 		redlo |= (pp->ip_vector & 0xff);
pp                433 arch/i386/i386/ioapic.c 		if (pp->ip_type == IST_LEVEL)
pp                438 arch/i386/i386/ioapic.c 			if (pp->ip_type == IST_LEVEL)
pp                479 arch/i386/i386/ioapic.c 	struct ioapic_pin *pp = &sc->sc_pins[pin];
pp                483 arch/i386/i386/ioapic.c 	ovector = pp->ip_vector;
pp                487 arch/i386/i386/ioapic.c 		pp->ip_minlevel = 0xff; /* XXX magic */
pp                488 arch/i386/i386/ioapic.c 		pp->ip_maxlevel = 0; /* XXX magic */
pp                489 arch/i386/i386/ioapic.c 		pp->ip_vector = 0;
pp                490 arch/i386/i386/ioapic.c 	} else if (maxlevel != pp->ip_maxlevel) {
pp                522 arch/i386/i386/ioapic.c 		pp->ip_vector = nvector;
pp                523 arch/i386/i386/ioapic.c 		pp->ip_minlevel = minlevel;
pp                524 arch/i386/i386/ioapic.c 		pp->ip_maxlevel = maxlevel;
pp                526 arch/i386/i386/ioapic.c 	apic_intrhand[pp->ip_vector] = pp->ip_handler;
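Note: the ioapic.c hits above come from code that programs an I/O APIC redirection entry from per-pin state (ip_vector, ip_type, ip_handler) and installs the handler under its vector. A minimal illustrative sketch of assembling the low word of such an entry follows; the bit names IOAPIC_REDLO_MASK/IOAPIC_REDLO_LEVEL and the struct layout are assumptions for illustration, not the driver's actual definitions.

#include <stdint.h>

/* Hypothetical bit definitions, for illustration only. */
#define IOAPIC_REDLO_MASK       0x00010000u     /* interrupt masked */
#define IOAPIC_REDLO_LEVEL      0x00008000u     /* level triggered */

enum { IST_EDGE, IST_LEVEL };

struct pin_state {
        int             ip_type;        /* IST_EDGE or IST_LEVEL */
        unsigned int    ip_vector;      /* CPU vector, 0 if unassigned */
        void            *ip_handler;    /* NULL if no handler attached */
};

/* Build the low 32 bits of a redirection entry for one pin. */
static uint32_t
pin_redlo(const struct pin_state *pp)
{
        uint32_t redlo = 0;

        if (pp->ip_handler == NULL)
                redlo |= IOAPIC_REDLO_MASK;     /* no handler: keep it masked */
        else
                redlo |= (pp->ip_vector & 0xff);
        if (pp->ip_type == IST_LEVEL)
                redlo |= IOAPIC_REDLO_LEVEL;
        return redlo;
}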
pp                149 arch/i386/stand/pxeboot/net.c 	u_int *pp = parts;
pp                173 arch/i386/stand/pxeboot/net.c 			if (pp >= parts + 3 || val > 0xff)
pp                175 arch/i386/stand/pxeboot/net.c 			*pp++ = val, cp++;
pp                189 arch/i386/stand/pxeboot/net.c 	n = pp - parts + 1;
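Note: these pxeboot/net.c lines (and the identical lib/libsa/net.c ones further down) are an inet_addr()-style parser: pp walks an array of up to four numeric parts, the guard rejects a fifth dot-separated component or an oversized value, and n = pp - parts + 1 counts the parts seen. A self-contained sketch of the same pointer walk, restricted to strict dotted quads; names are illustrative:

#include <ctype.h>
#include <stdint.h>

/* Parse "a.b.c.d" into a host-order address.  0 on success, -1 on error. */
static int
parse_dotted_quad(const char *cp, uint32_t *out)
{
        unsigned int parts[4], *pp = parts, val;

        for (;;) {
                if (!isdigit((unsigned char)*cp))
                        return (-1);
                for (val = 0; isdigit((unsigned char)*cp); cp++) {
                        val = val * 10 + (*cp - '0');
                        if (val > 0xff)
                                return (-1);
                }
                *pp++ = val;
                if (*cp == '\0')
                        break;
                if (*cp != '.' || pp >= parts + 4)
                        return (-1);
                cp++;
        }
        if (pp - parts != 4)
                return (-1);
        *out = (parts[0] << 24) | (parts[1] << 16) | (parts[2] << 8) | parts[3];
        return (0);
}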
pp                270 ddb/db_input.c 			char *pp, *pc;
pp                275 ddb/db_input.c 			for (pp = db_history_prev, pc = db_lbuf_start;
pp                276 ddb/db_input.c 			     pc != db_le && *pp; ) {
pp                277 ddb/db_input.c 				if (*pp != *pc)
pp                279 ddb/db_input.c 				if (++pp == db_history + db_history_size)
pp                280 ddb/db_input.c 					pp = db_history;
pp                283 ddb/db_input.c 			if (!*pp && pc == db_le) {
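Note: the db_input.c lines compare the line being entered against the previous history entry, where the history is a fixed-size circular character buffer and pp wraps back to the start when it runs off the end. A standalone sketch of that wrap-around comparison (buffer names here are illustrative, not ddb's):

#include <stddef.h>

/*
 * Compare the NUL-terminated string stored at 'hp' inside the circular
 * buffer hist[histsize] (possibly wrapping) against the linear buffer
 * [line, line_end).  Returns nonzero if they are equal.
 */
static int
circ_match(const char *hist, size_t histsize, const char *hp,
    const char *line, const char *line_end)
{
        const char *pp = hp, *pc = line;

        while (pc != line_end && *pp) {
                if (*pp != *pc)
                        break;
                if (++pp == hist + histsize)
                        pp = hist;      /* wrap to the start of the ring */
                pc++;
        }
        return (*pp == '\0' && pc == line_end);
}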
pp                144 dev/acpi/acpiprt.c 	struct aml_value res, *pp;
pp                167 dev/acpi/acpiprt.c 	pp = v->v_package[2];
pp                168 dev/acpi/acpiprt.c 	if (pp->type == AML_OBJTYPE_NAMEREF) {
pp                169 dev/acpi/acpiprt.c 		node = aml_searchname(sc->sc_devnode, pp->v_nameref);
pp                174 dev/acpi/acpiprt.c 		pp = node->value;
pp                176 dev/acpi/acpiprt.c 	if (pp->type == AML_OBJTYPE_OBJREF) {
pp                177 dev/acpi/acpiprt.c 		pp = pp->v_objref.ref;
pp                179 dev/acpi/acpiprt.c 	if (pp->type == AML_OBJTYPE_DEVICE) {
pp                180 dev/acpi/acpiprt.c 		node = pp->node;
pp                206 dev/acpi/acpiprt.c 	    DEVNAME(sc), aml_nodename(pp->node), addr, pin, irq);
pp                743 dev/acpi/dsdt.c 	struct aml_node *node, **pp;
pp                777 dev/acpi/dsdt.c 			for (pp = &root->child; *pp; pp = &(*pp)->sibling)
pp                781 dev/acpi/dsdt.c 			*pp = node;
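Note: the dsdt.c lines append a new child at the tail of a parent's sibling chain by walking a pointer-to-pointer until it reaches the terminating NULL slot, then storing through it, so an empty list needs no special case. A generic sketch of the same idiom (struct names are illustrative):

#include <stddef.h>

struct node {
        struct node     *sibling;       /* next child of the same parent */
        struct node     *child;         /* first child */
};

/* Append 'n' as the last child of 'parent'. */
static void
node_append_child(struct node *parent, struct node *n)
{
        struct node **pp;

        /* Walk to the NULL slot at the end of the sibling chain. */
        for (pp = &parent->child; *pp != NULL; pp = &(*pp)->sibling)
                ;
        *pp = n;
        n->sibling = NULL;
}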
pp               2469 dev/audio.c    	struct audio_params pp, rp;
pp               2485 dev/audio.c    	pp = sc->sc_pparams;	/* Temporary encoding storage in */
pp               2490 dev/audio.c    		pp.sample_rate = p->sample_rate;
pp               2498 dev/audio.c    		pp.encoding = p->encoding;
pp               2506 dev/audio.c    		pp.precision = p->precision;
pp               2514 dev/audio.c    		pp.channels = p->channels;
pp               2525 dev/audio.c    	    audio_print_params("Setting play params", &pp);
pp               2529 dev/audio.c    	if (np && (error = audio_check_params(&pp)))
pp               2544 dev/audio.c    		pp.sw_code = 0;
pp               2545 dev/audio.c    		pp.factor = 1;
pp               2565 dev/audio.c    				pp = rp;
pp               2567 dev/audio.c    				rp = pp;
pp               2570 dev/audio.c    		    sc->sc_mode & (AUMODE_PLAY | AUMODE_RECORD), &pp, &rp);
pp               2575 dev/audio.c    				pp.sample_rate = rp.sample_rate;
pp               2576 dev/audio.c    				pp.encoding    = rp.encoding;
pp               2577 dev/audio.c    				pp.channels    = rp.channels;
pp               2578 dev/audio.c    				pp.precision   = rp.precision;
pp               2580 dev/audio.c    				rp.sample_rate = pp.sample_rate;
pp               2581 dev/audio.c    				rp.encoding    = pp.encoding;
pp               2582 dev/audio.c    				rp.channels    = pp.channels;
pp               2583 dev/audio.c    				rp.precision   = pp.precision;
pp               2587 dev/audio.c    		sc->sc_pparams = pp;
pp                712 dev/ccd.c      	struct partition *pp;
pp                724 dev/ccd.c      	pp = &cs->sc_dkdev.dk_label->d_partitions[DISKPART(bp->b_dev)];
pp                725 dev/ccd.c      	bn += DL_GETPOFFSET(pp);
pp               1202 dev/pci/autri.c 	struct autri_dma **pp, *p;
pp               1204 dev/pci/autri.c 	for (pp = &sc->sc_dmas; (p = *pp) != NULL; pp = &p->next) {
pp               1207 dev/pci/autri.c 			*pp = p->next;
pp                763 dev/pci/auvia.c 	struct auvia_dma **pp, *p;
pp                765 dev/pci/auvia.c 	for (pp = &(sc->sc_dmas); (p = *pp) != NULL; pp = &p->next)
pp                772 dev/pci/auvia.c 			*pp = p->next;
pp               1455 dev/pci/cs4280.c 	struct cs4280_dma **pp, *p;
pp               1457 dev/pci/cs4280.c 	for (pp = &sc->sc_dmas; (p = *pp) != NULL; pp = &p->next) {
pp               1460 dev/pci/cs4280.c 			*pp = p->next;
pp               1381 dev/pci/cs4281.c 	struct cs4281_dma **pp, *p;
pp               1384 dev/pci/cs4281.c 	for (pp = &sc->sc_dmas; (p = *pp) != NULL; pp = &p->next) {
pp               1391 dev/pci/cs4281.c 			*pp = p->next;
pp               1634 dev/pci/eap.c  	struct eap_dma **pp, *p;
pp               1636 dev/pci/eap.c  	for (pp = &sc->sc_dmas; (p = *pp) != NULL; pp = &p->next) {
pp               1639 dev/pci/eap.c  			*pp = p->next;
pp                518 dev/pci/esa.c  	struct esa_dma **pp;
pp                520 dev/pci/esa.c  	for (pp = &vc->dma; (p = *pp) != NULL; pp = &p->next)
pp                523 dev/pci/esa.c  			*pp = p->next;
pp               1576 dev/pci/eso.c  	struct eso_dma *p, **pp;
pp               1578 dev/pci/eso.c  	for (pp = &sc->sc_dmas; (p = *pp) != NULL; pp = &p->ed_next) {
pp               1581 dev/pci/eso.c  			*pp = p->ed_next;
pp                774 dev/pci/fms.c  	struct fms_dma **pp, *p;
pp                776 dev/pci/fms.c  	for (pp = &(sc->sc_dmas); (p = *pp) != NULL; pp = &p->next)
pp                783 dev/pci/fms.c  			*pp = p->next;
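Note: the autri/auvia/cs4280/cs4281/eap/esa/eso/fms hits above (and the yds, cs4231 and pfkeyv2 ones further down) are all the same freelist-removal idiom: pp holds the address of the link that points at the current element, so a match can be unlinked with a single *pp = p->next and no separate "previous" pointer. A standalone sketch, with a made-up dma record and free() standing in for the real teardown:

#include <stddef.h>
#include <stdlib.h>

struct dma {
        struct dma      *next;
        void            *addr;          /* buffer address handed to callers */
};

struct softc {
        struct dma      *sc_dmas;       /* singly linked list of allocations */
};

/* Free the DMA record whose buffer starts at 'ptr', if any. */
static void
dma_free(struct softc *sc, void *ptr)
{
        struct dma **pp, *p;

        for (pp = &sc->sc_dmas; (p = *pp) != NULL; pp = &p->next) {
                if (p->addr == ptr) {
                        *pp = p->next;  /* unlink without a prev pointer */
                        free(p);        /* stand-in for the real teardown */
                        return;
                }
        }
}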
pp               1825 dev/pci/if_sk.c 	u_int8_t *pp;
pp               1829 dev/pci/if_sk.c 	pp = mtod(m, u_int8_t *);
pp               1833 dev/pci/if_sk.c 	eh = (struct ether_header *)pp;
pp               1837 dev/pci/if_sk.c 		u_int16_t *xp = (u_int16_t *)pp;
pp               1839 dev/pci/if_sk.c 		xp = (u_int16_t *)pp;
pp               1844 dev/pci/if_sk.c 		xp = (u_int16_t *)(pp + sizeof(struct ip));
pp               1847 dev/pci/if_sk.c 		pp += EVL_ENCAPLEN;
pp               1851 dev/pci/if_sk.c 	pp += sizeof(*eh);
pp               1854 dev/pci/if_sk.c 	ip = (struct ip *)pp;
pp               1889 dev/pci/if_sk.c 	pp += hlen;
pp               1893 dev/pci/if_sk.c 		struct udphdr *uh = (struct udphdr *)pp;
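Note: the if_sk.c lines walk a received frame with a byte cursor: skip the optional 802.1Q encapsulation, then the Ethernet header, then the variable-length IP header (hlen), and land on the transport header. A user-space sketch of the same walk over a plain (untagged) IPv4/UDP frame, using the standard netinet structures; names and bounds handling are illustrative, and the VLAN and checksum logic of the driver is omitted:

#include <sys/types.h>
#include <net/ethernet.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/udp.h>
#include <stddef.h>
#include <stdint.h>

/* Return the UDP header of an untagged IPv4/UDP frame, or NULL. */
static const struct udphdr *
frame_udp_header(const uint8_t *pp, size_t len)
{
        const struct ether_header *eh;
        const struct ip *ip;
        unsigned int hlen;

        if (len < sizeof(*eh))
                return (NULL);
        eh = (const struct ether_header *)pp;
        if (ntohs(eh->ether_type) != ETHERTYPE_IP)
                return (NULL);
        pp += sizeof(*eh);
        len -= sizeof(*eh);

        if (len < sizeof(*ip))
                return (NULL);
        ip = (const struct ip *)pp;
        hlen = ip->ip_hl << 2;          /* IP header length in bytes */
        if (ip->ip_p != IPPROTO_UDP || len < hlen + sizeof(struct udphdr))
                return (NULL);
        pp += hlen;
        return ((const struct udphdr *)pp);
}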
pp                125 dev/pci/if_wi_pci.c 	const struct wi_pci_product *pp;
pp                127 dev/pci/if_wi_pci.c 	for (pp = wi_pci_products; pp->pp_product != 0; pp++) {
pp                128 dev/pci/if_wi_pci.c 		if (PCI_VENDOR(pa->pa_id) == pp->pp_vendor && 
pp                129 dev/pci/if_wi_pci.c 		    PCI_PRODUCT(pa->pa_id) == pp->pp_product)
pp                130 dev/pci/if_wi_pci.c 			return (pp);
pp                148 dev/pci/if_wi_pci.c 	const struct wi_pci_product *pp;
pp                150 dev/pci/if_wi_pci.c 	pp = wi_pci_lookup(pa);
pp                151 dev/pci/if_wi_pci.c 	if (pp->pp_attach(pa, sc) != 0)
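Note: the if_wi_pci.c lines (and the pciide.c ones that follow) are the common PCI match pattern: scan a table terminated by a zero product ID, compare the vendor/product taken from pa->pa_id, and return the entry so attach can use its per-device callback or flags. A generic sketch of that table scan, with made-up entry fields and placeholder IDs:

#include <stddef.h>
#include <stdint.h>

struct product {
        uint16_t        vendor;
        uint16_t        product;
        int             (*attach)(void *aux);   /* per-device hook */
};

/* Table is terminated by an entry whose product ID is 0. */
static const struct product products[] = {
        { 0x1234, 0x0001, NULL },       /* placeholder IDs */
        { 0, 0, NULL }
};

static const struct product *
product_lookup(uint16_t vendor, uint16_t product)
{
        const struct product *pp;

        for (pp = products; pp->product != 0; pp++) {
                if (pp->vendor == vendor && pp->product == product)
                        return (pp);
        }
        return (NULL);
}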
pp               1175 dev/pci/pciide.c 	const struct pciide_product_desc *pp;
pp               1188 dev/pci/pciide.c 	for (pp = vp->ide_products, i = 0; i < vp->ide_nproducts; pp++, i++)
pp               1189 dev/pci/pciide.c 		if (PCI_PRODUCT(id) == pp->ide_product)
pp               1194 dev/pci/pciide.c 	return (pp);
pp               1201 dev/pci/pciide.c 	const struct pciide_product_desc *pp;
pp               1215 dev/pci/pciide.c 	pp = pciide_lookup_product(pa->pa_id);
pp               1216 dev/pci/pciide.c 	if (pp  && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE))
pp               1236 dev/pci/pciide.c 			if (pp)
pp                 66 dev/pci/siop_pci.c 	const struct siop_product_desc *pp;
pp                 69 dev/pci/siop_pci.c 	pp = siop_lookup_product(pa->pa_id, PCI_REVISION(pa->pa_class));
pp                 70 dev/pci/siop_pci.c 	if (pp)
pp                196 dev/pci/siop_pci_common.c 	const struct siop_product_desc *pp;
pp                202 dev/pci/siop_pci_common.c 	for (pp = siop_products; pp->product != 0; pp++) {
pp                203 dev/pci/siop_pci_common.c 		if (PCI_PRODUCT(id) == pp->product && pp->revision <= rev)
pp                204 dev/pci/siop_pci_common.c 			if (rp == NULL || pp->revision > rp->revision)
pp                205 dev/pci/siop_pci_common.c 				rp = pp;
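Note: the siop lookup refines the same table scan: among entries matching the product it keeps the one with the highest listed revision that does not exceed the chip's actual revision, so later table entries can describe later steppings. A sketch of that selection rule with an illustrative descriptor table:

#include <stddef.h>
#include <stdint.h>

struct chip_desc {
        uint16_t        product;
        uint8_t         revision;       /* minimum chip revision covered */
        /* per-revision feature flags would go here */
};

static const struct chip_desc chips[] = {
        { 0x0001, 0x00 },               /* placeholder IDs */
        { 0x0001, 0x10 },               /* later stepping of the same chip */
        { 0, 0 }
};

/* Best entry for (product, rev): matching product, highest revision <= rev. */
static const struct chip_desc *
chip_lookup(uint16_t product, uint8_t rev)
{
        const struct chip_desc *pp, *rp = NULL;

        for (pp = chips; pp->product != 0; pp++) {
                if (pp->product == product && pp->revision <= rev &&
                    (rp == NULL || pp->revision > rp->revision))
                        rp = pp;
        }
        return (rp);
}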
pp               1722 dev/pci/yds.c  	struct yds_dma **pp, *p;
pp               1724 dev/pci/yds.c  	for (pp = &sc->sc_dmas; (p = *pp) != NULL; pp = &p->next) {
pp               1727 dev/pci/yds.c  			*pp = p->next;
pp                333 dev/pcmcia/if_wi_pcmcia.c 	const struct wi_pcmcia_product *pp;
pp                341 dev/pcmcia/if_wi_pcmcia.c 	for (pp = wi_pcmcia_products; pp < epp; pp++) {
pp                343 dev/pcmcia/if_wi_pcmcia.c 		    pp->pp_cisinfo[0] != NULL &&
pp                344 dev/pcmcia/if_wi_pcmcia.c 		    strcmp(pa->card->cis1_info[0], pp->pp_cisinfo[0]) == 0 &&
pp                346 dev/pcmcia/if_wi_pcmcia.c 		    pp->pp_cisinfo[1] != NULL &&
pp                347 dev/pcmcia/if_wi_pcmcia.c 		    strcmp(pa->card->cis1_info[1], pp->pp_cisinfo[1]) == 0)
pp                348 dev/pcmcia/if_wi_pcmcia.c 			return (pp);
pp                352 dev/pcmcia/if_wi_pcmcia.c 	for (pp = wi_pcmcia_products; pp < epp; pp++) {
pp                354 dev/pcmcia/if_wi_pcmcia.c 		    pa->manufacturer == pp->pp_vendor &&
pp                356 dev/pcmcia/if_wi_pcmcia.c 		    pa->product == pp->pp_product)
pp                357 dev/pcmcia/if_wi_pcmcia.c 			return (pp);
pp               1716 dev/raidframe/rf_openbsdkintf.c 	struct partition *pp;
pp               1755 dev/raidframe/rf_openbsdkintf.c 			pp = &rs->sc_dkdev.dk_label->d_partitions[DISKPART(bp->b_dev)];
pp               1756 dev/raidframe/rf_openbsdkintf.c 			blocknum += DL_GETPOFFSET(pp);
pp               2132 dev/raidframe/rf_openbsdkintf.c 	struct partition *pp;
pp               2168 dev/raidframe/rf_openbsdkintf.c 		pp = &lp->d_partitions[i];
pp               2169 dev/raidframe/rf_openbsdkintf.c 		if (DL_GETPOFFSET(pp) + DL_GETPSIZE(pp) > rs->sc_size)
pp               1467 dev/sbus/cs4231.c 	struct cs_dma *p, **pp;
pp               1469 dev/sbus/cs4231.c 	for (pp = &sc->sc_dmas; (p = *pp) != NULL; pp = &(*pp)->next) {
pp               1476 dev/sbus/cs4231.c 		*pp = p->next;
pp                 70 dev/usb/usbf_subr.c usbf_realloc(void **pp, size_t *sizep, size_t newsize)
pp                 77 dev/usb/usbf_subr.c 			free(*pp, M_USB);
pp                 78 dev/usb/usbf_subr.c 		*pp = NULL;
pp                 89 dev/usb/usbf_subr.c 		bcopy(*pp, p, oldsize);
pp                 90 dev/usb/usbf_subr.c 	*pp = p;
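Note: usbf_subr.c's usbf_realloc() resizes a buffer through a pointer-to-pointer: the caller passes the addresses of its buffer pointer and its recorded size, and the helper updates both, releasing the buffer on a zero-size request and copying the old contents into the new allocation otherwise. A user-space sketch of the same contract (malloc/free stand in for the kernel allocator, error handling simplified):

#include <stdlib.h>
#include <string.h>

/*
 * On success *pp points at a buffer of newsize bytes with the old
 * contents preserved (truncated if shrinking) and *sizep is updated.
 * newsize == 0 releases the buffer.  Returns 0 on success, -1 if the
 * allocation fails (in which case *pp is left untouched).
 */
static int
buf_realloc(void **pp, size_t *sizep, size_t newsize)
{
        void *p;
        size_t oldsize = *sizep;

        if (newsize == 0) {
                free(*pp);
                *pp = NULL;
                *sizep = 0;
                return (0);
        }
        p = malloc(newsize);
        if (p == NULL)
                return (-1);
        if (*pp != NULL)
                memcpy(p, *pp, oldsize < newsize ? oldsize : newsize);
        free(*pp);
        *pp = p;
        *sizep = newsize;
        return (0);
}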
pp                536 kern/exec_elf.c 	Elf_Phdr *ph, *pp;
pp                579 kern/exec_elf.c 		pp = &ph[i];
pp                580 kern/exec_elf.c 		if (pp->p_type == PT_INTERP) {
pp                581 kern/exec_elf.c 			if (pp->p_filesz >= MAXPATHLEN)
pp                585 kern/exec_elf.c 			    pp->p_offset, interp, pp->p_filesz)) != 0) {
pp                638 kern/exec_elf.c 		pp = &ph[i];
pp                702 kern/exec_elf.c 			phdr = pp->p_vaddr;
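Note: the exec_elf.c lines scan the program header table for PT_INTERP, bounds-check p_filesz (against MAXPATHLEN in the kernel), and read the interpreter path from the file at p_offset; a later pass records p_vaddr for PT_PHDR. A user-space sketch of the PT_INTERP scan over an in-memory file image, with the kernel read reduced to a memcpy; the function and parameter names are illustrative:

#include <elf.h>
#include <stddef.h>
#include <string.h>

/*
 * Find PT_INTERP among 'phnum' program headers and copy the interpreter
 * path out of the in-memory file image.  Returns 0 on success, -1 if
 * absent, out of bounds, or too long for the output buffer.
 */
static int
find_interp(const unsigned char *image, size_t imagesize,
    const Elf64_Phdr *ph, int phnum, char *interp, size_t interpsz)
{
        const Elf64_Phdr *pp;
        int i;

        for (i = 0; i < phnum; i++) {
                pp = &ph[i];
                if (pp->p_type != PT_INTERP)
                        continue;
                if (pp->p_filesz >= interpsz ||
                    pp->p_offset + pp->p_filesz > imagesize)
                        return (-1);
                memcpy(interp, image + pp->p_offset, pp->p_filesz);
                interp[pp->p_filesz] = '\0';
                return (0);
        }
        return (-1);
}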
pp                297 kern/kern_exit.c 		struct proc *pp = p->p_pptr;
pp                304 kern/kern_exit.c 		if (LIST_EMPTY(&pp->p_children))
pp                305 kern/kern_exit.c 			wakeup(pp);
pp                402 kern/kern_proc.c 	struct proc *p, *pp;
pp                437 kern/kern_proc.c 		pp = p->p_pptr;
pp                453 kern/kern_proc.c 				    pp ? pp->p_pid : -1, p->p_pgrp->pg_id,
pp                219 kern/subr_disk.c 	struct partition *pp;
pp                295 kern/subr_disk.c 			pp = &dlp->d_partitions[i];
pp                296 kern/subr_disk.c 			pp->p_size = swap32(pp->p_size);
pp                297 kern/subr_disk.c 			pp->p_offset = swap32(pp->p_offset);
pp                299 kern/subr_disk.c 				v0pp = (struct __partitionv0 *)pp;
pp                302 kern/subr_disk.c 				pp->p_offseth = swap16(pp->p_offseth);
pp                303 kern/subr_disk.c 				pp->p_sizeh = swap16(pp->p_sizeh);
pp                305 kern/subr_disk.c 			pp->p_cpg = swap16(pp->p_cpg);
pp                329 kern/subr_disk.c 		pp = lp->d_partitions;
pp                330 kern/subr_disk.c 		for (i = 0; i < lp->d_npartitions; i++, pp++, v0pp++) {
pp                331 kern/subr_disk.c 			pp->p_fragblock = DISKLABELV1_FFS_FRAGBLOCK(v0pp->
pp                333 kern/subr_disk.c 			pp->p_offseth = 0;
pp                334 kern/subr_disk.c 			pp->p_sizeh = 0;
pp                442 kern/subr_disk.c 			struct partition *pp = &lp->d_partitions[8+n];
pp                453 kern/subr_disk.c 				DL_SETPOFFSET(pp,
pp                456 kern/subr_disk.c 			DL_SETPSIZE(pp, letoh32(dp2->dp_size));
pp                460 kern/subr_disk.c 				pp->p_fstype = FS_UNUSED;
pp                465 kern/subr_disk.c 				pp->p_fstype = FS_EXT2FS;
pp                475 kern/subr_disk.c 				pp->p_fstype = FS_MSDOS;
pp                488 kern/subr_disk.c 				pp->p_fstype = FS_OTHER;
pp                108 kern/subr_pool.c #define	POOL_NEEDS_CATCHUP(pp)						\
pp                109 kern/subr_pool.c 	((pp)->pr_nitems < (pp)->pr_minitems)
pp                198 kern/subr_pool.c pr_log(struct pool *pp, void *v, int action, const char *file, long line)
pp                200 kern/subr_pool.c 	int n = pp->pr_curlogentry;
pp                203 kern/subr_pool.c 	if ((pp->pr_roflags & PR_LOGGING) == 0)
pp                210 kern/subr_pool.c 	pl = &pp->pr_log[n];
pp                215 kern/subr_pool.c 	if (++n >= pp->pr_logsize)
pp                217 kern/subr_pool.c 	pp->pr_curlogentry = n;
pp                221 kern/subr_pool.c pr_printlog(struct pool *pp, struct pool_item *pi,
pp                224 kern/subr_pool.c 	int i = pp->pr_logsize;
pp                225 kern/subr_pool.c 	int n = pp->pr_curlogentry;
pp                227 kern/subr_pool.c 	if ((pp->pr_roflags & PR_LOGGING) == 0)
pp                234 kern/subr_pool.c 		struct pool_log *pl = &pp->pr_log[n];
pp                245 kern/subr_pool.c 		if (++n >= pp->pr_logsize)
pp                251 kern/subr_pool.c pr_enter(struct pool *pp, const char *file, long line)
pp                254 kern/subr_pool.c 	if (__predict_false(pp->pr_entered_file != NULL)) {
pp                256 kern/subr_pool.c 		    pp->pr_wchan, file, line);
pp                258 kern/subr_pool.c 		    pp->pr_entered_file, pp->pr_entered_line);
pp                262 kern/subr_pool.c 	pp->pr_entered_file = file;
pp                263 kern/subr_pool.c 	pp->pr_entered_line = line;
pp                267 kern/subr_pool.c pr_leave(struct pool *pp)
pp                270 kern/subr_pool.c 	if (__predict_false(pp->pr_entered_file == NULL)) {
pp                271 kern/subr_pool.c 		printf("pool %s not entered?\n", pp->pr_wchan);
pp                275 kern/subr_pool.c 	pp->pr_entered_file = NULL;
pp                276 kern/subr_pool.c 	pp->pr_entered_line = 0;
pp                280 kern/subr_pool.c pr_enter_check(struct pool *pp, int (*pr)(const char *, ...))
pp                283 kern/subr_pool.c 	if (pp->pr_entered_file != NULL)
pp                285 kern/subr_pool.c 		    pp->pr_entered_file, pp->pr_entered_line);
pp                288 kern/subr_pool.c #define	pr_log(pp, v, action, file, line)
pp                289 kern/subr_pool.c #define	pr_printlog(pp, pi, pr)
pp                290 kern/subr_pool.c #define	pr_enter(pp, file, line)
pp                291 kern/subr_pool.c #define	pr_leave(pp)
pp                292 kern/subr_pool.c #define	pr_enter_check(pp, pr)
pp                313 kern/subr_pool.c pr_find_pagehead(struct pool *pp, caddr_t page)
pp                317 kern/subr_pool.c 	if ((pp->pr_roflags & PR_PHINPAGE) != 0)
pp                318 kern/subr_pool.c 		return ((struct pool_item_header *)(page + pp->pr_phoffset));
pp                321 kern/subr_pool.c 	ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
pp                329 kern/subr_pool.c pr_rmpage(struct pool *pp, struct pool_item_header *ph,
pp                339 kern/subr_pool.c 		if (pp->pr_nidle == 0)
pp                341 kern/subr_pool.c 		if (pp->pr_nitems < pp->pr_itemsperpage)
pp                344 kern/subr_pool.c 		pp->pr_nidle--;
pp                347 kern/subr_pool.c 	pp->pr_nitems -= pp->pr_itemsperpage;
pp                356 kern/subr_pool.c 		pool_allocator_free(pp, ph->ph_page);
pp                357 kern/subr_pool.c 		if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
pp                358 kern/subr_pool.c 			SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
pp                364 kern/subr_pool.c 	pp->pr_npages--;
pp                365 kern/subr_pool.c 	pp->pr_npagefree++;
pp                367 kern/subr_pool.c 	pool_update_curpage(pp);
pp                377 kern/subr_pool.c pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
pp                427 kern/subr_pool.c 	LIST_INIT(&pp->pr_emptypages);
pp                428 kern/subr_pool.c 	LIST_INIT(&pp->pr_fullpages);
pp                429 kern/subr_pool.c 	LIST_INIT(&pp->pr_partpages);
pp                430 kern/subr_pool.c 	TAILQ_INIT(&pp->pr_cachelist);
pp                431 kern/subr_pool.c 	pp->pr_curpage = NULL;
pp                432 kern/subr_pool.c 	pp->pr_npages = 0;
pp                433 kern/subr_pool.c 	pp->pr_minitems = 0;
pp                434 kern/subr_pool.c 	pp->pr_minpages = 0;
pp                435 kern/subr_pool.c 	pp->pr_maxpages = 8;
pp                436 kern/subr_pool.c 	pp->pr_roflags = flags;
pp                437 kern/subr_pool.c 	pp->pr_flags = 0;
pp                438 kern/subr_pool.c 	pp->pr_size = size;
pp                439 kern/subr_pool.c 	pp->pr_align = align;
pp                440 kern/subr_pool.c 	pp->pr_wchan = wchan;
pp                441 kern/subr_pool.c 	pp->pr_alloc = palloc;
pp                442 kern/subr_pool.c 	pp->pr_nitems = 0;
pp                443 kern/subr_pool.c 	pp->pr_nout = 0;
pp                444 kern/subr_pool.c 	pp->pr_hardlimit = UINT_MAX;
pp                445 kern/subr_pool.c 	pp->pr_hardlimit_warning = NULL;
pp                446 kern/subr_pool.c 	pp->pr_hardlimit_ratecap.tv_sec = 0;
pp                447 kern/subr_pool.c 	pp->pr_hardlimit_ratecap.tv_usec = 0;
pp                448 kern/subr_pool.c 	pp->pr_hardlimit_warning_last.tv_sec = 0;
pp                449 kern/subr_pool.c 	pp->pr_hardlimit_warning_last.tv_usec = 0;
pp                450 kern/subr_pool.c 	pp->pr_serial = ++pool_serial;
pp                461 kern/subr_pool.c 	if (pp->pr_size < palloc->pa_pagesz/16) {
pp                463 kern/subr_pool.c 		pp->pr_roflags |= PR_PHINPAGE;
pp                464 kern/subr_pool.c 		pp->pr_phoffset = off = palloc->pa_pagesz -
pp                468 kern/subr_pool.c 		pp->pr_phoffset = 0;
pp                470 kern/subr_pool.c 		SPLAY_INIT(&pp->pr_phtree);
pp                480 kern/subr_pool.c 	pp->pr_itemoffset = ioff = ioff % align;
pp                481 kern/subr_pool.c 	pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
pp                482 kern/subr_pool.c 	KASSERT(pp->pr_itemsperpage != 0);
pp                488 kern/subr_pool.c 	slack = off - pp->pr_itemsperpage * pp->pr_size;
pp                489 kern/subr_pool.c 	pp->pr_maxcolor = (slack / align) * align;
pp                490 kern/subr_pool.c 	pp->pr_curcolor = 0;
pp                492 kern/subr_pool.c 	pp->pr_nget = 0;
pp                493 kern/subr_pool.c 	pp->pr_nfail = 0;
pp                494 kern/subr_pool.c 	pp->pr_nput = 0;
pp                495 kern/subr_pool.c 	pp->pr_npagealloc = 0;
pp                496 kern/subr_pool.c 	pp->pr_npagefree = 0;
pp                497 kern/subr_pool.c 	pp->pr_hiwat = 0;
pp                498 kern/subr_pool.c 	pp->pr_nidle = 0;
pp                503 kern/subr_pool.c 		    (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
pp                505 kern/subr_pool.c 			pp->pr_roflags &= ~PR_LOGGING;
pp                506 kern/subr_pool.c 		pp->pr_curlogentry = 0;
pp                507 kern/subr_pool.c 		pp->pr_logsize = pool_logsize;
pp                511 kern/subr_pool.c 	pp->pr_entered_file = NULL;
pp                512 kern/subr_pool.c 	pp->pr_entered_line = 0;
pp                514 kern/subr_pool.c 	simple_lock_init(&pp->pr_slock);
pp                516 kern/subr_pool.c 	pp->pr_ipl = -1;
pp                534 kern/subr_pool.c 	TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
pp                539 kern/subr_pool.c 	TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
pp                545 kern/subr_pool.c pool_setipl(struct pool *pp, int ipl)
pp                547 kern/subr_pool.c 	pp->pr_ipl = ipl;
pp                555 kern/subr_pool.c pool_destroy(struct pool *pp)
pp                561 kern/subr_pool.c 	simple_lock(&pp->pr_alloc->pa_slock);
pp                562 kern/subr_pool.c 	TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
pp                563 kern/subr_pool.c 	simple_unlock(&pp->pr_alloc->pa_slock);
pp                566 kern/subr_pool.c 	while ((pc = TAILQ_FIRST(&pp->pr_cachelist)) != NULL)
pp                570 kern/subr_pool.c 	if (pp->pr_nout != 0) {
pp                571 kern/subr_pool.c 		pr_printlog(pp, NULL, printf);
pp                573 kern/subr_pool.c 		    pp->pr_nout);
pp                578 kern/subr_pool.c 	while ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
pp                579 kern/subr_pool.c 		pr_rmpage(pp, ph, NULL);
pp                580 kern/subr_pool.c 	KASSERT(LIST_EMPTY(&pp->pr_fullpages));
pp                581 kern/subr_pool.c 	KASSERT(LIST_EMPTY(&pp->pr_partpages));
pp                585 kern/subr_pool.c 	TAILQ_REMOVE(&pool_head, pp, pr_poollist);
pp                589 kern/subr_pool.c 	if ((pp->pr_roflags & PR_LOGGING) != 0)
pp                590 kern/subr_pool.c 		free(pp->pr_log, M_TEMP);
pp                595 kern/subr_pool.c pool_alloc_item_header(struct pool *pp, caddr_t storage, int flags)
pp                600 kern/subr_pool.c 	LOCK_ASSERT(simple_lock_held(&pp->pr_slock) == 0);
pp                602 kern/subr_pool.c 	if ((pp->pr_roflags & PR_PHINPAGE) != 0)
pp                603 kern/subr_pool.c 		ph = (struct pool_item_header *) (storage + pp->pr_phoffset);
pp                618 kern/subr_pool.c _pool_get(struct pool *pp, int flags, const char *file, long line)
pp                620 kern/subr_pool.c pool_get(struct pool *pp, int flags)
pp                630 kern/subr_pool.c 	if (pp->pr_ipl != -1)
pp                631 kern/subr_pool.c 		splassert(pp->pr_ipl);
pp                634 kern/subr_pool.c 		panic("pool_get: %s:must have NOWAIT", pp->pr_wchan);
pp                643 kern/subr_pool.c 	if (pp->pr_roflags & PR_DEBUG) {
pp                647 kern/subr_pool.c 		debug_malloc(pp->pr_size, M_DEBUG,
pp                653 kern/subr_pool.c 	simple_lock(&pp->pr_slock);
pp                654 kern/subr_pool.c 	pr_enter(pp, file, line);
pp                663 kern/subr_pool.c 	if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
pp                664 kern/subr_pool.c 		pr_leave(pp);
pp                665 kern/subr_pool.c 		simple_unlock(&pp->pr_slock);
pp                666 kern/subr_pool.c 		panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
pp                669 kern/subr_pool.c 	if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
pp                675 kern/subr_pool.c 			pp->pr_flags |= PR_WANTED;
pp                676 kern/subr_pool.c 			pr_leave(pp);
pp                677 kern/subr_pool.c 			ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
pp                678 kern/subr_pool.c 			pr_enter(pp, file, line);
pp                685 kern/subr_pool.c 		if (pp->pr_hardlimit_warning != NULL &&
pp                686 kern/subr_pool.c 		    ratecheck(&pp->pr_hardlimit_warning_last,
pp                687 kern/subr_pool.c 			      &pp->pr_hardlimit_ratecap))
pp                688 kern/subr_pool.c 			log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);
pp                690 kern/subr_pool.c 		pp->pr_nfail++;
pp                692 kern/subr_pool.c 		pr_leave(pp);
pp                693 kern/subr_pool.c 		simple_unlock(&pp->pr_slock);
pp                703 kern/subr_pool.c 	if ((ph = pp->pr_curpage) == NULL) {
pp                705 kern/subr_pool.c 		if (pp->pr_nitems != 0) {
pp                706 kern/subr_pool.c 			simple_unlock(&pp->pr_slock);
pp                708 kern/subr_pool.c 			    pp->pr_wchan, pp->pr_nitems);
pp                718 kern/subr_pool.c 		pr_leave(pp);
pp                719 kern/subr_pool.c 		simple_unlock(&pp->pr_slock);
pp                720 kern/subr_pool.c 		v = pool_allocator_alloc(pp, flags);
pp                722 kern/subr_pool.c 			ph = pool_alloc_item_header(pp, v, flags);
pp                723 kern/subr_pool.c 		simple_lock(&pp->pr_slock);
pp                724 kern/subr_pool.c 		pr_enter(pp, file, line);
pp                728 kern/subr_pool.c 				pool_allocator_free(pp, v);
pp                736 kern/subr_pool.c 			if (pp->pr_curpage != NULL)
pp                740 kern/subr_pool.c 				pp->pr_nfail++;
pp                741 kern/subr_pool.c 				pr_leave(pp);
pp                742 kern/subr_pool.c 				simple_unlock(&pp->pr_slock);
pp                752 kern/subr_pool.c 			pp->pr_flags |= PR_WANTED;
pp                754 kern/subr_pool.c 			pr_leave(pp);
pp                755 kern/subr_pool.c 			ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
pp                756 kern/subr_pool.c 			pr_enter(pp, file, line);
pp                761 kern/subr_pool.c 		pool_prime_page(pp, v, ph);
pp                762 kern/subr_pool.c 		pp->pr_npagealloc++;
pp                768 kern/subr_pool.c 		pr_leave(pp);
pp                769 kern/subr_pool.c 		simple_unlock(&pp->pr_slock);
pp                770 kern/subr_pool.c 		panic("pool_get: %s: page empty", pp->pr_wchan);
pp                773 kern/subr_pool.c 	if (__predict_false(pp->pr_nitems == 0)) {
pp                774 kern/subr_pool.c 		pr_leave(pp);
pp                775 kern/subr_pool.c 		simple_unlock(&pp->pr_slock);
pp                777 kern/subr_pool.c 		    pp->pr_wchan, pp->pr_nitems);
pp                783 kern/subr_pool.c 	pr_log(pp, v, PRLOG_GET, file, line);
pp                788 kern/subr_pool.c 		pr_printlog(pp, pi, printf);
pp                791 kern/subr_pool.c 			pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
pp                799 kern/subr_pool.c 	pp->pr_nitems--;
pp                800 kern/subr_pool.c 	pp->pr_nout++;
pp                803 kern/subr_pool.c 		if (__predict_false(pp->pr_nidle == 0))
pp                806 kern/subr_pool.c 		pp->pr_nidle--;
pp                813 kern/subr_pool.c 		LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
pp                818 kern/subr_pool.c 		if (__predict_false(ph->ph_nmissing != pp->pr_itemsperpage)) {
pp                819 kern/subr_pool.c 			pr_leave(pp);
pp                820 kern/subr_pool.c 			simple_unlock(&pp->pr_slock);
pp                822 kern/subr_pool.c 			    pp->pr_wchan);
pp                830 kern/subr_pool.c 		LIST_INSERT_HEAD(&pp->pr_fullpages, ph, ph_pagelist);
pp                831 kern/subr_pool.c 		pool_update_curpage(pp);
pp                834 kern/subr_pool.c 	pp->pr_nget++;
pp                840 kern/subr_pool.c 	if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
pp                848 kern/subr_pool.c 	pr_leave(pp);
pp                849 kern/subr_pool.c 	simple_unlock(&pp->pr_slock);
pp                857 kern/subr_pool.c pool_do_put(struct pool *pp, void *v)
pp                864 kern/subr_pool.c 	if (pp->pr_roflags & PR_DEBUG) {
pp                870 kern/subr_pool.c 	LOCK_ASSERT(simple_lock_held(&pp->pr_slock));
pp                872 kern/subr_pool.c 	page = (caddr_t)((vaddr_t)v & pp->pr_alloc->pa_pagemask);
pp                875 kern/subr_pool.c 	if (pp->pr_ipl != -1)
pp                876 kern/subr_pool.c 		splassert(pp->pr_ipl);
pp                878 kern/subr_pool.c 	if (__predict_false(pp->pr_nout == 0)) {
pp                880 kern/subr_pool.c 		    pp->pr_wchan);
pp                885 kern/subr_pool.c 	if (__predict_false((ph = pr_find_pagehead(pp, page)) == NULL)) {
pp                886 kern/subr_pool.c 		pr_printlog(pp, NULL, printf);
pp                887 kern/subr_pool.c 		panic("pool_put: %s: page header missing", pp->pr_wchan);
pp                894 kern/subr_pool.c 	simple_lock_freecheck((caddr_t)pi, ((caddr_t)pi) + pp->pr_size);
pp                907 kern/subr_pool.c 		for (i = 0; i < pp->pr_size / sizeof(int); i++) {
pp                915 kern/subr_pool.c 	pp->pr_nput++;
pp                916 kern/subr_pool.c 	pp->pr_nitems++;
pp                917 kern/subr_pool.c 	pp->pr_nout--;
pp                920 kern/subr_pool.c 	if (pp->pr_curpage == NULL)
pp                921 kern/subr_pool.c 		pp->pr_curpage = ph;
pp                923 kern/subr_pool.c 	if (pp->pr_flags & PR_WANTED) {
pp                924 kern/subr_pool.c 		pp->pr_flags &= ~PR_WANTED;
pp                926 kern/subr_pool.c 			pp->pr_nidle++;
pp                927 kern/subr_pool.c 		wakeup(pp);
pp                943 kern/subr_pool.c 		pp->pr_nidle++;
pp                944 kern/subr_pool.c 		if (pp->pr_nidle > pp->pr_maxpages ||
pp                945 kern/subr_pool.c 		    (pp->pr_alloc->pa_flags & PA_WANT) != 0) {
pp                946 kern/subr_pool.c 			pr_rmpage(pp, ph, NULL);
pp                949 kern/subr_pool.c 			LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
pp                951 kern/subr_pool.c 		pool_update_curpage(pp);
pp                960 kern/subr_pool.c 	else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
pp                962 kern/subr_pool.c 		LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
pp                963 kern/subr_pool.c 		pp->pr_curpage = ph;
pp                972 kern/subr_pool.c _pool_put(struct pool *pp, void *v, const char *file, long line)
pp                975 kern/subr_pool.c 	simple_lock(&pp->pr_slock);
pp                976 kern/subr_pool.c 	pr_enter(pp, file, line);
pp                978 kern/subr_pool.c 	pr_log(pp, v, PRLOG_PUT, file, line);
pp                980 kern/subr_pool.c 	pool_do_put(pp, v);
pp                982 kern/subr_pool.c 	pr_leave(pp);
pp                983 kern/subr_pool.c 	simple_unlock(&pp->pr_slock);
pp                989 kern/subr_pool.c pool_put(struct pool *pp, void *v)
pp                992 kern/subr_pool.c 	simple_lock(&pp->pr_slock);
pp                994 kern/subr_pool.c 	pool_do_put(pp, v);
pp                996 kern/subr_pool.c 	simple_unlock(&pp->pr_slock);
pp               1007 kern/subr_pool.c pool_prime(struct pool *pp, int n)
pp               1013 kern/subr_pool.c 	simple_lock(&pp->pr_slock);
pp               1015 kern/subr_pool.c 	newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
pp               1018 kern/subr_pool.c 		simple_unlock(&pp->pr_slock);
pp               1019 kern/subr_pool.c 		cp = pool_allocator_alloc(pp, PR_NOWAIT);
pp               1021 kern/subr_pool.c 			ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
pp               1022 kern/subr_pool.c 		simple_lock(&pp->pr_slock);
pp               1026 kern/subr_pool.c 				pool_allocator_free(pp, cp);
pp               1030 kern/subr_pool.c 		pool_prime_page(pp, cp, ph);
pp               1031 kern/subr_pool.c 		pp->pr_npagealloc++;
pp               1032 kern/subr_pool.c 		pp->pr_minpages++;
pp               1035 kern/subr_pool.c 	if (pp->pr_minpages >= pp->pr_maxpages)
pp               1036 kern/subr_pool.c 		pp->pr_maxpages = pp->pr_minpages + 1;	/* XXX */
pp               1038 kern/subr_pool.c 	simple_unlock(&pp->pr_slock);
pp               1048 kern/subr_pool.c pool_prime_page(struct pool *pp, caddr_t storage, struct pool_item_header *ph)
pp               1052 kern/subr_pool.c 	unsigned int align = pp->pr_align;
pp               1053 kern/subr_pool.c 	unsigned int ioff = pp->pr_itemoffset;
pp               1057 kern/subr_pool.c 	if (((u_long)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
pp               1058 kern/subr_pool.c 		panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
pp               1064 kern/subr_pool.c 	LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
pp               1068 kern/subr_pool.c 	if ((pp->pr_roflags & PR_PHINPAGE) == 0)
pp               1069 kern/subr_pool.c 		SPLAY_INSERT(phtree, &pp->pr_phtree, ph);
pp               1071 kern/subr_pool.c 	pp->pr_nidle++;
pp               1076 kern/subr_pool.c 	cp = (caddr_t)(cp + pp->pr_curcolor);
pp               1077 kern/subr_pool.c 	if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
pp               1078 kern/subr_pool.c 		pp->pr_curcolor = 0;
pp               1089 kern/subr_pool.c 	n = pp->pr_itemsperpage;
pp               1090 kern/subr_pool.c 	pp->pr_nitems += n;
pp               1102 kern/subr_pool.c 		cp = (caddr_t)(cp + pp->pr_size);
pp               1108 kern/subr_pool.c 	if (pp->pr_curpage == NULL)
pp               1109 kern/subr_pool.c 		pp->pr_curpage = ph;
pp               1111 kern/subr_pool.c 	if (++pp->pr_npages > pp->pr_hiwat)
pp               1112 kern/subr_pool.c 		pp->pr_hiwat = pp->pr_npages;
pp               1125 kern/subr_pool.c pool_catchup(struct pool *pp)
pp               1131 kern/subr_pool.c 	while (POOL_NEEDS_CATCHUP(pp)) {
pp               1138 kern/subr_pool.c 		simple_unlock(&pp->pr_slock);
pp               1139 kern/subr_pool.c 		cp = pool_allocator_alloc(pp, PR_NOWAIT);
pp               1141 kern/subr_pool.c 			ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
pp               1142 kern/subr_pool.c 		simple_lock(&pp->pr_slock);
pp               1145 kern/subr_pool.c 				pool_allocator_free(pp, cp);
pp               1149 kern/subr_pool.c 		pool_prime_page(pp, cp, ph);
pp               1150 kern/subr_pool.c 		pp->pr_npagealloc++;
pp               1157 kern/subr_pool.c pool_update_curpage(struct pool *pp)
pp               1160 kern/subr_pool.c 	pp->pr_curpage = LIST_FIRST(&pp->pr_partpages);
pp               1161 kern/subr_pool.c 	if (pp->pr_curpage == NULL) {
pp               1162 kern/subr_pool.c 		pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages);
pp               1167 kern/subr_pool.c pool_setlowat(struct pool *pp, int n)
pp               1170 kern/subr_pool.c 	simple_lock(&pp->pr_slock);
pp               1172 kern/subr_pool.c 	pp->pr_minitems = n;
pp               1173 kern/subr_pool.c 	pp->pr_minpages = (n == 0)
pp               1175 kern/subr_pool.c 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
pp               1178 kern/subr_pool.c 	if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
pp               1186 kern/subr_pool.c 	simple_unlock(&pp->pr_slock);
pp               1190 kern/subr_pool.c pool_sethiwat(struct pool *pp, int n)
pp               1193 kern/subr_pool.c 	simple_lock(&pp->pr_slock);
pp               1195 kern/subr_pool.c 	pp->pr_maxpages = (n == 0)
pp               1197 kern/subr_pool.c 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
pp               1199 kern/subr_pool.c 	simple_unlock(&pp->pr_slock);
pp               1203 kern/subr_pool.c pool_sethardlimit(struct pool *pp, unsigned n, const char *warnmess, int ratecap)
pp               1207 kern/subr_pool.c 	simple_lock(&pp->pr_slock);
pp               1209 kern/subr_pool.c 	if (n < pp->pr_nout) {
pp               1214 kern/subr_pool.c 	pp->pr_hardlimit = n;
pp               1215 kern/subr_pool.c 	pp->pr_hardlimit_warning = warnmess;
pp               1216 kern/subr_pool.c 	pp->pr_hardlimit_ratecap.tv_sec = ratecap;
pp               1217 kern/subr_pool.c 	pp->pr_hardlimit_warning_last.tv_sec = 0;
pp               1218 kern/subr_pool.c 	pp->pr_hardlimit_warning_last.tv_usec = 0;
pp               1224 kern/subr_pool.c 	pp->pr_maxpages = (n == 0 || n == UINT_MAX)
pp               1226 kern/subr_pool.c 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
pp               1229 kern/subr_pool.c 	simple_unlock(&pp->pr_slock);
pp               1241 kern/subr_pool.c _pool_reclaim(struct pool *pp, const char *file, long line)
pp               1243 kern/subr_pool.c pool_reclaim(struct pool *pp)
pp               1251 kern/subr_pool.c 	if (simple_lock_try(&pp->pr_slock) == 0)
pp               1253 kern/subr_pool.c 	pr_enter(pp, file, line);
pp               1260 kern/subr_pool.c 	TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist)
pp               1263 kern/subr_pool.c 	for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) {
pp               1267 kern/subr_pool.c 		if (pp->pr_npages <= pp->pr_minpages)
pp               1276 kern/subr_pool.c 		if ((pp->pr_nitems - pp->pr_itemsperpage) <
pp               1277 kern/subr_pool.c 		    pp->pr_minitems)
pp               1280 kern/subr_pool.c 		pr_rmpage(pp, ph, &pq);
pp               1283 kern/subr_pool.c 	pr_leave(pp);
pp               1284 kern/subr_pool.c 	simple_unlock(&pp->pr_slock);
pp               1289 kern/subr_pool.c 		pool_allocator_free(pp, ph->ph_page);
pp               1290 kern/subr_pool.c 		if (pp->pr_roflags & PR_PHINPAGE) {
pp               1293 kern/subr_pool.c 		SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
pp               1311 kern/subr_pool.c pool_printit(struct pool *pp, const char *modif, int (*pr)(const char *, ...))
pp               1316 kern/subr_pool.c 	if (simple_lock_try(&pp->pr_slock) == 0) {
pp               1318 kern/subr_pool.c 		    pp->pr_wchan);
pp               1322 kern/subr_pool.c 	pool_print1(pp, modif, pr);
pp               1323 kern/subr_pool.c 	simple_unlock(&pp->pr_slock);
pp               1350 kern/subr_pool.c pool_print1(struct pool *pp, const char *modif, int (*pr)(const char *, ...))
pp               1369 kern/subr_pool.c 	    pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
pp               1370 kern/subr_pool.c 	    pp->pr_roflags);
pp               1371 kern/subr_pool.c 	(*pr)("\talloc %p\n", pp->pr_alloc);
pp               1373 kern/subr_pool.c 	    pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
pp               1375 kern/subr_pool.c 	    pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
pp               1378 kern/subr_pool.c 	    pp->pr_nget, pp->pr_nfail, pp->pr_nput);
pp               1380 kern/subr_pool.c 	    pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
pp               1385 kern/subr_pool.c 	if ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
pp               1387 kern/subr_pool.c 	pool_print_pagelist(&pp->pr_emptypages, pr);
pp               1388 kern/subr_pool.c 	if ((ph = LIST_FIRST(&pp->pr_fullpages)) != NULL)
pp               1390 kern/subr_pool.c 	pool_print_pagelist(&pp->pr_fullpages, pr);
pp               1391 kern/subr_pool.c 	if ((ph = LIST_FIRST(&pp->pr_partpages)) != NULL)
pp               1393 kern/subr_pool.c 	pool_print_pagelist(&pp->pr_partpages, pr);
pp               1395 kern/subr_pool.c 	if (pp->pr_curpage == NULL)
pp               1398 kern/subr_pool.c 		(*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
pp               1405 kern/subr_pool.c 	if ((pp->pr_roflags & PR_LOGGING) == 0)
pp               1408 kern/subr_pool.c 		pr_printlog(pp, NULL, pr);
pp               1414 kern/subr_pool.c 	TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist) {
pp               1427 kern/subr_pool.c 	pr_enter_check(pp, pr);
pp               1433 kern/subr_pool.c 	struct pool *pp;
pp               1462 kern/subr_pool.c 	TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
pp               1464 kern/subr_pool.c 			db_printf("%-10s %18p %18p\n", pp->pr_wchan, pp,
pp               1465 kern/subr_pool.c 			    pp->pr_alloc);
pp               1469 kern/subr_pool.c 		if (!pp->pr_nget)
pp               1472 kern/subr_pool.c 		if (pp->pr_maxpages == UINT_MAX)
pp               1475 kern/subr_pool.c 			snprintf(maxp, sizeof maxp, "%u", pp->pr_maxpages);
pp               1487 kern/subr_pool.c 		PRWORD(ovflw, "%-*s", 10, 0, pp->pr_wchan);
pp               1488 kern/subr_pool.c 		PRWORD(ovflw, " %*u", 4, 1, pp->pr_size);
pp               1489 kern/subr_pool.c 		PRWORD(ovflw, " %*lu", 9, 1, pp->pr_nget);
pp               1490 kern/subr_pool.c 		PRWORD(ovflw, " %*lu", 5, 1, pp->pr_nfail);
pp               1491 kern/subr_pool.c 		PRWORD(ovflw, " %*lu", 9, 1, pp->pr_nput);
pp               1492 kern/subr_pool.c 		PRWORD(ovflw, " %*lu", 6, 1, pp->pr_npagealloc);
pp               1493 kern/subr_pool.c 		PRWORD(ovflw, " %*lu", 6, 1, pp->pr_npagefree);
pp               1494 kern/subr_pool.c 		PRWORD(ovflw, " %*d", 6, 1, pp->pr_npages);
pp               1495 kern/subr_pool.c 		PRWORD(ovflw, " %*d", 6, 1, pp->pr_hiwat);
pp               1496 kern/subr_pool.c 		PRWORD(ovflw, " %*d", 6, 1, pp->pr_minpages);
pp               1498 kern/subr_pool.c 		PRWORD(ovflw, " %*lu\n", 5, 1, pp->pr_nidle);
pp               1503 kern/subr_pool.c pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph)
pp               1509 kern/subr_pool.c 	page = (caddr_t)((u_long)ph & pp->pr_alloc->pa_pagemask);
pp               1511 kern/subr_pool.c 	    (pp->pr_roflags & PR_PHINPAGE) != 0) {
pp               1515 kern/subr_pool.c 		       " at page head addr %p (p %p)\n", pp,
pp               1516 kern/subr_pool.c 			pp->pr_wchan, ph->ph_page,
pp               1532 kern/subr_pool.c 				pp->pr_wchan, pi->pi_magic, ph->ph_page,
pp               1538 kern/subr_pool.c 		    (caddr_t)((u_long)pi & pp->pr_alloc->pa_pagemask);
pp               1545 kern/subr_pool.c 		       " item ordinal %d; addr %p (p %p)\n", pp,
pp               1546 kern/subr_pool.c 			pp->pr_wchan, ph->ph_page,
pp               1554 kern/subr_pool.c pool_chk(struct pool *pp, const char *label)
pp               1559 kern/subr_pool.c 	simple_lock(&pp->pr_slock);
pp               1560 kern/subr_pool.c 	LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
pp               1561 kern/subr_pool.c 		r = pool_chk_page(pp, label, ph);
pp               1566 kern/subr_pool.c 	LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
pp               1567 kern/subr_pool.c 		r = pool_chk_page(pp, label, ph);
pp               1572 kern/subr_pool.c 	LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
pp               1573 kern/subr_pool.c 		r = pool_chk_page(pp, label, ph);
pp               1580 kern/subr_pool.c 	simple_unlock(&pp->pr_slock);
pp               1594 kern/subr_pool.c pool_cache_init(struct pool_cache *pc, struct pool *pp,
pp               1605 kern/subr_pool.c 	pc->pc_pool = pp;
pp               1618 kern/subr_pool.c 	simple_lock(&pp->pr_slock);
pp               1619 kern/subr_pool.c 	TAILQ_INSERT_TAIL(&pp->pr_cachelist, pc, pc_poollist);
pp               1620 kern/subr_pool.c 	simple_unlock(&pp->pr_slock);
pp               1631 kern/subr_pool.c 	struct pool *pp = pc->pc_pool;
pp               1637 kern/subr_pool.c 	simple_lock(&pp->pr_slock);
pp               1638 kern/subr_pool.c 	TAILQ_REMOVE(&pp->pr_cachelist, pc, pc_poollist);
pp               1639 kern/subr_pool.c 	simple_unlock(&pp->pr_slock);
pp               1876 kern/subr_pool.c 	struct pool *pp, *foundpool = NULL;
pp               1905 kern/subr_pool.c 	TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
pp               1907 kern/subr_pool.c 		if (lookfor == pp->pr_serial) {
pp               1908 kern/subr_pool.c 			foundpool = pp;
pp               1971 kern/subr_pool.c pool_allocator_alloc(struct pool *pp, int flags)
pp               1974 kern/subr_pool.c 	return (pp->pr_alloc->pa_alloc(pp, flags));
pp               1978 kern/subr_pool.c pool_allocator_free(struct pool *pp, void *v)
pp               1980 kern/subr_pool.c 	struct pool_allocator *pa = pp->pr_alloc;
pp               1983 kern/subr_pool.c 	(*pa->pa_free)(pp, v);
pp               1993 kern/subr_pool.c 	TAILQ_FOREACH(pp, &pa->pa_list, pr_alloc_list) {
pp               1994 kern/subr_pool.c 		simple_lock(&pp->pr_slock);
pp               1995 kern/subr_pool.c 		if ((pp->pr_flags & PR_WANTED) != 0) {
pp               1996 kern/subr_pool.c 			pp->pr_flags &= ~PR_WANTED;
pp               1997 kern/subr_pool.c 			wakeup(pp);
pp               1999 kern/subr_pool.c 		simple_unlock(&pp->pr_slock);
pp               2007 kern/subr_pool.c pool_page_alloc(struct pool *pp, int flags)
pp               2015 kern/subr_pool.c pool_page_free(struct pool *pp, void *v)
pp               2022 kern/subr_pool.c pool_page_alloc_oldnointr(struct pool *pp, int flags)
pp               2033 kern/subr_pool.c pool_page_free_oldnointr(struct pool *pp, void *v)
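Note: the bulk of the subr_pool.c hits are the pool(9) allocator itself: pool_init() records the item size, alignment and back-end allocator, pool_get()/pool_put() hand items in and out while maintaining the per-page item lists, and pool_setlowat()/pool_sethardlimit() set the low-water and hard limits that pool_catchup() and the hard-limit check in pool_get() enforce. A minimal consumer sketch follows; it is kernel-side pseudusage, assuming the seven-argument pool_init() of this era (trailing arguments being the wait-channel name and allocator), the usual PR_WAITOK/PR_NOWAIT flags, and an IPL chosen by the caller:

/* Kernel-side sketch, not standalone user code. */
#include <sys/param.h>
#include <sys/pool.h>

struct foo {
        int     f_state;
        char    f_name[16];
};

struct pool foo_pool;

void
foo_init(void)
{
        pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl", NULL);
        pool_setipl(&foo_pool, IPL_NET);        /* splassert()ed in get/put */
        pool_setlowat(&foo_pool, 8);            /* keep a small reserve primed */
}

struct foo *
foo_alloc(int canwait)
{
        /* PR_NOWAIT may return NULL; PR_WAITOK sleeps on the wait channel. */
        return (pool_get(&foo_pool, canwait ? PR_WAITOK : PR_NOWAIT));
}

void
foo_free(struct foo *f)
{
        pool_put(&foo_pool, f);
}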
pp                368 kern/sys_process.c 			struct proc *pp;
pp                370 kern/sys_process.c 			pp = pfind(t->p_oppid);
pp                371 kern/sys_process.c 			proc_reparent(t, pp ? pp : initproc);
pp                 73 lib/libsa/cd9660.c #define	PTSIZE(pp)	roundup(PTFIXSZ + isonum_711((pp)->namlen), 2)
pp                 78 lib/libsa/cd9660.c pnmatch(char *path, struct ptable_ent *pp)
pp                 83 lib/libsa/cd9660.c 	cp = pp->name;
pp                 84 lib/libsa/cd9660.c 	for (i = isonum_711(pp->namlen); --i >= 0; path++, cp++) {
pp                139 lib/libsa/cd9660.c 	struct ptable_ent *pp;
pp                188 lib/libsa/cd9660.c 	pp = (struct ptable_ent *)buf;
pp                190 lib/libsa/cd9660.c 	bno = isonum_732(pp->block) + isonum_711(pp->extlen);
pp                200 lib/libsa/cd9660.c 		if ((void *)pp >= buf + psize)
pp                202 lib/libsa/cd9660.c 		if (isonum_722(pp->parent) != parent)
pp                204 lib/libsa/cd9660.c 		if (!pnmatch(path, pp)) {
pp                205 lib/libsa/cd9660.c 			pp = (struct ptable_ent *)((void *)pp + PTSIZE(pp));
pp                209 lib/libsa/cd9660.c 		path += isonum_711(pp->namlen) + 1;
pp                211 lib/libsa/cd9660.c 		bno = isonum_732(pp->block) + isonum_711(pp->extlen);
pp                212 lib/libsa/cd9660.c 		while ((void *)pp < buf + psize) {
pp                213 lib/libsa/cd9660.c 			if (isonum_722(pp->parent) == parent)
pp                215 lib/libsa/cd9660.c 			pp = (struct ptable_ent *)((void *)pp + PTSIZE(pp));
pp                331 lib/libsa/net.c 	u_int *pp = parts;
pp                355 lib/libsa/net.c 			if (pp >= parts + 3 || val > 0xff)
pp                357 lib/libsa/net.c 			*pp++ = val, cp++;
pp                371 lib/libsa/net.c 	n = pp - parts + 1;
pp                191 miscfs/procfs/procfs_ctl.c 			struct proc *pp;
pp                193 miscfs/procfs/procfs_ctl.c 			pp = pfind(p->p_oppid);
pp                194 miscfs/procfs/procfs_ctl.c 			if (pp)
pp                195 miscfs/procfs/procfs_ctl.c 				proc_reparent(p, pp);
pp                160 net/pf_ioctl.c 	pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
pp               1875 net/pf_ioctl.c 		    pf_pool_limits[pl->index].pp == NULL) {
pp               1879 net/pf_ioctl.c 		if (pool_sethardlimit(pf_pool_limits[pl->index].pp,
pp               2061 net/pf_ioctl.c 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
pp               2064 net/pf_ioctl.c 		pp->ticket = ++ticket_pabuf;
pp               2069 net/pf_ioctl.c 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
pp               2071 net/pf_ioctl.c 		if (pp->ticket != ticket_pabuf) {
pp               2076 net/pf_ioctl.c 		if (pp->af == AF_INET) {
pp               2082 net/pf_ioctl.c 		if (pp->af == AF_INET6) {
pp               2087 net/pf_ioctl.c 		if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
pp               2088 net/pf_ioctl.c 		    pp->addr.addr.type != PF_ADDR_DYNIFTL &&
pp               2089 net/pf_ioctl.c 		    pp->addr.addr.type != PF_ADDR_TABLE) {
pp               2098 net/pf_ioctl.c 		bcopy(&pp->addr, pa, sizeof(struct pf_pooladdr));
pp               2108 net/pf_ioctl.c 		if (pfi_dynaddr_setup(&pa->addr, pp->af)) {
pp               2120 net/pf_ioctl.c 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
pp               2122 net/pf_ioctl.c 		pp->nr = 0;
pp               2123 net/pf_ioctl.c 		pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
pp               2124 net/pf_ioctl.c 		    pp->r_num, 0, 1, 0);
pp               2130 net/pf_ioctl.c 			pp->nr++;
pp               2135 net/pf_ioctl.c 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
pp               2138 net/pf_ioctl.c 		pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
pp               2139 net/pf_ioctl.c 		    pp->r_num, 0, 1, 1);
pp               2145 net/pf_ioctl.c 		while ((pa != NULL) && (nr < pp->nr)) {
pp               2153 net/pf_ioctl.c 		bcopy(pa, &pp->addr, sizeof(struct pf_pooladdr));
pp               2154 net/pf_ioctl.c 		pfi_dynaddr_copyout(&pp->addr.addr);
pp               2155 net/pf_ioctl.c 		pf_tbladdr_copyout(&pp->addr.addr);
pp               2156 net/pf_ioctl.c 		pf_rtlabel_copyout(&pp->addr.addr);
pp                173 net/pfkeyv2.c  	struct pfkeyv2_socket **pp;
pp                175 net/pfkeyv2.c  	for (pp = &pfkeyv2_sockets; *pp && ((*pp)->socket != socket);
pp                176 net/pfkeyv2.c  	    pp = &((*pp)->next))
pp                179 net/pfkeyv2.c  	if (*pp) {
pp                182 net/pfkeyv2.c  		pfkeyv2_socket = *pp;
pp                183 net/pfkeyv2.c  		*pp = (*pp)->next;
pp               1715 net/pfvar.h    	void		*pp;
pp                789 uvm/uvm_aobj.c 	struct vm_page *pp, *ppnext;
pp                835 uvm/uvm_aobj.c 		pp = TAILQ_FIRST(&uobj->memq);
pp                838 uvm/uvm_aobj.c 		pp = uvm_pagelookup(uobj, curoff);
pp                845 uvm/uvm_aobj.c 	for ( ; (by_list && pp != NULL) ||
pp                846 uvm/uvm_aobj.c 	    (!by_list && curoff < stop) ; pp = ppnext) {
pp                848 uvm/uvm_aobj.c 			ppnext = TAILQ_NEXT(pp, listq);
pp                851 uvm/uvm_aobj.c 			if (pp->offset < start || pp->offset >= stop)
pp                859 uvm/uvm_aobj.c 			if (pp == NULL)
pp                875 uvm/uvm_aobj.c 			if (pp->loan_count != 0 ||
pp                876 uvm/uvm_aobj.c 			    pp->wire_count != 0)
pp                881 uvm/uvm_aobj.c 			pmap_clear_reference(pp);
pp                884 uvm/uvm_aobj.c 			pmap_page_protect(pp, VM_PROT_NONE);
pp                888 uvm/uvm_aobj.c 			uvm_pagedeactivate(pp);
pp                901 uvm/uvm_aobj.c 			if (pp->loan_count != 0 ||
pp                902 uvm/uvm_aobj.c 			    pp->wire_count != 0)
pp                908 uvm/uvm_aobj.c 			if (pp->pg_flags & PG_BUSY) {
pp                909 uvm/uvm_aobj.c 				atomic_setbits_int(&pp->pg_flags, PG_RELEASED);
pp                914 uvm/uvm_aobj.c 			pmap_page_protect(pp, VM_PROT_NONE);
pp                916 uvm/uvm_aobj.c 			uao_dropswap(uobj, pp->offset >> PAGE_SHIFT);
pp                917 uvm/uvm_aobj.c 			uvm_pagefree(pp);
pp                264 uvm/uvm_km.c   	struct vm_page *pp;
pp                271 uvm/uvm_km.c   		pp = uvm_pagelookup(uobj, curoff);
pp                272 uvm/uvm_km.c   		if (pp == NULL)
pp                275 uvm/uvm_km.c   		UVMHIST_LOG(maphist,"  page %p, busy=%ld", pp,
pp                276 uvm/uvm_km.c   		    pp->pg_flags & PG_BUSY, 0, 0);
pp                278 uvm/uvm_km.c   		if (pp->pg_flags & PG_BUSY) {
pp                280 uvm/uvm_km.c   			atomic_setbits_int(&pp->pg_flags, PG_RELEASED);
pp                290 uvm/uvm_km.c   			uvm_pagefree(pp);
pp                123 uvm/uvm_pager.c 	struct vm_page *pp;
pp                173 uvm/uvm_pager.c 		pp = *pps++;
pp                174 uvm/uvm_pager.c 		KASSERT(pp);
pp                175 uvm/uvm_pager.c 		KASSERT(pp->pg_flags & PG_BUSY);
pp                176 uvm/uvm_pager.c 		pmap_enter(vm_map_pmap(pager_map), cva, VM_PAGE_TO_PHYS(pp),
pp                585 uvm/uvm_vnode.c 		struct vm_page *pp;
pp                586 uvm/uvm_vnode.c 		TAILQ_FOREACH(pp, &uvn->u_obj.memq, listq) {
pp                587 uvm/uvm_vnode.c 			if ((pp->pg_flags & PG_BUSY) == 0)
pp                811 uvm/uvm_vnode.c 	struct vm_page *pp, *ppnext, *ptmp;
pp                860 uvm/uvm_vnode.c 			TAILQ_FOREACH(pp, &uobj->memq, listq) {
pp                862 uvm/uvm_vnode.c 				    (pp->offset < start || pp->offset >= stop))
pp                864 uvm/uvm_vnode.c 				atomic_clearbits_int(&pp->pg_flags,
pp                871 uvm/uvm_vnode.c 				pp = uvm_pagelookup(uobj, curoff);
pp                872 uvm/uvm_vnode.c 				if (pp)
pp                873 uvm/uvm_vnode.c 					atomic_clearbits_int(&pp->pg_flags,
pp                886 uvm/uvm_vnode.c 		pp = TAILQ_FIRST(&uobj->memq);
pp                889 uvm/uvm_vnode.c 		pp = uvm_pagelookup(uobj, curoff);
pp                897 uvm/uvm_vnode.c 	for ( ; (by_list && pp != NULL) ||
pp                898 uvm/uvm_vnode.c 	  (!by_list && curoff < stop) ; pp = ppnext) {
pp                907 uvm/uvm_vnode.c 			    (pp->offset < start || pp->offset >= stop)) {
pp                908 uvm/uvm_vnode.c 				ppnext = TAILQ_NEXT(pp, listq);
pp                919 uvm/uvm_vnode.c 			if (pp == NULL) {
pp                938 uvm/uvm_vnode.c 		if ((flags & PGO_CLEANIT) == 0 || (pp->pg_flags & PG_BUSY) != 0) {
pp                940 uvm/uvm_vnode.c 			if ((pp->pg_flags & PG_BUSY) != 0 &&
pp                949 uvm/uvm_vnode.c 			if ((pp->pg_flags & PG_CLEAN) != 0 &&
pp                951 uvm/uvm_vnode.c 			    (pp->pg_flags & PQ_ACTIVE) != 0)
pp                952 uvm/uvm_vnode.c 				pmap_page_protect(pp, VM_PROT_NONE);
pp                953 uvm/uvm_vnode.c 			if ((pp->pg_flags & PG_CLEAN) != 0 &&
pp                954 uvm/uvm_vnode.c 			    pmap_is_modified(pp))
pp                955 uvm/uvm_vnode.c 				atomic_clearbits_int(&pp->pg_flags, PG_CLEAN);
pp                956 uvm/uvm_vnode.c 			atomic_setbits_int(&pp->pg_flags, PG_CLEANCHK);
pp                958 uvm/uvm_vnode.c 			needs_clean = ((pp->pg_flags & PG_CLEAN) == 0);
pp                967 uvm/uvm_vnode.c 				ppnext = TAILQ_NEXT(pp, listq);
pp                975 uvm/uvm_vnode.c 				if ((pp->pg_flags & PQ_INACTIVE) == 0 &&
pp                976 uvm/uvm_vnode.c 				    pp->wire_count == 0) {
pp                977 uvm/uvm_vnode.c 					pmap_page_protect(pp, VM_PROT_NONE);
pp                978 uvm/uvm_vnode.c 					uvm_pagedeactivate(pp);
pp                982 uvm/uvm_vnode.c 				if (pp->pg_flags & PG_BUSY) {
pp                984 uvm/uvm_vnode.c 					atomic_setbits_int(&pp->pg_flags,
pp                987 uvm/uvm_vnode.c 					pmap_page_protect(pp, VM_PROT_NONE);
pp                989 uvm/uvm_vnode.c 					uvm_pagefree(pp);
pp               1005 uvm/uvm_vnode.c 		atomic_setbits_int(&pp->pg_flags, PG_BUSY);
pp               1006 uvm/uvm_vnode.c 		UVM_PAGE_OWN(pp, "uvn_flush");
pp               1007 uvm/uvm_vnode.c 		pmap_page_protect(pp, VM_PROT_READ);
pp               1008 uvm/uvm_vnode.c 		pp_version = pp->pg_version;
pp               1014 uvm/uvm_vnode.c 		result = uvm_pager_put(uobj, pp, &ppsp, &npages,
pp               1072 uvm/uvm_vnode.c 					if (pp->pg_version == pp_version)
pp               1073 uvm/uvm_vnode.c 						ppnext = TAILQ_NEXT(pp, listq);
pp               1103 uvm/uvm_vnode.c 				if (ppsp[lcv] == pp)
pp               1107 uvm/uvm_vnode.c 				ptmp = pp;
pp               1111 uvm/uvm_vnode.c 					if (pp->pg_version == pp_version)
pp               1112 uvm/uvm_vnode.c 						ppnext = TAILQ_NEXT(pp, listq);
pp               1167 uvm/uvm_vnode.c 				if ((pp->pg_flags & PQ_INACTIVE) == 0 &&
pp               1168 uvm/uvm_vnode.c 				    pp->wire_count == 0) {
pp               1185 uvm/uvm_vnode.c 						    pp->uobject,
pp               1186 uvm/uvm_vnode.c 						    (long long)pp->offset);
pp                342 xfs/xfs_dev-common.c 	d_thread_t *pp = xfs_uio_to_thread(uiop);
pp                344 xfs/xfs_dev-common.c 	d_thread_t *pp = xfs_uio_to_proc(uiop);
pp                351 xfs/xfs_dev-common.c 				     pp);