bucket            150 dev/raidframe/rf_debugMem.c 	unsigned long bucket = HASHADDR(addr);
bucket            156 dev/raidframe/rf_debugMem.c 	for (p = mh_table[bucket]; p && (p->address != addr); p = p->next);
bucket            160 dev/raidframe/rf_debugMem.c 		p->next = mh_table[bucket];
bucket            161 dev/raidframe/rf_debugMem.c 		mh_table[bucket] = p;
bucket            182 dev/raidframe/rf_debugMem.c 	unsigned long bucket = HASHADDR(addr);
bucket            186 dev/raidframe/rf_debugMem.c 	for (p = mh_table[bucket]; p && (p->address != addr); p = p->next);
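The rf_debugMem.c hits above show the usual address-keyed hash pattern: HASHADDR() picks a chain, new records are pushed on the head, and lookups walk the chain with an empty loop body. The following is a minimal userland sketch of that pattern, not the RAIDframe code itself; NBUCKETS, struct mem_rec, and the hash constant are assumptions made for the example.

#include <stdio.h>
#include <stdlib.h>

#define NBUCKETS 256
/* Assumed hash: drop low alignment bits, then fold into the table. */
#define HASHADDR(a) (((unsigned long)(a) >> 3) % NBUCKETS)

struct mem_rec {
    void *address;
    size_t size;
    struct mem_rec *next;
};

static struct mem_rec *mh_table[NBUCKETS];

/* Record an allocation: prepend a new node to its hash chain. */
static void
record_alloc(void *addr, size_t size)
{
    unsigned long bucket = HASHADDR(addr);
    struct mem_rec *p = malloc(sizeof(*p));

    if (p == NULL)
        return;
    p->address = addr;
    p->size = size;
    p->next = mh_table[bucket];
    mh_table[bucket] = p;
}

/* Look up an allocation: walk the chain with an empty loop body. */
static struct mem_rec *
lookup_alloc(void *addr)
{
    unsigned long bucket = HASHADDR(addr);
    struct mem_rec *p;

    for (p = mh_table[bucket]; p && (p->address != addr); p = p->next)
        ;
    return p;
}

int
main(void)
{
    char buf[32];
    struct mem_rec *r;

    record_alloc(buf, sizeof(buf));
    r = lookup_alloc(buf);
    if (r != NULL)
        printf("found %p, %zu bytes\n", r->address, r->size);
    return 0;
}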
bucket             77 kern/kern_malloc.c struct kmembuckets bucket[MINBUCKET + 16];
bucket            178 kern/kern_malloc.c 	kbp = &bucket[indx];
bucket            368 kern/kern_malloc.c 	kbp = &bucket[kup->ku_indx];
bucket            530 kern/kern_malloc.c 			bucket[indx].kb_elmpercl = 1;
bucket            532 kern/kern_malloc.c 			bucket[indx].kb_elmpercl = PAGE_SIZE / (1 << indx);
bucket            533 kern/kern_malloc.c 		bucket[indx].kb_highwat = 5 * bucket[indx].kb_elmpercl;
bucket            576 kern/kern_malloc.c 		bcopy(&bucket[BUCKETINDX(name[1])], &kb, sizeof(kb));
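The kern_malloc.c hits above all index the global bucket[] array by a power-of-two size class and derive the per-page element count from the class size. The sketch below mirrors that arithmetic in userland; MINBUCKET, PAGE_SIZE, struct kmembuckets_sketch, and the simplified index loop are assumptions for the example, not the kernel's BUCKETINDX() macro.

#include <stdio.h>

#define MINBUCKET   4               /* smallest class: 1 << 4 = 16 bytes */
#define NBUCKETS    (MINBUCKET + 16)
#define PAGE_SIZE   4096

struct kmembuckets_sketch {
    long kb_elmpercl;               /* elements per cluster (page) */
    long kb_highwat;                /* high-water mark of the free list */
};

static struct kmembuckets_sketch bucket[NBUCKETS];

/* Simplified stand-in for BUCKETINDX(): log2 of the rounded-up size. */
static int
bucket_index(unsigned long size)
{
    int indx = MINBUCKET;

    while ((1UL << indx) < size)
        indx++;
    return indx;
}

int
main(void)
{
    int indx;

    /* Initialization in the spirit of the bucket setup shown above. */
    for (indx = MINBUCKET; indx < NBUCKETS; indx++) {
        if ((1 << indx) >= PAGE_SIZE)
            bucket[indx].kb_elmpercl = 1;
        else
            bucket[indx].kb_elmpercl = PAGE_SIZE / (1 << indx);
        bucket[indx].kb_highwat = 5 * bucket[indx].kb_elmpercl;
    }

    indx = bucket_index(100);       /* a 100-byte request rounds up to 128 */
    printf("size 100 -> bucket %d (%d bytes), %ld per page, highwat %ld\n",
        indx, 1 << indx, bucket[indx].kb_elmpercl, bucket[indx].kb_highwat);
    return 0;
}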
bucket            269 kern/kern_timeout.c db_show_callout_bucket(struct circq *bucket)
bucket            276 kern/kern_timeout.c 	for (p = CIRCQ_FIRST(bucket); p != bucket; p = CIRCQ_FIRST(p)) {
bucket            281 kern/kern_timeout.c 		    (bucket - timeout_wheel) / WHEELSIZE,
bucket            282 kern/kern_timeout.c 		    bucket - timeout_wheel, to->to_arg, name);
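In db_show_callout_bucket() above, the bucket head itself is the circular-queue sentinel, so the walk stops when it comes back around to the head, and the bucket's position is recovered by pointer arithmetic against timeout_wheel. Below is a standalone sketch of that traversal; struct circq_sketch, struct timeout_sketch, WHEELSIZE, and the insert helper are assumptions for the example.

#include <stdio.h>

#define WHEELSIZE 256

struct circq_sketch {
    struct circq_sketch *next;
    struct circq_sketch *prev;
};

struct timeout_sketch {
    struct circq_sketch to_list;    /* linkage, kept as the first member */
    int to_ticks;
};

static struct circq_sketch timeout_wheel[WHEELSIZE];

static void
circq_init(struct circq_sketch *q)
{
    q->next = q->prev = q;
}

/* Append an element at the tail of a circular queue. */
static void
circq_insert(struct circq_sketch *q, struct circq_sketch *elm)
{
    elm->prev = q->prev;
    elm->next = q;
    q->prev->next = elm;
    q->prev = elm;
}

int
main(void)
{
    struct timeout_sketch a = { .to_ticks = 10 }, b = { .to_ticks = 20 };
    struct circq_sketch *bucket = &timeout_wheel[3];
    struct circq_sketch *p;
    int i;

    for (i = 0; i < WHEELSIZE; i++)
        circq_init(&timeout_wheel[i]);
    circq_insert(bucket, &a.to_list);
    circq_insert(bucket, &b.to_list);

    /* The bucket head is the sentinel: stop when the walk returns to it. */
    for (p = bucket->next; p != bucket; p = p->next) {
        struct timeout_sketch *to = (struct timeout_sketch *)p;

        printf("bucket %ld: timeout with %d ticks\n",
            (long)(bucket - timeout_wheel), to->to_ticks);
    }
    return 0;
}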
bucket            404 sys/malloc.h   	struct kmembuckets *kbp = &bucket[BUCKETINDX(kbp_size)]; \
bucket            422 sys/malloc.h   		kbp = &bucket[kup->ku_indx]; \
bucket            437 sys/malloc.h   extern struct kmembuckets bucket[];
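The sys/malloc.h hits show the macro side of the same design: the MALLOC()/FREE() macros resolve the bucket pointer up front from the size, and the extern declaration shares the one bucket[] array defined in kern_malloc.c. A rough sketch of that macro shape follows; everything with a _sketch suffix, the kb_calls-style counter, and the index helper are assumptions for the example, not the kernel macros.

#include <stdio.h>
#include <stdlib.h>

#define MINBUCKET 4
#define NBUCKETS  (MINBUCKET + 16)

struct kmembuckets_sketch {
    long kb_calls;                  /* requests that hit this bucket */
};

/* In the kernel header this is "extern struct kmembuckets bucket[];". */
static struct kmembuckets_sketch bucket[NBUCKETS];

static int
bucketindx_sketch(unsigned long size)
{
    int indx = MINBUCKET;

    while ((1UL << indx) < size)
        indx++;
    return indx;
}

/* Resolve the bucket once, account for the request, then allocate. */
#define MALLOC_SKETCH(ptr, cast, size) do {                \
    struct kmembuckets_sketch *kbp =                       \
        &bucket[bucketindx_sketch(size)];                  \
    kbp->kb_calls++;                                       \
    (ptr) = (cast)malloc(size);                            \
} while (0)

int
main(void)
{
    char *p;

    MALLOC_SKETCH(p, char *, 100);
    printf("bucket %d has %ld call(s)\n", bucketindx_sketch(100),
        bucket[bucketindx_sketch(100)].kb_calls);
    free(p);
    return 0;
}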
bucket           1412 uvm/uvm_aobj.c 		int bucket;
bucket           1415 uvm/uvm_aobj.c 		for (bucket = aobj->u_swhashmask; bucket >= 0; bucket--) {
bucket           1416 uvm/uvm_aobj.c 			for (elt = LIST_FIRST(&aobj->u_swhash[bucket]);
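The uvm_aobj.c hits sweep every bucket of a power-of-two sized swap hash by counting the bucket index down from the mask and walking each <sys/queue.h> LIST. The sketch below reproduces that iteration shape in userland; struct swslot_sketch, the table size, and the tags are assumptions for the example.

#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

struct swslot_sketch {
    int tag;
    LIST_ENTRY(swslot_sketch) list;
};

LIST_HEAD(swhash_sketch, swslot_sketch);

int
main(void)
{
    unsigned long swhashmask = 15;  /* 16 buckets: mask = size - 1 */
    struct swhash_sketch *swhash;
    struct swslot_sketch a = { .tag = 1 }, b = { .tag = 2 };
    struct swslot_sketch *elt, *next;
    long bucket;

    swhash = calloc(swhashmask + 1, sizeof(*swhash));
    if (swhash == NULL)
        return 1;
    for (bucket = 0; bucket <= (long)swhashmask; bucket++)
        LIST_INIT(&swhash[bucket]);
    LIST_INSERT_HEAD(&swhash[3], &a, list);
    LIST_INSERT_HEAD(&swhash[7], &b, list);

    /* Count down from the mask so every bucket is visited exactly once. */
    for (bucket = swhashmask; bucket >= 0; bucket--) {
        for (elt = LIST_FIRST(&swhash[bucket]); elt != NULL; elt = next) {
            /* Fetch the successor first, as the original does before
             * it may remove the current element. */
            next = LIST_NEXT(elt, list);
            printf("bucket %ld: tag %d\n", bucket, elt->tag);
        }
    }
    free(swhash);
    return 0;
}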