Occurrences of curr_md in arch/i386/i386/i686_mem.c:

389:  struct mem_range_desc *first_md, *last_md, *curr_md;
398:  for (curr_md = first_md; curr_md <= last_md; curr_md++) {
399:          if ((curr_md->mr_flags & MDF_ATTRMASK) == MDF_UNKNOWN)
404:  for (curr_md = first_md; curr_md <= last_md; curr_md++) {
405:          curr_md->mr_flags = mrcopyflags(curr_md->mr_flags & ~MDF_FIRMWARE, mrd->mr_flags);
406:          bcopy(mrd->mr_owner, curr_md->mr_owner, sizeof(mrd->mr_owner));
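Taken together, the hits at 398-406 show a two-pass pattern over a run of descriptors: one pass to veto the request if any covered range carries attributes the driver does not understand, a second to stamp the requested flags and owner onto each descriptor, clearing the set-by-firmware bit as it goes. Below is a minimal standalone sketch of that pattern; the struct layout, the MDF_* bit values, and the mrcopyflags() body are illustrative stand-ins, not the kernel's real definitions.

#include <stdio.h>
#include <string.h>

/* Stand-ins for the kernel's memory-range definitions; the bit
 * values here are illustrative only. */
#define MDF_UNCACHEABLE 0x0001
#define MDF_WRITEBACK   0x0008
#define MDF_UNKNOWN     0x0020  /* attributes we don't understand */
#define MDF_ATTRMASK    0x00ff
#define MDF_FIRMWARE    0x0100  /* set by BIOS/firmware */

struct mem_range_desc {
        unsigned long   mr_base;
        unsigned long   mr_len;
        int             mr_flags;
        char            mr_owner[8];
};

/* Stand-in for mrcopyflags(): keep the old non-attribute bits,
 * take the attribute bits from the new request. */
static int
mrcopyflags(int oldf, int newf)
{
        return ((oldf & ~MDF_ATTRMASK) | (newf & MDF_ATTRMASK));
}

/* Two-pass update in the style of the hits at 398-406: refuse if
 * any covered descriptor has unknown attributes, then stamp the
 * requested flags and owner onto each one. */
static int
set_range_flags(struct mem_range_desc *first_md,
    struct mem_range_desc *last_md, const struct mem_range_desc *mrd)
{
        struct mem_range_desc *curr_md;

        for (curr_md = first_md; curr_md <= last_md; curr_md++)
                if ((curr_md->mr_flags & MDF_ATTRMASK) == MDF_UNKNOWN)
                        return (-1);    /* EACCES in the kernel */

        for (curr_md = first_md; curr_md <= last_md; curr_md++) {
                curr_md->mr_flags = mrcopyflags(
                    curr_md->mr_flags & ~MDF_FIRMWARE, mrd->mr_flags);
                memcpy(curr_md->mr_owner, mrd->mr_owner,
                    sizeof(mrd->mr_owner));
        }
        return (0);
}

int
main(void)
{
        struct mem_range_desc fixed[2] = {
                { 0x00000, 0x10000, MDF_UNCACHEABLE | MDF_FIRMWARE, "sys" },
                { 0x10000, 0x10000, MDF_UNCACHEABLE | MDF_FIRMWARE, "sys" },
        };
        struct mem_range_desc req = { 0x00000, 0x20000, MDF_WRITEBACK, "me" };

        if (set_range_flags(&fixed[0], &fixed[1], &req) == 0)
                printf("flags now 0x%x, owner %s\n",
                    fixed[0].mr_flags, fixed[0].mr_owner);
        return (0);
}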
The remaining hits, same file:

422:  struct mem_range_desc *curr_md, *free_md;
433:  curr_md = sc->mr_desc + i;
435:  for (; i < sc->mr_ndesc; i++, curr_md++) {
436:          if (curr_md->mr_flags & MDF_ACTIVE) {
438:                  if ((curr_md->mr_base == mrd->mr_base) &&
439:                      (curr_md->mr_len == mrd->mr_len)) {
441:                          if (curr_md->mr_flags & MDF_BUSY)
445:                              ((curr_md->mr_flags & MDF_ATTRMASK)
449:                          free_md = curr_md;
453:                  if (mroverlap(curr_md, mrd)) {
455:                          if (i686_mtrrconflict(curr_md->mr_flags,
460:          free_md = curr_md;
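The hits at 433-460 trace a scan over the variable-range descriptor table: reuse an exactly matching active descriptor unless it is busy or carries unknown attributes, reject overlaps whose cache types conflict, and otherwise remember the first inactive slot. The sketch below reuses struct mem_range_desc and the MDF_* stand-ins from the previous sketch (appended to it, the two compile as one file); the softc layout, mroverlap(), and the conflict test standing in for i686_mtrrconflict() are simplified assumptions.

#include <stddef.h>

#define MDF_ACTIVE      0x0400  /* illustrative value only */
#define MDF_BUSY        0x0800  /* illustrative value only */

/* Softc stand-in: just the descriptor table and its size. */
struct mem_range_softc {
        struct mem_range_desc   *mr_desc;
        int                      mr_ndesc;
};

/* Stand-in for mroverlap(): do the two ranges intersect? */
static int
mroverlap(const struct mem_range_desc *a, const struct mem_range_desc *b)
{
        return (a->mr_base < b->mr_base + b->mr_len &&
            b->mr_base < a->mr_base + a->mr_len);
}

/* Stand-in for i686_mtrrconflict(): treat differing attribute bits
 * on overlapping ranges as a conflict; the real test is subtler. */
static int
mtrrconflict(int f1, int f2)
{
        return ((f1 & MDF_ATTRMASK) != (f2 & MDF_ATTRMASK));
}

/* Descriptor scan in the style of the hits at 433-460. */
static struct mem_range_desc *
find_variable_slot(struct mem_range_softc *sc, int i,
    const struct mem_range_desc *mrd)
{
        struct mem_range_desc *curr_md, *free_md;

        curr_md = sc->mr_desc + i;
        free_md = NULL;
        for (; i < sc->mr_ndesc; i++, curr_md++) {
                if (curr_md->mr_flags & MDF_ACTIVE) {
                        /* Exact match: hijack this entry if allowed. */
                        if ((curr_md->mr_base == mrd->mr_base) &&
                            (curr_md->mr_len == mrd->mr_len)) {
                                if (curr_md->mr_flags & MDF_BUSY)
                                        return (NULL);  /* EBUSY */
                                if ((curr_md->mr_flags & MDF_ATTRMASK) ==
                                    MDF_UNKNOWN)
                                        return (NULL);  /* EACCES */
                                free_md = curr_md;
                                break;
                        }
                        /* Overlap with incompatible cache attributes. */
                        if (mroverlap(curr_md, mrd) &&
                            mtrrconflict(curr_md->mr_flags, mrd->mr_flags))
                                return (NULL);          /* EINVAL */
                } else if (free_md == NULL)
                        free_md = curr_md;              /* first free slot */
        }
        return (free_md);
}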