me 3195 dev/pci/if_de.c struct mbuf *ms = NULL, *me = NULL;
me 3221 dev/pci/if_de.c me = ms;
me 3260 dev/pci/if_de.c for (me = ms; total_len > 0; total_len--) {
me 3261 dev/pci/if_de.c map = TULIP_GETCTX(me, bus_dmamap_t);
me 3266 dev/pci/if_de.c TULIP_SETCTX(me, NULL);
me 3268 dev/pci/if_de.c me->m_len = TULIP_RX_BUFLEN;
me 3270 dev/pci/if_de.c IF_DEQUEUE(&sc->tulip_rxq, me->m_next);
me 3271 dev/pci/if_de.c me = me->m_next;
me 3281 dev/pci/if_de.c me->m_len = total_len - last_offset;
me 3283 dev/pci/if_de.c map = TULIP_GETCTX(me, bus_dmamap_t);
me 3284 dev/pci/if_de.c bus_dmamap_sync(sc->tulip_dmatag, map, 0, me->m_len,
me 3289 dev/pci/if_de.c TULIP_SETCTX(me, NULL);
me 3294 dev/pci/if_de.c if (me == ms) {
me 3333 dev/pci/if_de.c map = TULIP_GETCTX(me, bus_dmamap_t);
me 3337 dev/pci/if_de.c TULIP_SETCTX(me, NULL);
me 3448 dev/pci/if_de.c me = ms->m_next;
me 3451 dev/pci/if_de.c } while ((ms = me) != NULL);
me 508 dev/pci/if_lmc.c struct mbuf *ms = NULL, *me = NULL;
me 533 dev/pci/if_lmc.c me = ms;
me 574 dev/pci/if_lmc.c for (me = ms; total_len > 0; total_len--) {
me 575 dev/pci/if_lmc.c map = LMC_GETCTX(me, bus_dmamap_t);
me 580 dev/pci/if_lmc.c LMC_SETCTX(me, NULL);
me 582 dev/pci/if_lmc.c me->m_len = LMC_RX_BUFLEN;
me 584 dev/pci/if_lmc.c IF_DEQUEUE(&sc->lmc_rxq, me->m_next);
me 585 dev/pci/if_lmc.c me = me->m_next;
me 606 dev/pci/if_lmc.c map = LMC_GETCTX(me, bus_dmamap_t);
me 607 dev/pci/if_lmc.c bus_dmamap_sync(sc->lmc_dmatag, map, 0, me->m_len,
me 612 dev/pci/if_lmc.c LMC_SETCTX(me, NULL);
me 615 dev/pci/if_lmc.c me->m_len = total_len - last_offset;
me 618 dev/pci/if_lmc.c if (me == ms)
me 632 dev/pci/if_lmc.c map = LMC_GETCTX(me, bus_dmamap_t);
me 636 dev/pci/if_lmc.c LMC_SETCTX(me, NULL);
me 738 dev/pci/if_lmc.c me = ms->m_next;
me 741 dev/pci/if_lmc.c } while ((ms = me) != NULL);
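
Note: the if_de.c and if_lmc.c hits above all come from the same receive-path idiom: ms holds the head of the mbuf chain being assembled for one packet and me walks it as the current buffer, every link but the last taking a full RX buffer and the tail taking the remainder (m_len = total_len - last_offset). A minimal userspace sketch of that walk, assuming a simplified stand-in mbuf rather than the kernel's <sys/mbuf.h> one; rx_chain_fill is a hypothetical name, not the drivers' exact loop:

    #include <assert.h>

    #define RX_BUFLEN 2048                  /* stand-in for TULIP_RX_BUFLEN / LMC_RX_BUFLEN */

    struct mbuf {                           /* simplified stand-in for the kernel mbuf */
        struct mbuf *m_next;
        int m_len;
    };

    /*
     * Every link but the last holds a full buffer; the tail holds
     * whatever is left, as at if_de.c:3268 and if_de.c:3281.
     */
    static void
    rx_chain_fill(struct mbuf *ms, int total_len)
    {
        struct mbuf *me = ms;
        int last_offset = 0;

        while (total_len - last_offset > RX_BUFLEN) {
            me->m_len = RX_BUFLEN;
            last_offset += RX_BUFLEN;
            me = me->m_next;                /* caller chained enough mbufs */
        }
        me->m_len = total_len - last_offset;
        assert(me->m_len >= 0);
    }
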
me 1377 dev/pci/ubsec.c struct ubsec_q2_modexp *me = (struct ubsec_q2_modexp *)q;
me 1380 dev/pci/ubsec.c krp = me->me_krp;
me 1381 dev/pci/ubsec.c rlen = (me->me_modbits + 7) / 8;
me 1384 dev/pci/ubsec.c bus_dmamap_sync(sc->sc_dmat, me->me_M.dma_map,
me 1385 dev/pci/ubsec.c 0, me->me_M.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
me 1386 dev/pci/ubsec.c bus_dmamap_sync(sc->sc_dmat, me->me_E.dma_map,
me 1387 dev/pci/ubsec.c 0, me->me_E.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
me 1388 dev/pci/ubsec.c bus_dmamap_sync(sc->sc_dmat, me->me_C.dma_map,
me 1389 dev/pci/ubsec.c 0, me->me_C.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
me 1390 dev/pci/ubsec.c bus_dmamap_sync(sc->sc_dmat, me->me_epb.dma_map,
me 1391 dev/pci/ubsec.c 0, me->me_epb.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
me 1400 dev/pci/ubsec.c bcopy(me->me_C.dma_vaddr,
me 1402 dev/pci/ubsec.c (me->me_modbits + 7) / 8);
me 1404 dev/pci/ubsec.c ubsec_kshift_l(me->me_shiftbits,
me 1405 dev/pci/ubsec.c me->me_C.dma_vaddr, me->me_normbits,
me 1412 dev/pci/ubsec.c bzero(me->me_E.dma_vaddr, me->me_E.dma_size);
me 1413 dev/pci/ubsec.c bzero(me->me_M.dma_vaddr, me->me_M.dma_size);
me 1414 dev/pci/ubsec.c bzero(me->me_C.dma_vaddr, me->me_C.dma_size);
me 1415 dev/pci/ubsec.c bzero(me->me_q.q_ctx.dma_vaddr, me->me_q.q_ctx.dma_size);
me 1418 dev/pci/ubsec.c SIMPLEQ_INSERT_TAIL(&sc->sc_q2free, &me->me_q, q_next);
me 1728 dev/pci/ubsec.c struct ubsec_q2_modexp *me = (struct ubsec_q2_modexp *)q;
me 1730 dev/pci/ubsec.c ubsec_dma_free(sc, &me->me_q.q_mcr);
me 1731 dev/pci/ubsec.c ubsec_dma_free(sc, &me->me_q.q_ctx);
me 1732 dev/pci/ubsec.c ubsec_dma_free(sc, &me->me_M);
me 1733 dev/pci/ubsec.c ubsec_dma_free(sc, &me->me_E);
me 1734 dev/pci/ubsec.c ubsec_dma_free(sc, &me->me_C);
me 1735 dev/pci/ubsec.c ubsec_dma_free(sc, &me->me_epb);
me 1736 dev/pci/ubsec.c free(me, M_DEVBUF);
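
Note: the ubsec.c completion path above pairs each BUS_DMASYNC_PRE* sync from submission with a POST* sync (1384-1391), then bzero()s every buffer that held modexp operands (1412-1415) before the queue element goes back on sc_q2free, so key material never lingers in DMA-able memory. A hedged userspace analogue of that scrub-before-release step; scrub_free is a hypothetical helper, and explicit_bzero is assumed available (OpenBSD libc and newer glibc provide it):

    #include <stdlib.h>
    #include <string.h>

    /*
     * Clear a buffer that held secret operands before releasing it.
     * A plain memset() right before free() may be optimized away;
     * explicit_bzero() guarantees the stores actually happen.
     */
    static void
    scrub_free(void *p, size_t len)
    {
        explicit_bzero(p, len);
        free(p);
    }
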
me 1801 dev/pci/ubsec.c struct ubsec_q2_modexp *me;
me 1808 dev/pci/ubsec.c me = (struct ubsec_q2_modexp *)malloc(sizeof *me, M_DEVBUF, M_NOWAIT);
me 1809 dev/pci/ubsec.c if (me == NULL) {
me 1813 dev/pci/ubsec.c bzero(me, sizeof *me);
me 1814 dev/pci/ubsec.c me->me_krp = krp;
me 1815 dev/pci/ubsec.c me->me_q.q_type = UBS_CTXOP_MODEXP;
me 1835 dev/pci/ubsec.c me->me_modbits = nbits;
me 1836 dev/pci/ubsec.c me->me_shiftbits = shiftbits;
me 1837 dev/pci/ubsec.c me->me_normbits = normbits;
me 1846 dev/pci/ubsec.c &me->me_q.q_mcr, 0)) {
me 1850 dev/pci/ubsec.c mcr = (struct ubsec_mcr *)me->me_q.q_mcr.dma_vaddr;
me 1853 dev/pci/ubsec.c &me->me_q.q_ctx, 0)) {
me 1863 dev/pci/ubsec.c if (ubsec_dma_malloc(sc, normbits / 8, &me->me_M, 0)) {
me 1869 dev/pci/ubsec.c me->me_M.dma_vaddr, normbits);
me 1871 dev/pci/ubsec.c if (ubsec_dma_malloc(sc, normbits / 8, &me->me_C, 0)) {
me 1875 dev/pci/ubsec.c bzero(me->me_C.dma_vaddr, me->me_C.dma_size);
me 1882 dev/pci/ubsec.c if (ubsec_dma_malloc(sc, normbits / 8, &me->me_E, 0)) {
me 1888 dev/pci/ubsec.c me->me_E.dma_vaddr, normbits);
me 1891 dev/pci/ubsec.c &me->me_epb, 0)) {
me 1895 dev/pci/ubsec.c epb = (struct ubsec_pktbuf *)me->me_epb.dma_vaddr;
me 1896 dev/pci/ubsec.c epb->pb_addr = htole32(me->me_E.dma_paddr);
me 1907 dev/pci/ubsec.c mcr->mcr_cmdctxp = htole32(me->me_q.q_ctx.dma_paddr);
me 1911 dev/pci/ubsec.c mcr->mcr_ipktbuf.pb_addr = htole32(me->me_M.dma_paddr);
me 1913 dev/pci/ubsec.c mcr->mcr_ipktbuf.pb_next = htole32(me->me_epb.dma_paddr);
me 1915 dev/pci/ubsec.c mcr->mcr_opktbuf.pb_addr = htole32(me->me_C.dma_paddr);
me 1929 dev/pci/ubsec.c ctx = (struct ubsec_ctx_modexp *)me->me_q.q_ctx.dma_vaddr;
me 1948 dev/pci/ubsec.c bus_dmamap_sync(sc->sc_dmat, me->me_M.dma_map,
me 1949 dev/pci/ubsec.c 0, me->me_M.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
me 1950 dev/pci/ubsec.c bus_dmamap_sync(sc->sc_dmat, me->me_E.dma_map,
me 1951 dev/pci/ubsec.c 0, me->me_E.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
me 1952 dev/pci/ubsec.c bus_dmamap_sync(sc->sc_dmat, me->me_C.dma_map,
me 1953 dev/pci/ubsec.c 0, me->me_C.dma_map->dm_mapsize, BUS_DMASYNC_PREREAD);
me 1954 dev/pci/ubsec.c bus_dmamap_sync(sc->sc_dmat, me->me_epb.dma_map,
me 1955 dev/pci/ubsec.c 0, me->me_epb.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
me 1959 dev/pci/ubsec.c SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &me->me_q, q_next);
me 1966 dev/pci/ubsec.c if (me != NULL) {
me 1967 dev/pci/ubsec.c if (me->me_q.q_mcr.dma_map != NULL)
me 1968 dev/pci/ubsec.c ubsec_dma_free(sc, &me->me_q.q_mcr);
me 1969 dev/pci/ubsec.c if (me->me_q.q_ctx.dma_map != NULL) {
me 1970 dev/pci/ubsec.c bzero(me->me_q.q_ctx.dma_vaddr, me->me_q.q_ctx.dma_size);
me 1971 dev/pci/ubsec.c ubsec_dma_free(sc, &me->me_q.q_ctx);
me 1973 dev/pci/ubsec.c if (me->me_M.dma_map != NULL) {
me 1974 dev/pci/ubsec.c bzero(me->me_M.dma_vaddr, me->me_M.dma_size);
me 1975 dev/pci/ubsec.c ubsec_dma_free(sc, &me->me_M);
me 1977 dev/pci/ubsec.c if (me->me_E.dma_map != NULL) {
me 1978 dev/pci/ubsec.c bzero(me->me_E.dma_vaddr, me->me_E.dma_size);
me 1979 dev/pci/ubsec.c ubsec_dma_free(sc, &me->me_E);
me 1981 dev/pci/ubsec.c if (me->me_C.dma_map != NULL) {
me 1982 dev/pci/ubsec.c bzero(me->me_C.dma_vaddr, me->me_C.dma_size);
me 1983 dev/pci/ubsec.c ubsec_dma_free(sc, &me->me_C);
me 1985 dev/pci/ubsec.c if (me->me_epb.dma_map != NULL)
me 1986 dev/pci/ubsec.c ubsec_dma_free(sc, &me->me_epb);
me 1987 dev/pci/ubsec.c free(me, M_DEVBUF);
me 2000 dev/pci/ubsec.c struct ubsec_q2_modexp *me;
me 2007 dev/pci/ubsec.c me = (struct ubsec_q2_modexp *)malloc(sizeof *me, M_DEVBUF, M_NOWAIT);
me 2008 dev/pci/ubsec.c if (me == NULL) {
me 2012 dev/pci/ubsec.c bzero(me, sizeof *me);
me 2013 dev/pci/ubsec.c me->me_krp = krp;
me 2014 dev/pci/ubsec.c me->me_q.q_type = UBS_CTXOP_MODEXP;
me 2035 dev/pci/ubsec.c me->me_modbits = nbits;
me 2036 dev/pci/ubsec.c me->me_shiftbits = shiftbits;
me 2037 dev/pci/ubsec.c me->me_normbits = normbits;
me 2046 dev/pci/ubsec.c &me->me_q.q_mcr, 0)) {
me 2050 dev/pci/ubsec.c mcr = (struct ubsec_mcr *)me->me_q.q_mcr.dma_vaddr;
me 2053 dev/pci/ubsec.c &me->me_q.q_ctx, 0)) {
me 2063 dev/pci/ubsec.c if (ubsec_dma_malloc(sc, normbits / 8, &me->me_M, 0)) {
me 2067 dev/pci/ubsec.c bzero(me->me_M.dma_vaddr, normbits / 8);
me 2069 dev/pci/ubsec.c me->me_M.dma_vaddr, (mbits + 7) / 8);
me 2071 dev/pci/ubsec.c if (ubsec_dma_malloc(sc, normbits / 8, &me->me_C, 0)) {
me 2075 dev/pci/ubsec.c bzero(me->me_C.dma_vaddr, me->me_C.dma_size);
me 2082 dev/pci/ubsec.c if (ubsec_dma_malloc(sc, normbits / 8, &me->me_E, 0)) {
me 2086 dev/pci/ubsec.c bzero(me->me_E.dma_vaddr, normbits / 8);
me 2088 dev/pci/ubsec.c me->me_E.dma_vaddr, (ebits + 7) / 8);
me 2091 dev/pci/ubsec.c &me->me_epb, 0)) {
me 2095 dev/pci/ubsec.c epb = (struct ubsec_pktbuf *)me->me_epb.dma_vaddr;
me 2096 dev/pci/ubsec.c epb->pb_addr = htole32(me->me_E.dma_paddr);
me 2107 dev/pci/ubsec.c mcr->mcr_cmdctxp = htole32(me->me_q.q_ctx.dma_paddr);
me 2111 dev/pci/ubsec.c mcr->mcr_ipktbuf.pb_addr = htole32(me->me_M.dma_paddr);
me 2113 dev/pci/ubsec.c mcr->mcr_ipktbuf.pb_next = htole32(me->me_epb.dma_paddr);
me 2115 dev/pci/ubsec.c mcr->mcr_opktbuf.pb_addr = htole32(me->me_C.dma_paddr);
me 2129 dev/pci/ubsec.c ctx = (struct ubsec_ctx_modexp *)me->me_q.q_ctx.dma_vaddr;
me 2147 dev/pci/ubsec.c bus_dmamap_sync(sc->sc_dmat, me->me_M.dma_map,
me 2148 dev/pci/ubsec.c 0, me->me_M.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
me 2149 dev/pci/ubsec.c bus_dmamap_sync(sc->sc_dmat, me->me_E.dma_map,
me 2150 dev/pci/ubsec.c 0, me->me_E.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
me 2151 dev/pci/ubsec.c bus_dmamap_sync(sc->sc_dmat, me->me_C.dma_map,
me 2152 dev/pci/ubsec.c 0, me->me_C.dma_map->dm_mapsize, BUS_DMASYNC_PREREAD);
me 2153 dev/pci/ubsec.c bus_dmamap_sync(sc->sc_dmat, me->me_epb.dma_map,
me 2154 dev/pci/ubsec.c 0, me->me_epb.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
me 2158 dev/pci/ubsec.c SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &me->me_q, q_next);
me 2165 dev/pci/ubsec.c if (me != NULL) {
me 2166 dev/pci/ubsec.c if (me->me_q.q_mcr.dma_map != NULL)
me 2167 dev/pci/ubsec.c ubsec_dma_free(sc, &me->me_q.q_mcr);
me 2168 dev/pci/ubsec.c if (me->me_q.q_ctx.dma_map != NULL) {
me 2169 dev/pci/ubsec.c bzero(me->me_q.q_ctx.dma_vaddr, me->me_q.q_ctx.dma_size);
me 2170 dev/pci/ubsec.c ubsec_dma_free(sc, &me->me_q.q_ctx);
me 2172 dev/pci/ubsec.c if (me->me_M.dma_map != NULL) {
me 2173 dev/pci/ubsec.c bzero(me->me_M.dma_vaddr, me->me_M.dma_size);
me 2174 dev/pci/ubsec.c ubsec_dma_free(sc, &me->me_M);
me 2176 dev/pci/ubsec.c if (me->me_E.dma_map != NULL) {
me 2177 dev/pci/ubsec.c bzero(me->me_E.dma_vaddr, me->me_E.dma_size);
me 2178 dev/pci/ubsec.c ubsec_dma_free(sc, &me->me_E);
me 2180 dev/pci/ubsec.c if (me->me_C.dma_map != NULL) {
me 2181 dev/pci/ubsec.c bzero(me->me_C.dma_vaddr, me->me_C.dma_size);
me 2182 dev/pci/ubsec.c ubsec_dma_free(sc, &me->me_C);
me 2184 dev/pci/ubsec.c if (me->me_epb.dma_map != NULL)
me 2185 dev/pci/ubsec.c ubsec_dma_free(sc, &me->me_epb);
me 2186 dev/pci/ubsec.c free(me, M_DEVBUF);
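
Note: the two near-identical modexp submission paths above (1801-1987 and 2000-2186) share an allocate-then-rollback shape: a sequence of ubsec_dma_malloc() calls where any failure drops into one error tail that zeroes and frees only the pieces whose dma_map is non-NULL, then releases the request itself. A compact sketch of the same shape with plain calloc(); the struct and function names are illustrative, not from ubsec.c:

    #include <stdlib.h>
    #include <string.h>

    struct modexp_bufs {
        void *m, *e, *c;                    /* message, exponent, result */
    };

    /* Allocate all three buffers or none; on failure undo what exists. */
    static int
    modexp_bufs_alloc(struct modexp_bufs *b, size_t len)
    {
        memset(b, 0, sizeof *b);
        if ((b->m = calloc(1, len)) == NULL)
            goto errout;
        if ((b->e = calloc(1, len)) == NULL)
            goto errout;
        if ((b->c = calloc(1, len)) == NULL)
            goto errout;
        return 0;

    errout:
        free(b->m);                         /* free(NULL) is a no-op, so only */
        free(b->e);                         /* the live buffers are released  */
        free(b->c);
        memset(b, 0, sizeof *b);
        return -1;
    }
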
me 769 dev/wscons/wskbd.c wskbd_mux_open(struct wsevsrc *me, struct wseventvar *evp)
me 771 dev/wscons/wskbd.c struct wskbd_softc *sc = (struct wskbd_softc *)me;
me 863 dev/wscons/wskbd.c wskbd_mux_close(struct wsevsrc *me)
me 865 dev/wscons/wskbd.c struct wskbd_softc *sc = (struct wskbd_softc *)me;
me 1188 dev/wscons/wskbd.c wskbd_set_console_display(struct device *displaydv, struct wsevsrc *me)
me 1196 dev/wscons/wskbd.c (void)wsmux_attach_sc((struct wsmux_softc *)me, &sc->sc_base);
me 675 dev/wscons/wsmouse.c wsmouse_mux_open(struct wsevsrc *me, struct wseventvar *evp)
me 677 dev/wscons/wsmouse.c struct wsmouse_softc *sc = (struct wsmouse_softc *)me;
me 686 dev/wscons/wsmouse.c wsmouse_mux_close(struct wsevsrc *me)
me 688 dev/wscons/wsmouse.c struct wsmouse_softc *sc = (struct wsmouse_softc *)me;
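
Note: casts such as (struct wskbd_softc *)me at wskbd.c:771 and (struct wsmouse_softc *)me at wsmouse.c:677 only work because each softc embeds its struct wsevsrc (the sc_base seen at wskbd.c:1196) as the first member, so the event-source pointer and the device pointer share an address. A self-contained illustration of that first-member idiom; the types are simplified stand-ins for the wscons ones:

    #include <assert.h>
    #include <stdio.h>

    struct wsevsrc_like {                   /* stand-in for struct wsevsrc */
        const char *name;
    };

    struct softc_like {                     /* stand-in for struct wskbd_softc */
        struct wsevsrc_like sc_base;        /* must be the first member */
        int sc_state;
    };

    int
    main(void)
    {
        struct softc_like sc = { { "wskbd0" }, 42 };
        struct wsevsrc_like *me = &sc.sc_base;

        /* Legal because sc_base sits at offset 0 of the softc. */
        struct softc_like *back = (struct softc_like *)me;
        assert(back == &sc);
        printf("%s state=%d\n", back->sc_base.name, back->sc_state);
        return 0;
    }
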
me 217 dev/wscons/wsmux.c wsmux_mux_open(struct wsevsrc *me, struct wseventvar *evar)
me 219 dev/wscons/wsmux.c struct wsmux_softc *sc = (struct wsmux_softc *)me;
me 241 dev/wscons/wsmux.c struct wsevsrc *me;
me 249 dev/wscons/wsmux.c CIRCLEQ_FOREACH(me, &sc->sc_cld, me_next) {
me 251 dev/wscons/wsmux.c sc->sc_base.me_dv.dv_xname, me, me->me_dv.dv_xname));
me 253 dev/wscons/wsmux.c if (me->me_evp != NULL) {
me 257 dev/wscons/wsmux.c if (me->me_parent != sc) {
me 258 dev/wscons/wsmux.c printf("wsmux_do_open: bad child=%p\n", me);
me 261 dev/wscons/wsmux.c error = wsevsrc_open(me, evar);
me 267 dev/wscons/wsmux.c (void)wsevsrc_open(me, evar);
me 296 dev/wscons/wsmux.c wsmux_mux_close(struct wsevsrc *me)
me 298 dev/wscons/wsmux.c me->me_evp = NULL;
me 299 dev/wscons/wsmux.c wsmux_do_close((struct wsmux_softc *)me);
me 307 dev/wscons/wsmux.c struct wsevsrc *me;
me 312 dev/wscons/wsmux.c CIRCLEQ_FOREACH(me, &sc->sc_cld, me_next) {
me 314 dev/wscons/wsmux.c sc->sc_base.me_dv.dv_xname, me, me->me_dv.dv_xname));
me 316 dev/wscons/wsmux.c if (me->me_parent != sc) {
me 317 dev/wscons/wsmux.c printf("wsmuxclose: bad child=%p\n", me);
me 321 dev/wscons/wsmux.c (void)wsevsrc_close(me);
me 322 dev/wscons/wsmux.c me->me_evp = NULL;
me 370 dev/wscons/wsmux.c struct wsevsrc *me;
me 441 dev/wscons/wsmux.c CIRCLEQ_FOREACH(me, &sc->sc_cld, me_next) {
me 442 dev/wscons/wsmux.c if (me->me_ops->type == d->type &&
me 443 dev/wscons/wsmux.c me->me_dv.dv_unit == d->idx) {
me 445 dev/wscons/wsmux.c wsmux_detach_sc(me);
me 456 dev/wscons/wsmux.c CIRCLEQ_FOREACH(me, &sc->sc_cld, me_next) {
me 459 dev/wscons/wsmux.c l->devices[n].type = me->me_ops->type;
me 460 dev/wscons/wsmux.c l->devices[n].idx = me->me_dv.dv_unit;
me 514 dev/wscons/wsmux.c CIRCLEQ_FOREACH(me, &sc->sc_cld, me_next) {
me 517 dev/wscons/wsmux.c if (me->me_parent != sc) {
me 518 dev/wscons/wsmux.c printf("wsmux_do_ioctl: bad child %p\n", me);
me 522 dev/wscons/wsmux.c error = wsevsrc_ioctl(me, cmd, data, flag, p);
me 524 dev/wscons/wsmux.c sc->sc_base.me_dv.dv_xname, me, me->me_dv.dv_xname,
me 607 dev/wscons/wsmux.c wsmux_attach_sc(struct wsmux_softc *sc, struct wsevsrc *me)
me 615 dev/wscons/wsmux.c sc->sc_base.me_dv.dv_xname, sc, me->me_ops->type));
me 618 dev/wscons/wsmux.c if (me->me_parent != NULL) {
me 623 dev/wscons/wsmux.c me->me_parent = sc;
me 624 dev/wscons/wsmux.c CIRCLEQ_INSERT_TAIL(&sc->sc_cld, me, me_next);
me 632 dev/wscons/wsmux.c if (me->me_ops->dsetdisplay != NULL) {
me 633 dev/wscons/wsmux.c error = wsevsrc_set_display(me, sc->sc_displaydv);
me 640 dev/wscons/wsmux.c me->me_dv.dv_xname, sc->sc_rawkbd));
me 641 dev/wscons/wsmux.c (void)wsevsrc_ioctl(me, WSKBDIO_SETMODE,
me 645 dev/wscons/wsmux.c (void)wsevsrc_ioctl(me,
me 655 dev/wscons/wsmux.c sc->sc_base.me_dv.dv_xname, me->me_dv.dv_xname));
me 656 dev/wscons/wsmux.c error = wsevsrc_open(me, sc->sc_base.me_evp);
me 663 dev/wscons/wsmux.c me->me_parent = NULL;
me 664 dev/wscons/wsmux.c CIRCLEQ_REMOVE(&sc->sc_cld, me, me_next);
me 674 dev/wscons/wsmux.c wsmux_detach_sc(struct wsevsrc *me)
me 676 dev/wscons/wsmux.c struct wsmux_softc *sc = me->me_parent;
me 679 dev/wscons/wsmux.c me->me_dv.dv_xname, me, sc));
me 684 dev/wscons/wsmux.c me->me_dv.dv_xname);
me 691 dev/wscons/wsmux.c if (me->me_ops->dsetdisplay != NULL)
me 693 dev/wscons/wsmux.c (void)wsevsrc_set_display(me, NULL);
me 696 dev/wscons/wsmux.c if (me->me_evp != NULL) {
me 699 dev/wscons/wsmux.c (void)wsevsrc_close(me);
me 702 dev/wscons/wsmux.c CIRCLEQ_REMOVE(&sc->sc_cld, me, me_next);
me 703 dev/wscons/wsmux.c me->me_parent = NULL;
me 716 dev/wscons/wsmux.c struct wsevsrc *me;
me 735 dev/wscons/wsmux.c CIRCLEQ_FOREACH(me, &sc->sc_cld, me_next) {
me 736 dev/wscons/wsmux.c DPRINTF(("wsmux_displayioctl: me=%p\n", me));
me 738 dev/wscons/wsmux.c if (me->me_parent != sc) {
me 739 dev/wscons/wsmux.c printf("wsmux_displayioctl: bad child %p\n", me);
me 743 dev/wscons/wsmux.c if (me->me_ops->ddispioctl != NULL) {
me 744 dev/wscons/wsmux.c error = wsevsrc_display_ioctl(me, cmd, data, flag, p);
me 746 dev/wscons/wsmux.c me, me->me_dv.dv_xname, error));
me 784 dev/wscons/wsmux.c struct wsevsrc *me;
me 797 dev/wscons/wsmux.c CIRCLEQ_FOREACH(me, &sc->sc_cld, me_next) {
me 799 dev/wscons/wsmux.c if (me->me_parent != sc) {
me 800 dev/wscons/wsmux.c printf("wsmux_set_display: bad child parent %p\n", me);
me 804 dev/wscons/wsmux.c if (me->me_ops->dsetdisplay != NULL) {
me 805 dev/wscons/wsmux.c error = wsevsrc_set_display(me, nsc->sc_displaydv);
me 807 dev/wscons/wsmux.c me, me->me_dv.dv_xname, error));
me 813 dev/wscons/wsmux.c me->me_dv.dv_xname, sc->sc_rawkbd));
me 814 dev/wscons/wsmux.c (void)wsevsrc_ioctl(me, WSKBDIO_SETMODE,
me 71 dev/wscons/wsmuxvar.h #define wsevsrc_open(me, evp) \
me 72 dev/wscons/wsmuxvar.h ((me)->me_ops->dopen((me), evp))
me 73 dev/wscons/wsmuxvar.h #define wsevsrc_close(me) \
me 74 dev/wscons/wsmuxvar.h ((me)->me_ops->dclose((me)))
me 75 dev/wscons/wsmuxvar.h #define wsevsrc_ioctl(me, cmd, data, flag, p) \
me 76 dev/wscons/wsmuxvar.h ((me)->me_ops->dioctl(&(me)->me_dv, cmd, (caddr_t)data, flag, p))
me 77 dev/wscons/wsmuxvar.h #define wsevsrc_display_ioctl(me, cmd, data, flag, p) \
me 78 dev/wscons/wsmuxvar.h ((me)->me_ops->ddispioctl(&(me)->me_dv, cmd, (caddr_t)data, flag, p))
me 79 dev/wscons/wsmuxvar.h #define wsevsrc_set_display(me, arg) \
me 80 dev/wscons/wsmuxvar.h ((me)->me_ops->dsetdisplay(&(me)->me_dv, arg))
me 1051 net/if_spppsubr.c u_int32_t me, mymask;
me 1111 net/if_spppsubr.c sppp_get_ip_addrs(sp, &me, 0, &mymask);
me 1112 net/if_spppsubr.c if (me != 0)
me 1113 net/if_spppsubr.c sppp_cisco_send(sp, CISCO_ADDR_REPLY, me, mymask);
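
Note: the wsmuxvar.h macros at 71-80 are the dispatch layer the wsmux.c loops above lean on: every wsevsrc carries an me_ops table of function pointers, and wsevsrc_open(), wsevsrc_close(), and friends just indirect through it, which is what lets a mux treat keyboards, mice, and nested muxes uniformly. A minimal version of that table-plus-macro pattern; all names here are invented for the sketch:

    #include <stdio.h>

    struct evsrc;

    struct evsrc_ops {                      /* cf. the me_ops table */
        int (*dopen)(struct evsrc *);
        int (*dclose)(struct evsrc *);
    };

    struct evsrc {
        const struct evsrc_ops *ops;
        const char *name;
    };

    /* cf. wsevsrc_open()/wsevsrc_close() in wsmuxvar.h */
    #define evsrc_open(me)  ((me)->ops->dopen((me)))
    #define evsrc_close(me) ((me)->ops->dclose((me)))

    static int kbd_open(struct evsrc *e)  { printf("open %s\n", e->name);  return 0; }
    static int kbd_close(struct evsrc *e) { printf("close %s\n", e->name); return 0; }

    static const struct evsrc_ops kbd_ops = { kbd_open, kbd_close };

    int
    main(void)
    {
        struct evsrc k = { &kbd_ops, "kbd0" };

        (void)evsrc_open(&k);               /* dispatches to kbd_open() */
        return evsrc_close(&k);
    }
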
me 390 uvm/uvm_map.c struct vm_map_entry *me, *ne;
me 397 uvm/uvm_map.c me = uvm.kentry_free;
me 398 uvm/uvm_map.c if (me == NULL) {
me 408 uvm/uvm_map.c me = ne;
me 414 uvm/uvm_map.c uvm.kentry_free = me->next;
me 418 uvm/uvm_map.c me->flags = UVM_MAP_STATIC;
me 421 uvm/uvm_map.c me = pool_get(&uvm_map_entry_kmem_pool, PR_WAITOK);
me 422 uvm/uvm_map.c me->flags = UVM_MAP_KMEM;
me 425 uvm/uvm_map.c me = pool_get(&uvm_map_entry_pool, PR_WAITOK);
me 426 uvm/uvm_map.c me->flags = 0;
me 429 uvm/uvm_map.c UVMHIST_LOG(maphist, "<- new entry=%p [kentry=%ld]", me,
me 431 uvm/uvm_map.c return(me);
me 441 uvm/uvm_map.c uvm_mapent_free(struct vm_map_entry *me)
me 447 uvm/uvm_map.c me, me->flags, 0, 0);
me 448 uvm/uvm_map.c if (me->flags & UVM_MAP_STATIC) {
me 451 uvm/uvm_map.c me->next = uvm.kentry_free;
me 452 uvm/uvm_map.c uvm.kentry_free = me;
me 456 uvm/uvm_map.c } else if (me->flags & UVM_MAP_KMEM) {
me 458 uvm/uvm_map.c pool_put(&uvm_map_entry_kmem_pool, me);
me 461 uvm/uvm_map.c pool_put(&uvm_map_entry_pool, me);
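
Note: the uvm_map.c hits above show a two-tier allocator: uvm_mapent_free() pushes UVM_MAP_STATIC entries onto the private uvm.kentry_free list through their next field, while pool-backed entries go back via pool_put(), and the allocation path at 390-431 prefers that freelist before falling back to the pools. A reduced sketch of the freelist discipline, with no locking and illustrative names:

    #include <stdlib.h>

    struct map_entry {
        struct map_entry *next;
        int flags;
    };

    #define ENT_STATIC 0x01                 /* cf. UVM_MAP_STATIC */

    static struct map_entry *entry_freelist;  /* cf. uvm.kentry_free */

    static struct map_entry *
    entry_alloc(void)
    {
        struct map_entry *me = entry_freelist;

        if (me != NULL) {                   /* reuse a static spare */
            entry_freelist = me->next;
            me->flags = ENT_STATIC;
            return me;
        }
        return calloc(1, sizeof *me);       /* fall back to the allocator */
    }

    static void
    entry_free(struct map_entry *me)
    {
        if (me->flags & ENT_STATIC) {       /* static entries are recycled */
            me->next = entry_freelist;
            entry_freelist = me;
        } else
            free(me);
    }
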