This source file includes the following definitions:
- epic_attach
- epic_shutdown
- epic_start
- epic_watchdog
- epic_ioctl
- epic_intr
- epic_tick
- epic_fixup_clock_source
- epic_reset
- epic_init
- epic_rxdrain
- epic_stop
- epic_read_eeprom
- epic_add_rxbuf
- epic_set_mchash
- epic_mii_wait
- epic_mii_read
- epic_mii_write
- epic_statchg
- epic_mediastatus
- epic_mediachange
46 #if 0
47 #include <sys/cdefs.h>
48 __KERNEL_RCSID(0, "$NetBSD: smc83c170.c,v 1.59 2005/02/27 00:27:02 perry Exp $");
49 #endif
50
51 #include "bpfilter.h"
52
53 #include <sys/param.h>
54 #include <sys/systm.h>
55 #include <sys/timeout.h>
56 #include <sys/mbuf.h>
57 #include <sys/malloc.h>
58 #include <sys/kernel.h>
59 #include <sys/socket.h>
60 #include <sys/ioctl.h>
61 #include <sys/errno.h>
62 #include <sys/device.h>
63
64 #include <net/if.h>
65 #include <net/if_dl.h>
66
67 #ifdef INET
68 #include <netinet/in.h>
69 #include <netinet/in_systm.h>
70 #include <netinet/in_var.h>
71 #include <netinet/ip.h>
72 #include <netinet/if_ether.h>
73 #endif
74
75 #include <net/if_media.h>
76
77 #if NBPFILTER > 0
78 #include <net/bpf.h>
79 #endif
80
81 #include <machine/bus.h>
82 #include <machine/intr.h>
83
84 #include <dev/mii/miivar.h>
85 #include <dev/mii/lxtphyreg.h>
86
87 #include <dev/ic/smc83c170reg.h>
88 #include <dev/ic/smc83c170var.h>
89
90 void epic_start(struct ifnet *);
91 void epic_watchdog(struct ifnet *);
92 int epic_ioctl(struct ifnet *, u_long, caddr_t);
93 int epic_init(struct ifnet *);
94 void epic_stop(struct ifnet *, int);
95
96 void epic_shutdown(void *);
97
98 void epic_reset(struct epic_softc *);
99 void epic_rxdrain(struct epic_softc *);
100 int epic_add_rxbuf(struct epic_softc *, int);
101 void epic_read_eeprom(struct epic_softc *, int, int, u_int16_t *);
102 void epic_set_mchash(struct epic_softc *);
103 void epic_fixup_clock_source(struct epic_softc *);
104 int epic_mii_read(struct device *, int, int);
105 void epic_mii_write(struct device *, int, int, int);
106 int epic_mii_wait(struct epic_softc *, u_int32_t);
107 void epic_tick(void *);
108
109 void epic_statchg(struct device *);
110 int epic_mediachange(struct ifnet *);
111 void epic_mediastatus(struct ifnet *, struct ifmediareq *);
112
113 struct cfdriver epic_cd = {
114 0, "epic", DV_IFNET
115 };
116
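/*
 * Interrupt causes this driver handles; see epic_intr() for the
 * corresponding service code.
 */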
117 #define INTMASK (INTSTAT_FATAL_INT | INTSTAT_TXU | \
118 INTSTAT_TXC | INTSTAT_RXE | INTSTAT_RQE | INTSTAT_RCC)
119
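/*
 * When non-zero, received packets no larger than MHLEN are copied into a
 * freshly allocated header mbuf instead of handing the cluster up the
 * stack (see the receive path in epic_intr()).
 */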
120 int epic_copy_small = 0;
121
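/*
 * Minimum Ethernet frame length excluding the CRC appended by the chip;
 * short transmit frames are padded to this length using the zero-filled
 * pad buffer set up in epic_attach().
 */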
122 #define ETHER_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN)
123
124 /*
125  * Attach an EPIC interface to the system.
126  */
127 void
128 epic_attach(struct epic_softc *sc, const char *intrstr)
129 {
130 bus_space_tag_t st = sc->sc_st;
131 bus_space_handle_t sh = sc->sc_sh;
132 struct ifnet *ifp = &sc->sc_arpcom.ac_if;
133 int rseg, error, miiflags;
134 u_int i;
135 bus_dma_segment_t seg;
136 u_int8_t enaddr[ETHER_ADDR_LEN], devname[12 + 1];
137 u_int16_t myea[ETHER_ADDR_LEN / 2], mydevname[6];
138 char *nullbuf;
139
140 timeout_set(&sc->sc_mii_timeout, epic_tick, sc);
141
142 /*
143  * Allocate the control data structures, and create and load the
144  * DMA map for them.
145  */
146 if ((error = bus_dmamem_alloc(sc->sc_dmat,
147 sizeof(struct epic_control_data) + ETHER_PAD_LEN, PAGE_SIZE, 0,
148 &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
149 printf(": unable to allocate control data, error = %d\n",
150 error);
151 goto fail_0;
152 }
153
154 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
155 sizeof(struct epic_control_data) + ETHER_PAD_LEN,
156 (caddr_t *)&sc->sc_control_data,
157 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
158 printf(": unable to map control data, error = %d\n", error);
159 goto fail_1;
160 }
161 nullbuf =
162 (char *)sc->sc_control_data + sizeof(struct epic_control_data);
163 memset(nullbuf, 0, ETHER_PAD_LEN);
164
165 if ((error = bus_dmamap_create(sc->sc_dmat,
166 sizeof(struct epic_control_data), 1,
167 sizeof(struct epic_control_data), 0, BUS_DMA_NOWAIT,
168 &sc->sc_cddmamap)) != 0) {
169 printf(": unable to create control data DMA map, error = %d\n",
170 error);
171 goto fail_2;
172 }
173
174 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
175 sc->sc_control_data, sizeof(struct epic_control_data), NULL,
176 BUS_DMA_NOWAIT)) != 0) {
177 printf(": unable to load control data DMA map, error = %d\n",
178 error);
179 goto fail_3;
180 }
181
182 /*
183  * Create the transmit buffer DMA maps.
184  */
185 for (i = 0; i < EPIC_NTXDESC; i++) {
186 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
187 EPIC_NFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
188 &EPIC_DSTX(sc, i)->ds_dmamap)) != 0) {
189 printf(": unable to create tx DMA map %d, error = %d\n",
190 i, error);
191 goto fail_4;
192 }
193 }
194
195 /*
196  * Create the receive buffer DMA maps.
197  */
198 for (i = 0; i < EPIC_NRXDESC; i++) {
199 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
200 MCLBYTES, 0, BUS_DMA_NOWAIT,
201 &EPIC_DSRX(sc, i)->ds_dmamap)) != 0) {
202 printf(": unable to create rx DMA map %d, error = %d\n",
203 i, error);
204 goto fail_5;
205 }
206 EPIC_DSRX(sc, i)->ds_mbuf = NULL;
207 }
208
209 /*
210  * Create and load the DMA map for the zero pad buffer.
211  */
212 if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_PAD_LEN, 1,
213 ETHER_PAD_LEN, 0, BUS_DMA_NOWAIT,&sc->sc_nulldmamap)) != 0) {
214 printf(": unable to create pad buffer DMA map, error = %d\n",
215 error);
216 goto fail_5;
217 }
218
219 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_nulldmamap,
220 nullbuf, ETHER_PAD_LEN, NULL, BUS_DMA_NOWAIT)) != 0) {
221 printf(": unable to load pad buffer DMA map, error = %d\n",
222 error);
223 goto fail_6;
224 }
225 bus_dmamap_sync(sc->sc_dmat, sc->sc_nulldmamap, 0, ETHER_PAD_LEN,
226 BUS_DMASYNC_PREWRITE);
227
228 /*
229  * Bring the chip out of low-power mode and reset it to a known state.
230  */
231 bus_space_write_4(st, sh, EPIC_GENCTL, 0);
232 epic_reset(sc);
233
234 /*
235  * Read the Ethernet address from the EEPROM.
236  */
237 epic_read_eeprom(sc, 0, (sizeof(myea) / sizeof(myea[0])), myea);
238 for (i = 0; i < sizeof(myea)/ sizeof(myea[0]); i++) {
239 enaddr[i * 2] = myea[i] & 0xff;
240 enaddr[i * 2 + 1] = myea[i] >> 8;
241 }
242
243 /*
244  * Read the device name from the EEPROM and strip trailing spaces.
245  */
246 epic_read_eeprom(sc, 0x2c, (sizeof(mydevname) / sizeof(mydevname[0])),
247 mydevname);
248 for (i = 0; i < sizeof(mydevname) / sizeof(mydevname[0]); i++) {
249 devname[i * 2] = mydevname[i] & 0xff;
250 devname[i * 2 + 1] = mydevname[i] >> 8;
251 }
252
253 devname[sizeof(devname) - 1] = ' ';
254 for (i = sizeof(devname) - 1; devname[i] == ' '; i--) {
255 devname[i] = '\0';
256 if (i == 0)
257 break;
258 }
259
260 printf(", %s : %s, address %s\n", devname, intrstr,
261 ether_sprintf(enaddr));
262
263 miiflags = 0;
264 if (sc->sc_hwflags & EPIC_HAS_MII_FIBER)
265 miiflags |= MIIF_HAVEFIBER;
266
267 /*
268  * Initialize our media structures and probe the MII.
269  */
270 sc->sc_mii.mii_ifp = ifp;
271 sc->sc_mii.mii_readreg = epic_mii_read;
272 sc->sc_mii.mii_writereg = epic_mii_write;
273 sc->sc_mii.mii_statchg = epic_statchg;
274 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, epic_mediachange,
275 epic_mediastatus);
276 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
277 MII_OFFSET_ANY, miiflags);
278 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
279 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
280 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
281 } else
282 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
283
284 if (sc->sc_hwflags & EPIC_HAS_BNC) {
285
286 sc->sc_serinst = sc->sc_mii.mii_instance++;
287 ifmedia_add(&sc->sc_mii.mii_media,
288 IFM_MAKEWORD(IFM_ETHER, IFM_10_2, 0,
289 sc->sc_serinst),
290 0, NULL);
291 } else
292 sc->sc_serinst = -1;
293
294 bcopy(enaddr, sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
295 bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
296 ifp->if_softc = sc;
297 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
298 ifp->if_ioctl = epic_ioctl;
299 ifp->if_start = epic_start;
300 ifp->if_watchdog = epic_watchdog;
301 IFQ_SET_MAXLEN(&ifp->if_snd, EPIC_NTXDESC - 1);
302 IFQ_SET_READY(&ifp->if_snd);
303
304 ifp->if_capabilities = IFCAP_VLAN_MTU;
305
306 /*
307  * Attach the interface.
308  */
309 if_attach(ifp);
310 ether_ifattach(ifp);
311
312 /*
313  * Make sure the interface is shut down during reboot.
314  */
315 sc->sc_sdhook = shutdownhook_establish(epic_shutdown, sc);
316 if (sc->sc_sdhook == NULL)
317 printf("%s: WARNING: unable to establish shutdown hook\n",
318 sc->sc_dev.dv_xname);
319 return;
320
321 /*
322  * Free any resources we've allocated during the failed attach
323  * attempt.  Do this in reverse order and fall through.
324  */
325 fail_6:
326 bus_dmamap_destroy(sc->sc_dmat, sc->sc_nulldmamap);
327 fail_5:
328 for (i = 0; i < EPIC_NRXDESC; i++) {
329 if (EPIC_DSRX(sc, i)->ds_dmamap != NULL)
330 bus_dmamap_destroy(sc->sc_dmat,
331 EPIC_DSRX(sc, i)->ds_dmamap);
332 }
333 fail_4:
334 for (i = 0; i < EPIC_NTXDESC; i++) {
335 if (EPIC_DSTX(sc, i)->ds_dmamap != NULL)
336 bus_dmamap_destroy(sc->sc_dmat,
337 EPIC_DSTX(sc, i)->ds_dmamap);
338 }
339 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
340 fail_3:
341 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
342 fail_2:
343 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
344 sizeof(struct epic_control_data));
345 fail_1:
346 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
347 fail_0:
348 return;
349 }
350
351 /*
352  * Shutdown hook.  Make sure the interface is stopped at reboot time.
353  */
354 void
355 epic_shutdown(void *arg)
356 {
357 struct epic_softc *sc = arg;
358
359 epic_stop(&sc->sc_arpcom.ac_if, 1);
360 }
361
362 /*
363  * Start packet transmission on the interface.
364  * Called from the network stack's if_start hook.
365  */
366 void
367 epic_start(struct ifnet *ifp)
368 {
369 struct epic_softc *sc = ifp->if_softc;
370 struct mbuf *m0, *m;
371 struct epic_txdesc *txd;
372 struct epic_descsoft *ds;
373 struct epic_fraglist *fr;
374 bus_dmamap_t dmamap;
375 int error, firsttx, nexttx, opending, seg;
376 u_int len;
377
378 /*
379  * Remember the previous txpending count and the first descriptor
380  * we will use for this batch of packets.
381  */
382 opending = sc->sc_txpending;
383 firsttx = EPIC_NEXTTX(sc->sc_txlast);
384
385 /*
386  * Loop through the send queue, setting up transmit descriptors
387  * until we drain the queue or run out of free transmit
388  * descriptors.
389  */
390 while (sc->sc_txpending < EPIC_NTXDESC) {
391 /*
392  * Grab a packet off the queue.
393  */
394 IFQ_POLL(&ifp->if_snd, m0);
395 if (m0 == NULL)
396 break;
397 m = NULL;
398
399 /*
400  * Get the next available transmit descriptor, fraglist, and DMA map.
401  */
402 nexttx = EPIC_NEXTTX(sc->sc_txlast);
403 txd = EPIC_CDTX(sc, nexttx);
404 fr = EPIC_CDFL(sc, nexttx);
405 ds = EPIC_DSTX(sc, nexttx);
406 dmamap = ds->ds_dmamap;
407
408 /*
409  * Load the DMA map.  If this fails (or the frame is short and
410  * needs a pad fragment but the map already uses every fragment
411  * slot), copy the packet into a single contiguous mbuf or
412  * cluster and load that instead.
413  */
414 if ((error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
415 BUS_DMA_WRITE|BUS_DMA_NOWAIT)) != 0 ||
416 (m0->m_pkthdr.len < ETHER_PAD_LEN &&
417 dmamap->dm_nsegs == EPIC_NFRAGS)) {
418 if (error == 0)
419 bus_dmamap_unload(sc->sc_dmat, dmamap);
420
421 MGETHDR(m, M_DONTWAIT, MT_DATA);
422 if (m == NULL)
423 break;
424 if (m0->m_pkthdr.len > MHLEN) {
425 MCLGET(m, M_DONTWAIT);
426 if ((m->m_flags & M_EXT) == 0) {
427 m_freem(m);
428 break;
429 }
430 }
431 m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
432 m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
433 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
434 m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
435 if (error)
436 break;
437 }
438 IFQ_DEQUEUE(&ifp->if_snd, m0);
439 if (m != NULL) {
440 m_freem(m0);
441 m0 = m;
442 }
443 
444 /* Initialize the fraglist from the DMA map segments. */
445 for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
446 fr->ef_frags[seg].ef_addr =
447 dmamap->dm_segs[seg].ds_addr;
448 fr->ef_frags[seg].ef_length =
449 dmamap->dm_segs[seg].ds_len;
450 }
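/*
 * Short frames are padded here in software: if the packet is below
 * ETHER_PAD_LEN, append one more fragment pointing at the zero-filled
 * pad buffer so the frame goes out at the minimum length.
 */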
451 len = m0->m_pkthdr.len;
452 if (len < ETHER_PAD_LEN) {
453 fr->ef_frags[seg].ef_addr = sc->sc_nulldma;
454 fr->ef_frags[seg].ef_length = ETHER_PAD_LEN - len;
455 len = ETHER_PAD_LEN;
456 seg++;
457 }
458 fr->ef_nfrags = seg;
459
460 EPIC_CDFLSYNC(sc, nexttx, BUS_DMASYNC_PREWRITE);
461
462
463 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
464 BUS_DMASYNC_PREWRITE);
465
466 /*
467  * Store a pointer to the packet so we can free it later.
468  */
469 ds->ds_mbuf = m0;
470
471
472
473
474 txd->et_control = ET_TXCTL_LASTDESC | ET_TXCTL_FRAGLIST;
475
476 /*
477  * If this is the first descriptor we're enqueueing, don't set the
478  * OWNER bit yet; ownership of the whole chain is handed to the
479  * chip in one shot once every descriptor has been set up.
480  */
481 if (nexttx == firsttx)
482 txd->et_txstatus = TXSTAT_TXLENGTH(len);
483 else
484 txd->et_txstatus =
485 TXSTAT_TXLENGTH(len) | ET_TXSTAT_OWNER;
486
487 EPIC_CDTXSYNC(sc, nexttx,
488 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
489
490
491 sc->sc_txpending++;
492 sc->sc_txlast = nexttx;
493
494 #if NBPFILTER > 0
495 /*
496  * Pass the packet to any BPF listeners.
497  */
498 if (ifp->if_bpf)
499 bpf_mtap(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
500 #endif
501 }
502
503 if (sc->sc_txpending == EPIC_NTXDESC) {
504
505 ifp->if_flags |= IFF_OACTIVE;
506 }
507
508 if (sc->sc_txpending != opending) {
509 /*
510  * We enqueued packets.  If no transmissions were pending before,
511  * the dirty pointer starts at the first descriptor we used.
512  */
513 if (opending == 0)
514 sc->sc_txdirty = firsttx;
515
516 /*
517  * Request a transmit interrupt on the last packet we enqueued so
518  * completed descriptors get reclaimed promptly.
519  */
520 EPIC_CDTX(sc, sc->sc_txlast)->et_control |= ET_TXCTL_IAF;
521 EPIC_CDTXSYNC(sc, sc->sc_txlast,
522 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
523
524 /*
525  * The entire chain is now set up; give the first descriptor to
526  * the chip by setting its OWNER bit.
527  */
528 EPIC_CDTX(sc, firsttx)->et_txstatus |= ET_TXSTAT_OWNER;
529 EPIC_CDTXSYNC(sc, firsttx,
530 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
531
532 /* Kick the transmitter. */
533 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_COMMAND,
534 COMMAND_TXQUEUED);
535
536 /* Set a watchdog timer in case the chip flakes out. */
537 ifp->if_timer = 5;
538 }
539 }
540
541 /*
542  * Watchdog timer handler; fires when a transmission doesn't
543  * complete in time, so reinitialize the interface.
544  */
545 void
546 epic_watchdog(struct ifnet *ifp)
547 {
548 struct epic_softc *sc = ifp->if_softc;
549
550 printf("%s: device timeout\n", sc->sc_dev.dv_xname);
551 ifp->if_oerrors++;
552
553 (void) epic_init(ifp);
554 }
555
556 /*
557  * Handle control requests from the operator.
558  * Called from the network stack's if_ioctl hook.
559  */
560 int
561 epic_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
562 {
563 struct epic_softc *sc = ifp->if_softc;
564 struct ifreq *ifr = (struct ifreq *)data;
565 struct ifaddr *ifa = (struct ifaddr *)data;
566 int s, error;
567
568 s = splnet();
569
570 if ((error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data)) > 0) {
571 splx(s);
572 return (error);
573 }
574
575 switch (cmd) {
576 case SIOCSIFADDR:
577 ifp->if_flags |= IFF_UP;
578
579 switch (ifa->ifa_addr->sa_family) {
580 #ifdef INET
581 case AF_INET:
582 epic_init(ifp);
583 arp_ifinit(&sc->sc_arpcom, ifa);
584 break;
585 #endif
586 default:
587 epic_init(ifp);
588 break;
589 }
590 break;
591
592 case SIOCSIFMTU:
593 if (ifr->ifr_mtu > ETHERMTU || ifr->ifr_mtu < ETHERMIN)
594 error = EINVAL;
595 else if (ifp->if_mtu != ifr->ifr_mtu)
596 ifp->if_mtu = ifr->ifr_mtu;
597 break;
598
599 case SIOCSIFFLAGS:
600 /*
601  * If the interface is marked up, (re)initialize it so flag
602  * changes such as IFF_PROMISC take effect; if it is marked
603  * down while still running, stop it.
604  */
605 
606 if (ifp->if_flags & IFF_UP)
607 epic_init(ifp);
608 else if (ifp->if_flags & IFF_RUNNING)
609 epic_stop(ifp, 1);
610 break;
611
612 case SIOCADDMULTI:
613 case SIOCDELMULTI:
614 error = (cmd == SIOCADDMULTI) ?
615 ether_addmulti(ifr, &sc->sc_arpcom) :
616 ether_delmulti(ifr, &sc->sc_arpcom);
617
618 if (error == ENETRESET) {
619 /*
620  * Multicast list has changed; reprogram the hash filter if
621  * the interface is running (after refreshing the media status).
622  */
623 if (ifp->if_flags & IFF_RUNNING) {
624 mii_pollstat(&sc->sc_mii);
625 epic_set_mchash(sc);
626 }
627 error = 0;
628 }
629 break;
630
631 case SIOCSIFMEDIA:
632 case SIOCGIFMEDIA:
633 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
634 break;
635
636 default:
637 error = EINVAL;
638 }
639 splx(s);
640 return (error);
641 }
642
643 /*
644  * Interrupt handler.
645  */
646 int
647 epic_intr(void *arg)
648 {
649 struct epic_softc *sc = arg;
650 struct ifnet *ifp = &sc->sc_arpcom.ac_if;
651 struct epic_rxdesc *rxd;
652 struct epic_txdesc *txd;
653 struct epic_descsoft *ds;
654 struct mbuf *m;
655 u_int32_t intstat, rxstatus, txstatus;
656 int i, claimed = 0;
657 u_int len;
658
659 top:
660 /*
661  * Get the interrupt status from the EPIC.
662  */
663 intstat = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_INTSTAT);
664 if ((intstat & INTSTAT_INT_ACTV) == 0)
665 return (claimed);
666
667 claimed = 1;
668
669 /*
670  * Acknowledge the interrupts we are about to handle.
671  */
672 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_INTSTAT,
673 intstat & INTMASK);
674
675 /*
676  * Check for receive interrupts.
677  */
678 if (intstat & (INTSTAT_RCC | INTSTAT_RXE | INTSTAT_RQE)) {
679 for (i = sc->sc_rxptr;; i = EPIC_NEXTRX(i)) {
680 rxd = EPIC_CDRX(sc, i);
681 ds = EPIC_DSRX(sc, i);
682
683 EPIC_CDRXSYNC(sc, i,
684 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
685
686 rxstatus = rxd->er_rxstatus;
687 if (rxstatus & ER_RXSTAT_OWNER) {
688 /*
689  * We have processed all of the receive descriptors the
690  * chip has handed back to us.
691  */
692 break;
693 }
694
695 /*
696  * Make sure the packet arrived intact.  If an error occurred,
697  * count it, reinitialize the descriptor, and let the buffer be
698  * reused the next time it comes around the ring.
699  */
700 
701 if ((rxstatus & ER_RXSTAT_PKTINTACT) == 0) {
702 if (rxstatus & ER_RXSTAT_CRCERROR)
703 printf("%s: CRC error\n",
704 sc->sc_dev.dv_xname);
705 if (rxstatus & ER_RXSTAT_ALIGNERROR)
706 printf("%s: alignment error\n",
707 sc->sc_dev.dv_xname);
708 ifp->if_ierrors++;
709 EPIC_INIT_RXDESC(sc, i);
710 continue;
711 }
712
713 bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
714 ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
715
716 /*
717  * The chip includes the CRC with every packet; trim it off
718  * before handing the packet up.
719  */
720 len = RXSTAT_RXLENGTH(rxstatus) - ETHER_CRC_LEN;
721
722 if (len < sizeof(struct ether_header)) {
723 /*
724  * Runt packet; drop it and reuse the buffer.
725  */
726 ifp->if_ierrors++;
727 EPIC_INIT_RXDESC(sc, i);
728 bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
729 ds->ds_dmamap->dm_mapsize,
730 BUS_DMASYNC_PREREAD);
731 continue;
732 }
733
734 /*
735  * If the packet is small enough to fit in a single header
736  * mbuf, allocate one and copy the data into it.  This greatly
737  * reduces memory consumption when receiving lots of small
738  * packets.
739  *
740  * Otherwise, add a fresh buffer to the receive ring and pass
741  * the old mbuf (with the packet in it) up the stack.  If that
742  * fails, drop the packet and recycle the old buffer.
743  */
744 
745 if (epic_copy_small != 0 && len <= MHLEN) {
746 MGETHDR(m, M_DONTWAIT, MT_DATA);
747 if (m == NULL)
748 goto dropit;
749 memcpy(mtod(m, caddr_t),
750 mtod(ds->ds_mbuf, caddr_t), len);
751 EPIC_INIT_RXDESC(sc, i);
752 bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
753 ds->ds_dmamap->dm_mapsize,
754 BUS_DMASYNC_PREREAD);
755 } else {
756 m = ds->ds_mbuf;
757 if (epic_add_rxbuf(sc, i) != 0) {
758 dropit:
759 ifp->if_ierrors++;
760 EPIC_INIT_RXDESC(sc, i);
761 bus_dmamap_sync(sc->sc_dmat,
762 ds->ds_dmamap, 0,
763 ds->ds_dmamap->dm_mapsize,
764 BUS_DMASYNC_PREREAD);
765 continue;
766 }
767 }
768
769 m->m_pkthdr.rcvif = ifp;
770 m->m_pkthdr.len = m->m_len = len;
771
772 #if NBPFILTER > 0
773 /*
774  * Pass the packet to any BPF listeners.
775  */
776 
777 if (ifp->if_bpf)
778 bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
779 #endif
780
781
782 ether_input_mbuf(ifp, m);
783 ifp->if_ipackets++;
784 }
785
786
787 sc->sc_rxptr = i;
788
789 /*
790  * Check for a receive-queue-empty condition.
791  */
792 if (intstat & INTSTAT_RQE) {
793 printf("%s: receiver queue empty\n",
794 sc->sc_dev.dv_xname);
795 /*
796  * The ring is already set up; simply restart the receiver at
797  * the current receive pointer.
798  */
799 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_PRCDAR,
800 EPIC_CDRXADDR(sc, sc->sc_rxptr));
801 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_COMMAND,
802 COMMAND_RXQUEUED | COMMAND_START_RX);
803 }
804 }
805
806 /*
807  * Check for transmission-complete interrupts.
808  */
809 if (intstat & (INTSTAT_TXC | INTSTAT_TXU)) {
810 ifp->if_flags &= ~IFF_OACTIVE;
811 for (i = sc->sc_txdirty; sc->sc_txpending != 0;
812 i = EPIC_NEXTTX(i), sc->sc_txpending--) {
813 txd = EPIC_CDTX(sc, i);
814 ds = EPIC_DSTX(sc, i);
815
816 EPIC_CDTXSYNC(sc, i,
817 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
818
819 txstatus = txd->et_txstatus;
820 if (txstatus & ET_TXSTAT_OWNER)
821 break;
822
823 EPIC_CDFLSYNC(sc, i, BUS_DMASYNC_POSTWRITE);
824
825 bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap,
826 0, ds->ds_dmamap->dm_mapsize,
827 BUS_DMASYNC_POSTWRITE);
828 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
829 m_freem(ds->ds_mbuf);
830 ds->ds_mbuf = NULL;
831
832 /*
833  * Update statistics and check for transmit errors.
834  */
835 if ((txstatus & ET_TXSTAT_PACKETTX) == 0)
836 ifp->if_oerrors++;
837 else
838 ifp->if_opackets++;
839 ifp->if_collisions +=
840 TXSTAT_COLLISIONS(txstatus);
841 if (txstatus & ET_TXSTAT_CARSENSELOST)
842 printf("%s: lost carrier\n",
843 sc->sc_dev.dv_xname);
844 }
845
846
847 sc->sc_txdirty = i;
848
849 /*
850  * Cancel the watchdog timer if there are no more pending
851  * transmissions.
852  */
853 if (sc->sc_txpending == 0)
854 ifp->if_timer = 0;
855
856 /*
857  * Kick the transmitter after a DMA underrun.
858  */
859 if (intstat & INTSTAT_TXU) {
860 printf("%s: transmit underrun\n", sc->sc_dev.dv_xname);
861 bus_space_write_4(sc->sc_st, sc->sc_sh,
862 EPIC_COMMAND, COMMAND_TXUGO);
863 if (sc->sc_txpending)
864 bus_space_write_4(sc->sc_st, sc->sc_sh,
865 EPIC_COMMAND, COMMAND_TXQUEUED);
866 }
867
868 /*
869  * Try to queue more packets.
870  */
871 epic_start(ifp);
872 }
873
874 /*
875  * Check for fatal interrupts.
876  */
877 if (intstat & INTSTAT_FATAL_INT) {
878 if (intstat & INTSTAT_PTA)
879 printf("%s: PCI target abort error\n",
880 sc->sc_dev.dv_xname);
881 else if (intstat & INTSTAT_PMA)
882 printf("%s: PCI master abort error\n",
883 sc->sc_dev.dv_xname);
884 else if (intstat & INTSTAT_APE)
885 printf("%s: PCI address parity error\n",
886 sc->sc_dev.dv_xname);
887 else if (intstat & INTSTAT_DPE)
888 printf("%s: PCI data parity error\n",
889 sc->sc_dev.dv_xname);
890 else
891 printf("%s: unknown fatal error\n",
892 sc->sc_dev.dv_xname);
893 (void) epic_init(ifp);
894 }
895
896 /*
897  * Check for additional interrupts before returning.
898  */
899 goto top;
900 }
901
902 /*
903  * One second timer, used to tick the MII.
904  */
905 void
906 epic_tick(void *arg)
907 {
908 struct epic_softc *sc = arg;
909 int s;
910
911 s = splnet();
912 mii_tick(&sc->sc_mii);
913 splx(s);
914
915 timeout_add(&sc->sc_mii_timeout, hz);
916 }
917
918 /*
919  * Fix up the EPIC's clock source after a reset.
920  */
921 void
922 epic_fixup_clock_source(struct epic_softc *sc)
923 {
924 int i;
925
926 /*
927  * The EPIC's clock source can be left in a bad state following
928  * a reset, which manifests as the chip failing to notice host
929  * writes to its registers.  Issuing at least 16 consecutive
930  * writes to the TEST register (as done below) restores the
931  * correct clock source.
932  */
933 
934 for (i = 0; i < 16; i++)
935 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_TEST,
936 TEST_CLOCKTEST);
937 }
938
939 /*
940  * Perform a soft reset on the EPIC.
941  */
942 void
943 epic_reset(struct epic_softc *sc)
944 {
945
946 epic_fixup_clock_source(sc);
947
948 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_GENCTL, 0);
949 delay(100);
950 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_GENCTL, GENCTL_SOFTRESET);
951 delay(100);
952
953 epic_fixup_clock_source(sc);
954 }
955
956 /*
957  * Initialize the interface.
958  */
959 int
960 epic_init(struct ifnet *ifp)
961 {
962 struct epic_softc *sc = ifp->if_softc;
963 bus_space_tag_t st = sc->sc_st;
964 bus_space_handle_t sh = sc->sc_sh;
965 struct epic_txdesc *txd;
966 struct epic_descsoft *ds;
967 u_int32_t genctl, reg0;
968 int i, error = 0;
969
970 /*
971  * Cancel any pending I/O.
972  */
973 epic_stop(ifp, 0);
974
975 /*
976  * Reset the EPIC to a known state.
977  */
978 epic_reset(sc);
979
980
981
982
983 bus_space_write_4(st, sh, EPIC_TXTEST, 0);
984
985 /*
986  * Initialize the EPIC genctl register: set the receive FIFO
987  * threshold and the ONECOPY option, and select big-endian
988  * descriptor mode when the host is big-endian.
989  */
990 
991 genctl = GENCTL_RX_FIFO_THRESH0 | GENCTL_ONECOPY;
992 #if BYTE_ORDER == BIG_ENDIAN
993 genctl |= GENCTL_BIG_ENDIAN;
994 #endif
995 bus_space_write_4(st, sh, EPIC_GENCTL, genctl);
996
997 /*
998  * Reset the MII bus and PHY.
999  */
1000 reg0 = bus_space_read_4(st, sh, EPIC_NVCTL);
1001 bus_space_write_4(st, sh, EPIC_NVCTL, reg0 | NVCTL_GPIO1 | NVCTL_GPOE1);
1002 bus_space_write_4(st, sh, EPIC_MIICFG, MIICFG_ENASER);
1003 bus_space_write_4(st, sh, EPIC_GENCTL, genctl | GENCTL_RESET_PHY);
1004 delay(100);
1005 bus_space_write_4(st, sh, EPIC_GENCTL, genctl);
1006 delay(1000);
1007 bus_space_write_4(st, sh, EPIC_NVCTL, reg0);
1008
1009 /*
1010  * Initialize the Ethernet address registers.
1011  */
1012 reg0 = sc->sc_arpcom.ac_enaddr[1] << 8 | sc->sc_arpcom.ac_enaddr[0];
1013 bus_space_write_4(st, sh, EPIC_LAN0, reg0);
1014 reg0 = sc->sc_arpcom.ac_enaddr[3] << 8 | sc->sc_arpcom.ac_enaddr[2];
1015 bus_space_write_4(st, sh, EPIC_LAN1, reg0);
1016 reg0 = sc->sc_arpcom.ac_enaddr[5] << 8 | sc->sc_arpcom.ac_enaddr[4];
1017 bus_space_write_4(st, sh, EPIC_LAN2, reg0);
1018
1019 /*
1020  * Initialize receive control.  Preserve the external buffer size
1021  * bits, and accept multicast, broadcast, and (if IFF_PROMISC) all frames.
1022  */
1023 reg0 = bus_space_read_4(st, sh, EPIC_RXCON) &
1024 (RXCON_EXTBUFSIZESEL1 | RXCON_EXTBUFSIZESEL0);
1025 reg0 |= (RXCON_RXMULTICAST | RXCON_RXBROADCAST);
1026 if (ifp->if_flags & IFF_PROMISC)
1027 reg0 |= RXCON_PROMISCMODE;
1028 bus_space_write_4(st, sh, EPIC_RXCON, reg0);
1029
1030
1031 epic_mediachange(ifp);
1032
1033
1034 epic_set_mchash(sc);
1035
1036 /*
1037  * Initialize the transmit descriptor ring.  txlast is initialized
1038  * to the end of the list so that it wraps around correctly when
1039  * the first packet is transmitted.
1040  */
1041 for (i = 0; i < EPIC_NTXDESC; i++) {
1042 txd = EPIC_CDTX(sc, i);
1043 memset(txd, 0, sizeof(struct epic_txdesc));
1044 txd->et_bufaddr = EPIC_CDFLADDR(sc, i);
1045 txd->et_nextdesc = EPIC_CDTXADDR(sc, EPIC_NEXTTX(i));
1046 EPIC_CDTXSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1047 }
1048 sc->sc_txpending = 0;
1049 sc->sc_txdirty = 0;
1050 sc->sc_txlast = EPIC_NTXDESC - 1;
1051
1052 /*
1053  * Initialize the receive descriptor ring, allocating buffers as needed.
1054  */
1055 for (i = 0; i < EPIC_NRXDESC; i++) {
1056 ds = EPIC_DSRX(sc, i);
1057 if (ds->ds_mbuf == NULL) {
1058 if ((error = epic_add_rxbuf(sc, i)) != 0) {
1059 printf("%s: unable to allocate or map rx "
1060 "buffer %d error = %d\n",
1061 sc->sc_dev.dv_xname, i, error);
1062
1063
1064
1065
1066 epic_rxdrain(sc);
1067 goto out;
1068 }
1069 } else
1070 EPIC_INIT_RXDESC(sc, i);
1071 }
1072 sc->sc_rxptr = 0;
1073
1074 /*
1075  * Initialize the interrupt mask and enable interrupts.
1076  */
1077 bus_space_write_4(st, sh, EPIC_INTMASK, INTMASK);
1078 bus_space_write_4(st, sh, EPIC_GENCTL, genctl | GENCTL_INTENA);
1079
1080 /*
1081  * Give the transmit and receive rings to the EPIC.
1082  */
1083 bus_space_write_4(st, sh, EPIC_PTCDAR,
1084 EPIC_CDTXADDR(sc, EPIC_NEXTTX(sc->sc_txlast)));
1085 bus_space_write_4(st, sh, EPIC_PRCDAR,
1086 EPIC_CDRXADDR(sc, sc->sc_rxptr));
1087
1088 /*
1089  * Start the receiver.
1090  */
1091 bus_space_write_4(st, sh, EPIC_COMMAND,
1092 COMMAND_RXQUEUED | COMMAND_START_RX);
1093
1094
1095
1096
1097 ifp->if_flags |= IFF_RUNNING;
1098 ifp->if_flags &= ~IFF_OACTIVE;
1099
1100 /*
1101  * Start the one-second MII tick clock.
1102  */
1103 timeout_add(&sc->sc_mii_timeout, hz);
1104
1105
1106
1107
1108 epic_start(ifp);
1109
1110 out:
1111 if (error)
1112 printf("%s: interface not running\n", sc->sc_dev.dv_xname);
1113 return (error);
1114 }
1115
1116 /*
1117  * Drain the receive queue, freeing any attached mbufs.
1118  */
1119 void
1120 epic_rxdrain(struct epic_softc *sc)
1121 {
1122 struct epic_descsoft *ds;
1123 int i;
1124
1125 for (i = 0; i < EPIC_NRXDESC; i++) {
1126 ds = EPIC_DSRX(sc, i);
1127 if (ds->ds_mbuf != NULL) {
1128 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1129 m_freem(ds->ds_mbuf);
1130 ds->ds_mbuf = NULL;
1131 }
1132 }
1133 }
1134
1135 /*
1136  * Stop transmission on the interface.
1137  */
1138 void
1139 epic_stop(struct ifnet *ifp, int disable)
1140 {
1141 struct epic_softc *sc = ifp->if_softc;
1142 bus_space_tag_t st = sc->sc_st;
1143 bus_space_handle_t sh = sc->sc_sh;
1144 struct epic_descsoft *ds;
1145 u_int32_t reg;
1146 int i;
1147
1148 /*
1149  * Stop the one-second MII tick clock.
1150  */
1151 timeout_del(&sc->sc_mii_timeout);
1152
1153
1154
1155
1156 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1157 ifp->if_timer = 0;
1158
1159
1160 mii_down(&sc->sc_mii);
1161
1162
1163 epic_fixup_clock_source(sc);
1164 
1165 /*
1166  * Disable interrupts.
1167  */
1168 reg = bus_space_read_4(st, sh, EPIC_GENCTL);
1169 bus_space_write_4(st, sh, EPIC_GENCTL, reg & ~GENCTL_INTENA);
1170 bus_space_write_4(st, sh, EPIC_INTMASK, 0);
1171
1172 /*
1173  * Stop the DMA engines and the receiver.
1174  */
1175 bus_space_write_4(st, sh, EPIC_COMMAND, COMMAND_STOP_RDMA |
1176 COMMAND_STOP_TDMA | COMMAND_STOP_RX);
1177
1178 /*
1179  * Release any queued transmit buffers.
1180  */
1181 for (i = 0; i < EPIC_NTXDESC; i++) {
1182 ds = EPIC_DSTX(sc, i);
1183 if (ds->ds_mbuf != NULL) {
1184 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1185 m_freem(ds->ds_mbuf);
1186 ds->ds_mbuf = NULL;
1187 }
1188 }
1189
1190 if (disable)
1191 epic_rxdrain(sc);
1192 }
1193
1194 /*
1195  * Read data from the serial EEPROM.
1196  */
1197 void
1198 epic_read_eeprom(struct epic_softc *sc, int word, int wordcnt, u_int16_t *data)
1199 {
1200 bus_space_tag_t st = sc->sc_st;
1201 bus_space_handle_t sh = sc->sc_sh;
1202 u_int16_t reg;
1203 int i, x;
1204
1205 #define EEPROM_WAIT_READY(st, sh) \
1206 while ((bus_space_read_4((st), (sh), EPIC_EECTL) & EECTL_EERDY) == 0) \
1207 /* nothing */
1208 
1209 /*
1210  * Enable the EEPROM.
1211  */
1212 bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE);
1213 EEPROM_WAIT_READY(st, sh);
1214
1215 for (i = 0; i < wordcnt; i++) {
1216
1217 bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE|EECTL_EECS);
1218 EEPROM_WAIT_READY(st, sh);
1219 
1220 /* Shift in the READ opcode. */
1221 for (x = 3; x > 0; x--) {
1222 reg = EECTL_ENABLE|EECTL_EECS;
1223 if (EPIC_EEPROM_OPC_READ & (1 << (x - 1)))
1224 reg |= EECTL_EEDI;
1225 bus_space_write_4(st, sh, EPIC_EECTL, reg);
1226 EEPROM_WAIT_READY(st, sh);
1227 bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
1228 EEPROM_WAIT_READY(st, sh);
1229 bus_space_write_4(st, sh, EPIC_EECTL, reg);
1230 EEPROM_WAIT_READY(st, sh);
1231 }
1232 
1233 /* Shift in the word address. */
1234 for (x = 6; x > 0; x--) {
1235 reg = EECTL_ENABLE|EECTL_EECS;
1236 if ((word + i) & (1 << (x - 1)))
1237 reg |= EECTL_EEDI;
1238 bus_space_write_4(st, sh, EPIC_EECTL, reg);
1239 EEPROM_WAIT_READY(st, sh);
1240 bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
1241 EEPROM_WAIT_READY(st, sh);
1242 bus_space_write_4(st, sh, EPIC_EECTL, reg);
1243 EEPROM_WAIT_READY(st, sh);
1244 }
1245 
1246 /* Shift out the data, MSB first. */
1247 reg = EECTL_ENABLE|EECTL_EECS;
1248 data[i] = 0;
1249 for (x = 16; x > 0; x--) {
1250 bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
1251 EEPROM_WAIT_READY(st, sh);
1252 if (bus_space_read_4(st, sh, EPIC_EECTL) & EECTL_EEDO)
1253 data[i] |= (1 << (x - 1));
1254 bus_space_write_4(st, sh, EPIC_EECTL, reg);
1255 EEPROM_WAIT_READY(st, sh);
1256 }
1257
1258
1259 bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE);
1260 EEPROM_WAIT_READY(st, sh);
1261 }
1262 
1263 /*
1264  * Disable the EEPROM.
1265  */
1266 bus_space_write_4(st, sh, EPIC_EECTL, 0);
1267
1268 #undef EEPROM_WAIT_READY
1269 }
1270
1271 /*
1272  * Add a receive buffer to the indicated descriptor.
1273  */
1274 int
1275 epic_add_rxbuf(struct epic_softc *sc, int idx)
1276 {
1277 struct epic_descsoft *ds = EPIC_DSRX(sc, idx);
1278 struct mbuf *m;
1279 int error;
1280
1281 MGETHDR(m, M_DONTWAIT, MT_DATA);
1282 if (m == NULL)
1283 return (ENOBUFS);
1284
1285 MCLGET(m, M_DONTWAIT);
1286 if ((m->m_flags & M_EXT) == 0) {
1287 m_freem(m);
1288 return (ENOBUFS);
1289 }
1290
1291 if (ds->ds_mbuf != NULL)
1292 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1293
1294 ds->ds_mbuf = m;
1295
1296 error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
1297 m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
1298 BUS_DMA_READ|BUS_DMA_NOWAIT);
1299 if (error) {
1300 printf("%s: can't load rx DMA map %d, error = %d\n",
1301 sc->sc_dev.dv_xname, idx, error);
1302 panic("epic_add_rxbuf");
1303 }
1304
1305 bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
1306 ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1307
1308 EPIC_INIT_RXDESC(sc, idx);
1309
1310 return (0);
1311 }
1312
1313 /*
1314  * Set the EPIC multicast hash table.
1315  *
1316  * NOTE: We rely on a recently-updated mii_media_active here!
1317  */
1318 void
1319 epic_set_mchash(struct epic_softc *sc)
1320 {
1321 struct arpcom *ac = &sc->sc_arpcom;
1322 struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1323 struct ether_multi *enm;
1324 struct ether_multistep step;
1325 u_int32_t hash, mchash[4];
1326
1327 /*
1328  * Set up the multicast address filter by passing all multicast
1329  * addresses through a CRC generator, and then using the high-order
1330  * 6 bits as an index into the 64-bit multicast hash table (only
1331  * the lower 16 bits of each of the four multicast hash registers
1332  * are valid).  The high-order 2 bits select the register, while
1333  * the low-order 4 bits select the bit within the register.
1334  */
1335 
1336 if (ifp->if_flags & IFF_PROMISC)
1337 goto allmulti;
1338
1339 if (IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_10_T) {
1340
1341 goto allmulti;
1342 }
1343
1344 mchash[0] = mchash[1] = mchash[2] = mchash[3] = 0;
1345
1346 ETHER_FIRST_MULTI(step, ac, enm);
1347 while (enm != NULL) {
1348 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN))
1349 goto allmulti;
1350
1351 hash = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
1352 hash >>= 26;
1353
1354
1355 mchash[hash >> 4] |= 1 << (hash & 0xf);
1356
1357 ETHER_NEXT_MULTI(step, enm);
1358 }
1359
1360 ifp->if_flags &= ~IFF_ALLMULTI;
1361 goto sethash;
1362
1363 allmulti:
1364 ifp->if_flags |= IFF_ALLMULTI;
1365 mchash[0] = mchash[1] = mchash[2] = mchash[3] = 0xffff;
1366
1367 sethash:
1368 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC0, mchash[0]);
1369 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC1, mchash[1]);
1370 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC2, mchash[2]);
1371 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC3, mchash[3]);
1372 }
1373
1374 /*
1375  * Wait for the MII management interface to finish the previous operation.
1376  */
1377 int
1378 epic_mii_wait(struct epic_softc *sc, u_int32_t rw)
1379 {
1380 int i;
1381
1382 for (i = 0; i < 50; i++) {
1383 if ((bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL) & rw)
1384 == 0)
1385 break;
1386 delay(2);
1387 }
1388 if (i == 50) {
1389 printf("%s: MII timed out\n", sc->sc_dev.dv_xname);
1390 return (1);
1391 }
1392
1393 return (0);
1394 }
1395
1396 /*
1397  * Read a PHY register via the MII.
1398  */
1399 int
1400 epic_mii_read(struct device *self, int phy, int reg)
1401 {
1402 struct epic_softc *sc = (struct epic_softc *)self;
1403
1404 if (epic_mii_wait(sc, MMCTL_WRITE))
1405 return (0);
1406
1407 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL,
1408 MMCTL_ARG(phy, reg, MMCTL_READ));
1409
1410 if (epic_mii_wait(sc, MMCTL_READ))
1411 return (0);
1412
1413 return (bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MMDATA) &
1414 MMDATA_MASK);
1415 }
1416
1417 /*
1418  * Write a PHY register via the MII.
1419  */
1420 void
1421 epic_mii_write(struct device *self, int phy, int reg, int val)
1422 {
1423 struct epic_softc *sc = (struct epic_softc *)self;
1424
1425 if (epic_mii_wait(sc, MMCTL_WRITE))
1426 return;
1427
1428 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMDATA, val);
1429 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL,
1430 MMCTL_ARG(phy, reg, MMCTL_WRITE));
1431 }
1432
1433 /*
1434  * Callback from the PHY when the media changes.
1435  */
1436 void
1437 epic_statchg(struct device *self)
1438 {
1439 struct epic_softc *sc = (struct epic_softc *)self;
1440 u_int32_t txcon, miicfg;
1441
1442 /*
1443  * Update loopback bits in TXCON to reflect duplex mode.
1444  */
1445 txcon = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_TXCON);
1446 if (sc->sc_mii.mii_media_active & IFM_FDX)
1447 txcon |= (TXCON_LOOPBACK_D1|TXCON_LOOPBACK_D2);
1448 else
1449 txcon &= ~(TXCON_LOOPBACK_D1|TXCON_LOOPBACK_D2);
1450 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_TXCON, txcon);
1451
1452
1453 if (sc->sc_hwflags & EPIC_DUPLEXLED_ON_694) {
1454 miicfg = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG);
1455 if (IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX)
1456 miicfg |= MIICFG_ENABLE;
1457 else
1458 miicfg &= ~MIICFG_ENABLE;
1459 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG, miicfg);
1460 }
1461
1462 /*
1463  * The hash filter setup depends on the current media (see
1464  * epic_set_mchash()), so update it whenever the media changes.
1465  */
1466 epic_set_mchash(sc);
1467 }
1468
1469 /*
1470  * Callback from ifmedia to request current media status.
1471  */
1472 void
1473 epic_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1474 {
1475 struct epic_softc *sc = ifp->if_softc;
1476
1477 mii_pollstat(&sc->sc_mii);
1478 ifmr->ifm_status = sc->sc_mii.mii_media_status;
1479 ifmr->ifm_active = sc->sc_mii.mii_media_active;
1480 }
1481
1482 /*
1483  * Callback from ifmedia to request a new media setting.
1484  */
1485 int
1486 epic_mediachange(struct ifnet *ifp)
1487 {
1488 struct epic_softc *sc = ifp->if_softc;
1489 struct mii_data *mii = &sc->sc_mii;
1490 struct ifmedia *ifm = &mii->mii_media;
1491 int media = ifm->ifm_cur->ifm_media;
1492 u_int32_t miicfg;
1493 struct mii_softc *miisc;
1494 int cfg;
1495
1496 if (!(ifp->if_flags & IFF_UP))
1497 return (0);
1498
1499 if (IFM_INST(media) != sc->sc_serinst) {
1500
1501 #ifdef EPICMEDIADEBUG
1502 printf("%s: parallel mode\n", ifp->if_xname);
1503 #endif
1504 miicfg = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG);
1505 miicfg &= ~MIICFG_SERMODEENA;
1506 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG, miicfg);
1507 }
1508
1509 mii_mediachg(mii);
1510
1511 if (IFM_INST(media) == sc->sc_serinst) {
1512
1513 #ifdef EPICMEDIADEBUG
1514 printf("%s: serial mode\n", ifp->if_xname);
1515 #endif
1516 miicfg = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG);
1517 miicfg |= (MIICFG_SERMODEENA | MIICFG_ENABLE);
1518 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG, miicfg);
1519
1520
1521 mii->mii_media_active = media;
1522 mii->mii_media_status = 0;
1523
1524 epic_statchg(&sc->sc_dev);
1525 return (0);
1526 }
1527
1528
1529 for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
1530 miisc = LIST_NEXT(miisc, mii_list)) {
1531 if (IFM_INST(media) == miisc->mii_inst)
1532 break;
1533 }
1534 if (!miisc) {
1535 printf("epic_mediachange: can't happen\n");
1536 return (0);
1537 }
1538 #ifdef EPICMEDIADEBUG
1539 printf("%s: using phy %s\n", ifp->if_xname,
1540 miisc->mii_dev.dv_xname);
1541 #endif
1542
1543 if (miisc->mii_flags & MIIF_HAVEFIBER) {
1544 /*
1545  * Power the fiber transceiver up or down via the LXT PHY LED config bits.
1546  */
1547 cfg = PHY_READ(miisc, MII_LXTPHY_CONFIG);
1548 if (IFM_SUBTYPE(media) == IFM_100_FX) {
1549 #ifdef EPICMEDIADEBUG
1550 printf("%s: power up fiber\n", ifp->if_xname);
1551 #endif
1552 cfg |= (CONFIG_LEDC1 | CONFIG_LEDC0);
1553 } else {
1554 #ifdef EPICMEDIADEBUG
1555 printf("%s: power down fiber\n", ifp->if_xname);
1556 #endif
1557 cfg &= ~(CONFIG_LEDC1 | CONFIG_LEDC0);
1558 }
1559 PHY_WRITE(miisc, MII_LXTPHY_CONFIG, cfg);
1560 }
1561
1562 return (0);
1563 }