This source file includes following definitions.
- stge_match
- stge_attach
- stge_shutdown
- stge_dma_wait
- stge_start
- stge_watchdog
- stge_ioctl
- stge_intr
- stge_txintr
- stge_rxintr
- stge_tick
- stge_stats_update
- stge_reset
- stge_init
- stge_rxdrain
- stge_stop
- stge_eeprom_wait
- stge_read_eeprom
- stge_add_rxbuf
- stge_set_filter
- stge_mii_readreg
- stge_mii_writereg
- stge_mii_statchg
- stge_mii_bitbang_read
- stge_mii_bitbang_write
- stge_mediastatus
- stge_mediachange
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45 #include "bpfilter.h"
46 #include "vlan.h"
47
48 #include <sys/param.h>
49 #include <sys/systm.h>
50 #include <sys/timeout.h>
51 #include <sys/mbuf.h>
52 #include <sys/malloc.h>
53 #include <sys/kernel.h>
54 #include <sys/socket.h>
55 #include <sys/ioctl.h>
56 #include <sys/errno.h>
57 #include <sys/device.h>
58 #include <sys/queue.h>
59
60 #include <net/if.h>
61 #include <net/if_dl.h>
62
63 #ifdef INET
64 #include <netinet/in.h>
65 #include <netinet/in_systm.h>
66 #include <netinet/in_var.h>
67 #include <netinet/ip.h>
68 #include <netinet/if_ether.h>
69 #endif
70
71 #include <net/if_media.h>
72
73 #if NVLAN > 0
74 #include <net/if_types.h>
75 #include <net/if_vlan_var.h>
76 #endif
77
78 #if NBPFILTER > 0
79 #include <net/bpf.h>
80 #endif
81
82 #include <machine/bus.h>
83 #include <machine/intr.h>
84
85 #include <dev/mii/mii.h>
86 #include <dev/mii/miivar.h>
87 #include <dev/mii/mii_bitbang.h>
88
89 #include <dev/pci/pcireg.h>
90 #include <dev/pci/pcivar.h>
91 #include <dev/pci/pcidevs.h>
92
93 #include <dev/pci/if_stgereg.h>
94
/* Interface (ifnet) entry points. */
void	stge_start(struct ifnet *);
void	stge_watchdog(struct ifnet *);
int	stge_ioctl(struct ifnet *, u_long, caddr_t);
int	stge_init(struct ifnet *);
void	stge_stop(struct ifnet *, int);

/* Shutdown hook callback. */
void	stge_shutdown(void *);

/* Hardware reset / buffer management helpers. */
void	stge_reset(struct stge_softc *);
void	stge_rxdrain(struct stge_softc *);
int	stge_add_rxbuf(struct stge_softc *, int);
void	stge_read_eeprom(struct stge_softc *, int, uint16_t *);
void	stge_tick(void *);

/* Periodic statistics harvesting. */
void	stge_stats_update(struct stge_softc *);

/* Receive filter (promiscuous/multicast) programming. */
void	stge_set_filter(struct stge_softc *);

/* Interrupt service routine and its TX/RX sub-handlers. */
int	stge_intr(void *);
void	stge_txintr(struct stge_softc *);
void	stge_rxintr(struct stge_softc *);

/* MII (PHY) access callbacks handed to the mii layer. */
int	stge_mii_readreg(struct device *, int, int);
void	stge_mii_writereg(struct device *, int, int, int);
void	stge_mii_statchg(struct device *);

/* ifmedia callbacks. */
int	stge_mediachange(struct ifnet *);
void	stge_mediastatus(struct ifnet *, struct ifmediareq *);

/* Autoconf glue. */
int	stge_match(struct device *, void *, void *);
void	stge_attach(struct device *, struct device *, void *);

/*
 * When non-zero, small received frames are copied into a fresh mbuf
 * header instead of passing up the full cluster (see stge_rxintr()).
 */
int	stge_copy_small = 0;
128
/* Autoconfiguration attachment glue. */
struct cfattach stge_ca = {
	sizeof(struct stge_softc), stge_match, stge_attach,
};

/* Driver definition: "stge" network interface devices. */
struct cfdriver stge_cd = {
	0, "stge", DV_IFNET
};
136
uint32_t stge_mii_bitbang_read(struct device *);
void	stge_mii_bitbang_write(struct device *, uint32_t);

/*
 * Glue for the common MII bit-banging code: register accessors plus
 * the PhyCtrl bit assignments for data out/in, clock, and direction.
 * Note MgmtData is used for both data-out and data-in (bidirectional pin).
 */
const struct mii_bitbang_ops stge_mii_bitbang_ops = {
	stge_mii_bitbang_read,
	stge_mii_bitbang_write,
	{
		PC_MgmtData,		/* MII_BIT_MDO */
		PC_MgmtData,		/* MII_BIT_MDI */
		PC_MgmtClk,		/* MII_BIT_MDC */
		PC_MgmtDir,		/* MII_BIT_DIR_HOST_PHY */
		0,			/* MII_BIT_DIR_PHY_HOST */
	}
};
151
152
153
154
/*
 * PCI vendor/product pairs this driver attaches to.  The TC9021 core
 * was sold under several vendor IDs (Sundance, Tamarack, D-Link, Antares).
 */
const struct pci_matchid stge_devices[] = {
	{ PCI_VENDOR_SUNDANCE,	PCI_PRODUCT_SUNDANCE_ST1023 },
	{ PCI_VENDOR_SUNDANCE,	PCI_PRODUCT_SUNDANCE_ST2021 },
	{ PCI_VENDOR_TAMARACK,	PCI_PRODUCT_TAMARACK_TC9021 },
	{ PCI_VENDOR_TAMARACK,	PCI_PRODUCT_TAMARACK_TC9021_ALT },
	{ PCI_VENDOR_SUNDANCE,	PCI_PRODUCT_SUNDANCE_TC9021 },
	{ PCI_VENDOR_SUNDANCE,	PCI_PRODUCT_SUNDANCE_TC9021_ALT },
	{ PCI_VENDOR_DLINK,	PCI_PRODUCT_DLINK_DGE550T },
	{ PCI_VENDOR_ANTARES,	PCI_PRODUCT_ANTARES_TC9021 }
};
169
170 int
171 stge_match(struct device *parent, void *match, void *aux)
172 {
173 return (pci_matchbyid((struct pci_attach_args *)aux, stge_devices,
174 sizeof(stge_devices) / sizeof(stge_devices[0])));
175 }
176
/*
 * Attach routine: map registers, hook the interrupt, allocate and map
 * the DMA control data and per-descriptor DMA maps, read the station
 * address, attach the MII/PHY layer, and register the network interface.
 * On failure, unwinds resources through the fail_* labels.
 */
void
stge_attach(struct device *parent, struct device *self, void *aux)
{
	struct stge_softc *sc = (struct stge_softc *) self;
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	bus_space_tag_t iot, memt;
	bus_space_handle_t ioh, memh;
	bus_dma_segment_t seg;
	bus_size_t iosize;
	int ioh_valid, memh_valid;
	int i, rseg, error;
	pcireg_t pmode;
	int pmreg;

	timeout_set(&sc->sc_timeout, stge_tick, sc);

	sc->sc_rev = PCI_REVISION(pa->pa_class);

	/*
	 * Try both I/O and memory BARs; memory space is preferred below.
	 * NOTE(review): both calls write `iosize`, so when both mappings
	 * succeed the size recorded is the memory BAR's — the fail_0
	 * unmap relies on that matching the selected mapping; verify.
	 */
	ioh_valid = (pci_mapreg_map(pa, STGE_PCI_IOBA,
	    PCI_MAPREG_TYPE_IO, 0,
	    &iot, &ioh, NULL, &iosize, 0) == 0);
	memh_valid = (pci_mapreg_map(pa, STGE_PCI_MMBA,
	    PCI_MAPREG_TYPE_MEM|PCI_MAPREG_MEM_TYPE_32BIT, 0,
	    &memt, &memh, NULL, &iosize, 0) == 0);

	/* Prefer memory space over I/O space. */
	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else if (ioh_valid) {
		sc->sc_st = iot;
		sc->sc_sh = ioh;
	} else {
		printf(": unable to map device registers\n");
		return;
	}

	sc->sc_dmat = pa->pa_dmat;

	/* Get the device out of low-power mode if needed. */
	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
		pmode = pci_conf_read(pc, pa->pa_tag, pmreg + PCI_PMCSR) &
		    PCI_PMCSR_STATE_MASK;
		if (pmode == PCI_PMCSR_STATE_D3) {
			/*
			 * Can't wake from D3 — registers would read as
			 * garbage.  NOTE(review): this return leaks the
			 * bus_space mapping; a `goto fail_0` would be
			 * cleaner — confirm before changing.
			 */
			printf(": unable to wake up from power state D3\n");
			return;
		}
		if (pmode != 0) {
			printf(": waking up from power state D%d\n", pmode);
			pci_conf_write(pc, pa->pa_tag, pmreg + PCI_PMCSR,
			    PCI_PMCSR_STATE_D0);
		}
	}

	/* Map and establish our interrupt. */
	if (pci_intr_map(pa, &ih)) {
		printf(": unable to map interrupt\n");
		goto fail_0;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, stge_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": unable to establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail_0;
	}
	printf(": %s", intrstr);

	/*
	 * Allocate the control data structures (descriptor rings) in one
	 * physically contiguous, page-aligned chunk, then map it into KVA.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct stge_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    0)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct stge_control_data), (caddr_t *)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	/* DMA map covering the control data, loaded so the chip can see it. */
	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct stge_control_data), 1,
	    sizeof(struct stge_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct stge_control_data), NULL,
	    0)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create per-descriptor transmit DMA maps.  Each map can take a
	 * jumbo-sized frame split across up to STGE_NTXFRAGS segments.
	 */
	for (i = 0; i < STGE_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat,
		    STGE_JUMBO_FRAMELEN, STGE_NTXFRAGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].ds_dmamap)) != 0) {
			printf("%s: unable to create tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/* Per-descriptor receive DMA maps: one cluster each. */
	for (i = 0; i < STGE_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].ds_dmamap)) != 0) {
			printf("%s: unable to create rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].ds_mbuf = NULL;
	}

	/* Fiber media is indicated by the AsicCtrl PhyMedia bit. */
	if (CSR_READ_4(sc, STGE_AsicCtrl) & AC_PhyMedia)
		sc->sc_usefiber = 1;
	else
		sc->sc_usefiber = 0;

	/* Reset the chip to a known state before touching anything else. */
	stge_reset(sc);

	/*
	 * Read the station address.  Most parts expose it in the
	 * StationAddress registers (little-endian, 16 bits at a time);
	 * the ST1023 only has it in the EEPROM.
	 */
	if (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_SUNDANCE_ST1023) {
		sc->sc_arpcom.ac_enaddr[0] = CSR_READ_2(sc,
		    STGE_StationAddress0) & 0xff;
		sc->sc_arpcom.ac_enaddr[1] = CSR_READ_2(sc,
		    STGE_StationAddress0) >> 8;
		sc->sc_arpcom.ac_enaddr[2] = CSR_READ_2(sc,
		    STGE_StationAddress1) & 0xff;
		sc->sc_arpcom.ac_enaddr[3] = CSR_READ_2(sc,
		    STGE_StationAddress1) >> 8;
		sc->sc_arpcom.ac_enaddr[4] = CSR_READ_2(sc,
		    STGE_StationAddress2) & 0xff;
		sc->sc_arpcom.ac_enaddr[5] = CSR_READ_2(sc,
		    STGE_StationAddress2) >> 8;
		sc->sc_stge1023 = 0;
	} else {
		uint16_t myaddr[ETHER_ADDR_LEN / 2];
		for (i = 0; i <ETHER_ADDR_LEN / 2; i++) {
			stge_read_eeprom(sc, STGE_EEPROM_StationAddress0 + i,
			    &myaddr[i]);
			myaddr[i] = letoh16(myaddr[i]);
		}
		(void)memcpy(sc->sc_arpcom.ac_enaddr, myaddr,
		    sizeof(sc->sc_arpcom.ac_enaddr));
		sc->sc_stge1023 = 1;
	}

	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/*
	 * Preserve the board's PHY polarity strap bits; they are OR'd
	 * into every bit-bang write (see stge_mii_bitbang_write()).
	 */
	sc->sc_PhyCtrl = CSR_READ_1(sc, STGE_PhyCtrl) &
	    (PC_PhyDuplexPolarity | PC_PhyLnkPolarity);

	/* Initialize the MII layer and probe for PHYs. */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = stge_mii_readreg;
	sc->sc_mii.mii_writereg = stge_mii_writereg;
	sc->sc_mii.mii_statchg = stge_mii_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, 0, stge_mediachange,
	    stge_mediastatus);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, MIIF_DOPAUSE);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		/* No PHY found; expose a "none" media so ifconfig works. */
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	/* Fill in the ifnet and attach the interface. */
	ifp = &sc->sc_arpcom.ac_if;
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, sizeof ifp->if_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = stge_ioctl;
	ifp->if_start = stge_start;
	ifp->if_watchdog = stge_watchdog;
#ifdef STGE_JUMBO
	ifp->if_hardmtu = STGE_JUMBO_MTU;
#endif
	IFQ_SET_MAXLEN(&ifp->if_snd, STGE_NTXDESC - 1);
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	/*
	 * Default transmit-start threshold: 0x0fff (maximum) means the
	 * whole frame is buffered before transmission begins; it is
	 * lowered dynamically on underruns (see stge_intr()).
	 */
	sc->sc_txthresh = 0x0fff;

	sc->sc_DMACtrl = 0;
#ifdef fake
	if ((pa->pa_flags & PCI_FLAGS_MWI_OKAY) == 0)
		sc->sc_DMACtrl |= DMAC_MWIDisable;
#endif

#ifdef STGE_CHECKSUM
	/* The chip can compute IPv4/TCP/UDP checksums in hardware. */
	sc->sc_arpcom.ac_if.if_capabilities |= IFCAP_CSUM_IPv4 |
	    IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
#endif

	if_attach(ifp);
	ether_ifattach(ifp);

	/* Make sure the interface is shut down during reboot. */
	sc->sc_sdhook = shutdownhook_establish(stge_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);
	return;

	/*
	 * Error unwind; free everything allocated so far, in reverse
	 * order.  Falls through from deepest failure to shallowest.
	 */
 fail_5:
	for (i = 0; i < STGE_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].ds_dmamap);
	}
 fail_4:
	for (i = 0; i < STGE_NTXDESC; i++) {
		if (sc->sc_txsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].ds_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    sizeof(struct stge_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	bus_space_unmap(sc->sc_st, sc->sc_sh, iosize);
	return;
}
480
481
482
483
484
485
486 void
487 stge_shutdown(void *arg)
488 {
489 struct stge_softc *sc = arg;
490
491 stge_stop(&sc->sc_arpcom.ac_if, 1);
492 }
493
494 static void
495 stge_dma_wait(struct stge_softc *sc)
496 {
497 int i;
498
499 for (i = 0; i < STGE_TIMEOUT; i++) {
500 delay(2);
501 if ((CSR_READ_4(sc, STGE_DMACtrl) & DMAC_TxDMAInProg) == 0)
502 break;
503 }
504
505 if (i == STGE_TIMEOUT)
506 printf("%s: DMA wait timed out\n", sc->sc_dev.dv_xname);
507 }
508
509
510
511
512
513
514 void
515 stge_start(struct ifnet *ifp)
516 {
517 struct stge_softc *sc = ifp->if_softc;
518 struct mbuf *m0;
519 struct stge_descsoft *ds;
520 struct stge_tfd *tfd;
521 bus_dmamap_t dmamap;
522 int error, firsttx, nexttx, opending, seg, totlen;
523 uint64_t csum_flags = 0;
524
525 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
526 return;
527
528
529
530
531
532 opending = sc->sc_txpending;
533 firsttx = STGE_NEXTTX(sc->sc_txlast);
534
535
536
537
538
539
540 for (;;) {
541
542
543
544 IFQ_POLL(&ifp->if_snd, m0);
545 if (m0 == NULL)
546 break;
547
548
549
550
551
552 if (sc->sc_txpending == (STGE_NTXDESC - 1))
553 break;
554
555
556
557
558 nexttx = STGE_NEXTTX(sc->sc_txlast);
559 tfd = &sc->sc_txdescs[nexttx];
560 ds = &sc->sc_txsoft[nexttx];
561
562 dmamap = ds->ds_dmamap;
563
564
565
566
567
568
569
570
571
572 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
573 BUS_DMA_NOWAIT);
574 if (error) {
575 if (error == EFBIG) {
576 printf("%s: Tx packet consumes too many "
577 "DMA segments (%u), dropping...\n",
578 sc->sc_dev.dv_xname, dmamap->dm_nsegs);
579 IFQ_DEQUEUE(&ifp->if_snd, m0);
580 m_freem(m0);
581 continue;
582 }
583
584
585
586 break;
587 }
588
589 IFQ_DEQUEUE(&ifp->if_snd, m0);
590
591
592
593
594
595
596 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
597 BUS_DMASYNC_PREWRITE);
598
599
600 for (totlen = 0, seg = 0; seg < dmamap->dm_nsegs; seg++) {
601 tfd->tfd_frags[seg].frag_word0 =
602 htole64(FRAG_ADDR(dmamap->dm_segs[seg].ds_addr) |
603 FRAG_LEN(dmamap->dm_segs[seg].ds_len));
604 totlen += dmamap->dm_segs[seg].ds_len;
605 }
606
607 #ifdef STGE_CHECKSUM
608
609
610
611
612 if (m0->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
613 csum_flags |= TFD_IPChecksumEnable;
614
615 if (m0->m_pkthdr.csum_flags & M_TCPV4_CSUM_OUT)
616 csum_flags |= TFD_TCPChecksumEnable;
617 else if (m0->m_pkthdr.csum_flags & M_UDPV4_CSUM_OUT)
618 csum_flags |= TFD_UDPChecksumEnable;
619 #endif
620
621
622
623
624 tfd->tfd_control = htole64(TFD_FrameId(nexttx) |
625 TFD_WordAlign(3) |
626 TFD_FragCount(seg) | csum_flags |
627 (((nexttx & STGE_TXINTR_SPACING_MASK) == 0) ?
628 TFD_TxDMAIndicate : 0));
629
630
631 STGE_CDTXSYNC(sc, nexttx,
632 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
633
634
635
636
637 CSR_WRITE_4(sc, STGE_DMACtrl,
638 sc->sc_DMACtrl | DMAC_TxDMAPollNow);
639
640
641
642
643 ds->ds_mbuf = m0;
644
645
646 sc->sc_txpending++;
647 sc->sc_txlast = nexttx;
648
649 #if NBPFILTER > 0
650
651
652
653 if (ifp->if_bpf)
654 bpf_mtap(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
655 #endif
656 }
657
658 if (sc->sc_txpending == (STGE_NTXDESC - 1)) {
659
660 ifp->if_flags |= IFF_OACTIVE;
661 }
662
663 if (sc->sc_txpending != opending) {
664
665
666
667
668 if (opending == 0)
669 sc->sc_txdirty = firsttx;
670
671
672 ifp->if_timer = 5;
673 }
674 }
675
676
677
678
679
680
681 void
682 stge_watchdog(struct ifnet *ifp)
683 {
684 struct stge_softc *sc = ifp->if_softc;
685
686
687
688
689 stge_txintr(sc);
690 if (sc->sc_txpending != 0) {
691 printf("%s: device timeout\n", sc->sc_dev.dv_xname);
692 ifp->if_oerrors++;
693
694 (void) stge_init(ifp);
695
696
697 stge_start(ifp);
698 }
699 }
700
701
702
703
704
705
/*
 * ifp->if_ioctl handler: address/MTU/flags/multicast/media requests.
 * Runs at splnet(); always restarts output before returning in case an
 * operation freed up resources.
 */
int
stge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct stge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error;

	s = splnet();

	/* Let the common Ethernet code try the request first. */
	if ((error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data)) > 0) {
		stge_start(ifp);

		splx(s);
		return (error);
	}

	switch (cmd) {
	case SIOCSIFADDR:
		/* Setting an address implies bringing the interface up. */
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			stge_init(ifp);

#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_arpcom, ifa);
#endif
		break;

	case SIOCSIFMTU:
		/* Range-check the requested MTU against the hardware max. */
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ifp->if_hardmtu)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu)
			ifp->if_mtu = ifr->ifr_mtu;
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * Only IFF_PROMISC toggles need a filter reload;
			 * anything else while running is left alone.
			 */
			if (ifp->if_flags & IFF_RUNNING &&
			    (ifp->if_flags ^ sc->stge_if_flags) &
			    IFF_PROMISC) {
				stge_set_filter(sc);
			} else {
				if (!(ifp->if_flags & IFF_RUNNING))
					stge_init(ifp);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				stge_stop(ifp, 1);
		}
		sc->stge_if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_arpcom) :
		    ether_delmulti(ifr, &sc->sc_arpcom);

		if (error == ENETRESET) {
			/*
			 * Multicast list changed; reprogram the hardware
			 * filter if we are running.
			 */
			if (ifp->if_flags & IFF_RUNNING)
				stge_set_filter(sc);
			error = 0;
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = ENOTTY;
	}

	/* Try to get more packets going. */
	stge_start(ifp);

	splx(s);
	return (error);
}
792
793
794
795
796
797
/*
 * Interrupt service routine.  Acknowledges and dispatches each pending
 * interrupt cause; a host error, RX ring overflow, or TxComplete status
 * forces a full reinitialization (wantinit).  Returns 1 if the
 * interrupt was ours, 0 otherwise.
 */
int
stge_intr(void *arg)
{
	struct stge_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t txstat;
	int wantinit;
	uint16_t isr;

	/* Not our interrupt? */
	if ((CSR_READ_2(sc, STGE_IntStatus) & IS_InterruptStatus) == 0)
		return (0);

	for (wantinit = 0; wantinit == 0;) {
		/* Reading IntStatusAck acknowledges the causes read. */
		isr = CSR_READ_2(sc, STGE_IntStatusAck);
		if ((isr & sc->sc_IntEnable) == 0)
			break;

		/* Host interface errors: reinit the chip. */
		if (isr & IS_HostError) {
			printf("%s: Host interface error\n",
			    sc->sc_dev.dv_xname);
			wantinit = 1;
			continue;
		}

		/* Receive interrupts. */
		if (isr & (IS_RxDMAComplete|IS_RFDListEnd)) {
			stge_rxintr(sc);
			if (isr & IS_RFDListEnd) {
				printf("%s: receive ring overflow\n",
				    sc->sc_dev.dv_xname);
				/* Recover by reinitializing the rings. */
				wantinit = 1;
			}
		}

		/* Transmit completion interrupts. */
		if (isr & (IS_TxDMAComplete|IS_TxComplete))
			stge_txintr(sc);

		/* Statistics overflow. */
		if (isr & IS_UpdateStats)
			stge_stats_update(sc);

		/*
		 * Transmission errors: drain the TxStatus FIFO, bumping
		 * the start threshold on underruns.  NOTE(review): the
		 * ++ then clamp on sc_txthresh looks like it should be a
		 * larger step; confirm against other stge(4) versions.
		 */
		if (isr & IS_TxComplete) {
			for (;;) {
				txstat = CSR_READ_4(sc, STGE_TxStatus);
				if ((txstat & TS_TxComplete) == 0)
					break;
				if (txstat & TS_TxUnderrun) {
					sc->sc_txthresh++;
					if (sc->sc_txthresh > 0x0fff)
						sc->sc_txthresh = 0x0fff;
					printf("%s: transmit underrun, new "
					    "threshold: %d bytes\n",
					    sc->sc_dev.dv_xname,
					    sc->sc_txthresh << 5);
				}
				if (txstat & TS_MaxCollisions)
					printf("%s: excessive collisions\n",
					    sc->sc_dev.dv_xname);
			}
			wantinit = 1;
		}

	}

	if (wantinit)
		stge_init(ifp);

	/* Re-enable interrupts (reset by chip reset in stge_init()). */
	CSR_WRITE_2(sc, STGE_IntEnable, sc->sc_IntEnable);

	/* Try to get more packets going. */
	stge_start(ifp);

	return (1);
}
879
880
881
882
883
884
/*
 * Helper; handle transmit completion: walk the ring from the oldest
 * dirty descriptor, reclaiming every descriptor the chip has marked
 * TFDDone, unloading its DMA map and freeing its mbuf.
 */
void
stge_txintr(struct stge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct stge_descsoft *ds;
	uint64_t control;
	int i;

	/* Descriptors are (about to be) available again. */
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through the Tx list and free mbufs for those frames which
	 * have been transmitted.
	 */
	for (i = sc->sc_txdirty; sc->sc_txpending != 0;
	     i = STGE_NEXTTX(i), sc->sc_txpending--) {
		ds = &sc->sc_txsoft[i];

		STGE_CDTXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		control = letoh64(sc->sc_txdescs[i].tfd_control);
		/* Stop at the first descriptor still owned by the chip. */
		if ((control & TFD_TFDDone) == 0)
			break;

		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap,
		    0, ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
		m_freem(ds->ds_mbuf);
		ds->ds_mbuf = NULL;
	}

	/* Update the dirty transmit buffer pointer. */
	sc->sc_txdirty = i;

	/* Cancel the watchdog if the ring is now empty. */
	if (sc->sc_txpending == 0)
		ifp->if_timer = 0;
}
927
928
929
930
931
932
/*
 * Helper; handle receive interrupts: walk the RX ring from sc_rxptr,
 * assembling multi-descriptor frames into an mbuf chain, validating
 * status, optionally copying small frames into a compact mbuf, filling
 * in checksum-offload results, and passing frames to the stack.
 */
void
stge_rxintr(struct stge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct stge_descsoft *ds;
	struct mbuf *m, *tailm;
	uint64_t status;
	int i, len;

	for (i = sc->sc_rxptr;; i = STGE_NEXTRX(i)) {
		ds = &sc->sc_rxsoft[i];

		STGE_CDRXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status = letoh64(sc->sc_rxdescs[i].rfd_status);

		/* Stop at the first descriptor still owned by the chip. */
		if ((status & RFD_RFDDone) == 0)
			break;

		/*
		 * In discard mode (set after a failed buffer refill mid
		 * frame), drop descriptors until the frame ends.
		 */
		if (__predict_false(sc->sc_rxdiscard)) {
			STGE_INIT_RXDESC(sc, i);
			if (status & RFD_FrameEnd) {
				/* Reached the end of the packet. */
				sc->sc_rxdiscard = 0;
			}
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		m = ds->ds_mbuf;

		/*
		 * Add a new receive buffer to the ring.  If we can't,
		 * drop the whole packet (and any partial chain) and
		 * recycle the current buffer instead.
		 */
		if (stge_add_rxbuf(sc, i) != 0) {
			/*
			 * Failed — drop the packet and keep the old mbuf
			 * in the ring.
			 * NOTE(review): this POSTREAD sync duplicates the
			 * one just above — looks redundant; confirm
			 * against the bus_dma(9) contract.
			 */
			ifp->if_ierrors++;
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
			STGE_INIT_RXDESC(sc, i);
			if ((status & RFD_FrameEnd) == 0)
				sc->sc_rxdiscard = 1;
			if (sc->sc_rxhead != NULL)
				m_freem(sc->sc_rxhead);
			STGE_RXCHAIN_RESET(sc);
			continue;
		}

#ifdef DIAGNOSTIC
		if (status & RFD_FrameStart) {
			KASSERT(sc->sc_rxhead == NULL);
			KASSERT(sc->sc_rxtailp == &sc->sc_rxhead);
		}
#endif

		/* Append this buffer to the in-progress frame chain. */
		STGE_RXCHAIN_LINK(sc, m);

		/*
		 * If this is not the end of the packet, keep
		 * accumulating; sc_rxlen tracks bytes in prior buffers.
		 */
		if ((status & RFD_FrameEnd) == 0) {
			sc->sc_rxlen += m->m_len;
			continue;
		}

		/* Frame complete: detach the chain. */
		*sc->sc_rxtailp = NULL;
		m = sc->sc_rxhead;
		tailm = sc->sc_rxtail;

		STGE_RXCHAIN_RESET(sc);

		/* Drop frames with receive errors. */
		if (status & (RFD_RxFIFOOverrun | RFD_RxRuntFrame |
		    RFD_RxAlignmentError | RFD_RxFCSError |
		    RFD_RxLengthError)) {
			m_freem(m);
			continue;
		}

		/*
		 * Total frame length comes from the status word; the
		 * last buffer holds whatever is beyond the earlier
		 * buffers' accumulated length.
		 */
		len = RFD_RxDMAFrameLen(status);
		tailm->m_len = len - sc->sc_rxlen;

		/*
		 * Small frames: optionally copy into a plain mbuf header
		 * (offset 2 keeps the IP header aligned) so we don't tie
		 * up a whole cluster.
		 */
		if (stge_copy_small != 0 && len <= (MHLEN - 2)) {
			struct mbuf *nm;
			MGETHDR(nm, M_DONTWAIT, MT_DATA);
			if (nm == NULL) {
				ifp->if_ierrors++;
				m_freem(m);
				continue;
			}
			nm->m_data += 2;
			nm->m_pkthdr.len = nm->m_len = len;
			m_copydata(m, 0, len, mtod(nm, caddr_t));
			m_freem(m);
			m = nm;
		}

		/* Record hardware checksum results for the stack. */
		if (status & RFD_IPDetected) {
			if (!(status & RFD_IPError))
				m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
			if ((status & RFD_TCPDetected) &&
			    (!(status & RFD_TCPError)))
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
			else if ((status & RFD_UDPDetected) &&
			    (!(status & RFD_UDPError)))
				m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
		}

		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = len;

#if NBPFILTER > 0
		/* Hand the packet to any attached BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif

		/* Pass it on up. */
		ether_input_mbuf(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;
}
1089
1090
1091
1092
1093
1094
/*
 * One-second periodic timer: tick the MII (link state polling) and
 * harvest the hardware statistics counters, then reschedule.
 */
void
stge_tick(void *arg)
{
	struct stge_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	stge_stats_update(sc);
	splx(s);

	timeout_add(&sc->sc_timeout, hz);
}
1108
1109
1110
1111
1112
1113
/*
 * Read the hardware statistics counters and fold them into the ifnet
 * counters.  NOTE(review): counters on this family appear to clear on
 * read — the discarded reads of OctetRcvOk/OctetXmtdOk presumably just
 * clear those registers; confirm against the TC9021 datasheet.
 */
void
stge_stats_update(struct stge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;

	(void) CSR_READ_4(sc, STGE_OctetRcvOk);

	ifp->if_ipackets +=
	    CSR_READ_4(sc, STGE_FramesRcvdOk);

	ifp->if_ierrors +=
	    (u_int) CSR_READ_2(sc, STGE_FramesLostRxErrors);

	(void) CSR_READ_4(sc, STGE_OctetXmtdOk);

	ifp->if_opackets +=
	    CSR_READ_4(sc, STGE_FramesXmtdOk);

	ifp->if_collisions +=
	    CSR_READ_4(sc, STGE_LateCollisions) +
	    CSR_READ_4(sc, STGE_MultiColFrames) +
	    CSR_READ_4(sc, STGE_SingleColFrames);

	ifp->if_oerrors +=
	    (u_int) CSR_READ_2(sc, STGE_FramesAbortXSColls) +
	    (u_int) CSR_READ_2(sc, STGE_FramesWEXDeferal);
}
1141
1142
1143
1144
1145
1146
/*
 * Perform a soft reset of the chip: global reset plus RX/TX, DMA,
 * FIFO, network, and host logic, then poll until the reset-busy bit
 * clears.  RstOut is asserted only on fiber boards.
 */
void
stge_reset(struct stge_softc *sc)
{
	uint32_t ac;
	int i;

	ac = CSR_READ_4(sc, STGE_AsicCtrl);

	CSR_WRITE_4(sc, STGE_AsicCtrl,
	    ac | AC_GlobalReset | AC_RxReset | AC_TxReset |
	    AC_DMA | AC_FIFO | AC_Network | AC_Host | AC_AutoInit |
	    (sc->sc_usefiber ? AC_RstOut : 0));

	/* Let the reset take hold before polling. */
	delay(50000);

	for (i = 0; i < STGE_TIMEOUT; i++) {
		delay(5000);
		if ((CSR_READ_4(sc, STGE_AsicCtrl) & AC_ResetBusy) == 0)
			break;
	}

	if (i == STGE_TIMEOUT)
		printf("%s: reset failed to complete\n", sc->sc_dev.dv_xname);

	/* Extra settle time after the busy bit clears. */
	delay(1000);
}
1178
1179
1180
1181
1182
1183
/*
 * ifp->if_init handler: stop and reset the chip, rebuild the TX/RX
 * rings, and program every MAC/DMA register needed to bring the
 * interface into the running state.  Returns 0 on success or the
 * errno from RX buffer allocation on failure.
 */
int
stge_init(struct ifnet *ifp)
{
	struct stge_softc *sc = ifp->if_softc;
	struct stge_descsoft *ds;
	int i, error = 0;

	/* Cancel any pending I/O. */
	stge_stop(ifp, 0);

	/* Reset the chip to a known state. */
	stge_reset(sc);

	/*
	 * Initialize the transmit descriptor ring: link each descriptor
	 * to the next and mark every one done (owned by the host).
	 */
	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	for (i = 0; i < STGE_NTXDESC; i++) {
		sc->sc_txdescs[i].tfd_next = htole64(
		    STGE_CDTXADDR(sc, STGE_NEXTTX(i)));
		sc->sc_txdescs[i].tfd_control = htole64(TFD_TFDDone);
	}
	sc->sc_txpending = 0;
	sc->sc_txdirty = 0;
	sc->sc_txlast = STGE_NTXDESC - 1;

	/*
	 * Initialize the receive ring: attach a cluster to every
	 * descriptor that doesn't already have one.
	 */
	for (i = 0; i < STGE_NRXDESC; i++) {
		ds = &sc->sc_rxsoft[i];
		if (ds->ds_mbuf == NULL) {
			if ((error = stge_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    sc->sc_dev.dv_xname, i, error);
				/* Free what we did allocate and bail. */
				stge_rxdrain(sc);
				goto out;
			}
		} else
			STGE_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;
	sc->sc_rxdiscard = 0;
	STGE_RXCHAIN_RESET(sc);

	/* Set the station address, one byte per register. */
	for (i = 0; i < 6; i++)
		CSR_WRITE_1(sc, STGE_StationAddress0 + i,
		    sc->sc_arpcom.ac_enaddr[i]);

	/*
	 * Mask the statistics counters we don't read, so they never
	 * raise an overflow interrupt.
	 */
	CSR_WRITE_4(sc, STGE_RMONStatisticsMask, 0xffffffff);
	CSR_WRITE_4(sc, STGE_StatisticsMask,
	    (1U << 1) | (1U << 2) | (1U << 3) | (1U << 4) | (1U << 5) |
	    (1U << 6) | (1U << 7) | (1U << 8) | (1U << 9) | (1U << 10) |
	    (1U << 13) | (1U << 14) | (1U << 15) | (1U << 19) | (1U << 20) |
	    (1U << 21));

	/* Program the receive filter. */
	stge_set_filter(sc);

	/* Give the chip the base addresses of the descriptor rings. */
	CSR_WRITE_4(sc, STGE_TFDListPtrHi, 0);
	CSR_WRITE_4(sc, STGE_TFDListPtrLo,
	    STGE_CDTXADDR(sc, sc->sc_txdirty));

	CSR_WRITE_4(sc, STGE_RFDListPtrHi, 0);
	CSR_WRITE_4(sc, STGE_RFDListPtrLo,
	    STGE_CDRXADDR(sc, sc->sc_rxptr));

	/* TX/RX DMA descriptor-poll periods. */
	CSR_WRITE_1(sc, STGE_TxDMAPollPeriod, 127);

	CSR_WRITE_1(sc, STGE_RxDMAPollPeriod, 64);

	/* Current transmit-start threshold (see stge_intr()). */
	CSR_WRITE_2(sc, STGE_TxStartThresh, sc->sc_txthresh);

	/* RX DMA thresholds. */
	CSR_WRITE_1(sc, STGE_RxDMABurstThresh, 0x30);
	CSR_WRITE_1(sc, STGE_RxDMAUrgentThresh, 0x30);

	/* RX early-interrupt threshold (effectively disabled). */
	CSR_WRITE_2(sc, STGE_RxEarlyThresh, 0x7ff);

	/* TX DMA thresholds. */
	CSR_WRITE_1(sc, STGE_TxDMABurstThresh, 0x30);
	CSR_WRITE_1(sc, STGE_TxDMAUrgentThresh, 0x04);

	/*
	 * RX interrupt mitigation: coalesce up to 8 frames or until the
	 * wait time expires, whichever comes first.
	 */
	CSR_WRITE_4(sc, STGE_RxDMAIntCtrl,
	    RDIC_RxFrameCount(8) | RDIC_RxDMAWaitTime(512));

	/* Configure and enable the interrupt sources we handle. */
	sc->sc_IntEnable = IS_HostError | IS_TxComplete | IS_UpdateStats |
	    IS_TxDMAComplete | IS_RxDMAComplete | IS_RFDListEnd;
	CSR_WRITE_2(sc, STGE_IntStatus, 0xffff);
	CSR_WRITE_2(sc, STGE_IntEnable, sc->sc_IntEnable);

	/* Apply the DMA control word plus the TX burst limit. */
	CSR_WRITE_4(sc, STGE_DMACtrl, sc->sc_DMACtrl |
	    DMAC_TxBurstLimit(3));

	/* Flow-control pause thresholds. */
	CSR_WRITE_2(sc, STGE_FlowOnTresh, 29696 / 16);
	CSR_WRITE_2(sc, STGE_FlowOffThresh, 0);

	/* Maximum acceptable receive frame size. */
#ifdef STGE_JUMBO
	CSR_WRITE_2(sc, STGE_MaxFrameSize, STGE_JUMBO_FRAMELEN);
#else
	CSR_WRITE_2(sc, STGE_MaxFrameSize, ETHER_MAX_LEN);
#endif

	/*
	 * Set the inter-frame spacing first, then add the enable bits
	 * to the shadow copy; the enables are written to the chip by
	 * stge_mii_statchg() when the media is set.
	 */
	sc->sc_MACCtrl = MC_IFSSelect(0);
	CSR_WRITE_4(sc, STGE_MACCtrl, sc->sc_MACCtrl);
	sc->sc_MACCtrl |= MC_StatisticsEnable | MC_TxEnable | MC_RxEnable;

	/* Rev-6+ errata workarounds via the DebugCtrl register. */
	if (sc->sc_rev >= 6) {
		CSR_WRITE_2(sc, STGE_DebugCtrl,
		    CSR_READ_2(sc, STGE_DebugCtrl) | 0x0200);

		CSR_WRITE_2(sc, STGE_DebugCtrl,
		    CSR_READ_2(sc, STGE_DebugCtrl) | 0x0010);

		CSR_WRITE_2(sc, STGE_DebugCtrl,
		    CSR_READ_2(sc, STGE_DebugCtrl) | 0x0020);
	}

	/* Set the current media; triggers stge_mii_statchg(). */
	mii_mediachg(&sc->sc_mii);

	/* Start the one-second tick. */
	timeout_add(&sc->sc_timeout, hz);

	/* We're running. */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

 out:
	if (error)
		printf("%s: interface not running\n", sc->sc_dev.dv_xname);
	return (error);
}
1380
1381
1382
1383
1384
1385
1386 void
1387 stge_rxdrain(struct stge_softc *sc)
1388 {
1389 struct stge_descsoft *ds;
1390 int i;
1391
1392 for (i = 0; i < STGE_NRXDESC; i++) {
1393 ds = &sc->sc_rxsoft[i];
1394 if (ds->ds_mbuf != NULL) {
1395 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1396 ds->ds_mbuf->m_next = NULL;
1397 m_freem(ds->ds_mbuf);
1398 ds->ds_mbuf = NULL;
1399 }
1400 }
1401 }
1402
1403
1404
1405
1406
1407
/*
 * ifp->if_stop handler: halt the transmitter and receiver, disable
 * interrupts, wait for outstanding DMA, release TX mbufs, and — if
 * `disable` — also drain the RX ring.
 */
void
stge_stop(struct ifnet *ifp, int disable)
{
	struct stge_softc *sc = ifp->if_softc;
	struct stge_descsoft *ds;
	int i;

	/* Stop the one-second tick. */
	timeout_del(&sc->sc_timeout);

	/* Mark the interface down and cancel the watchdog. */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	/* Take the link down. */
	mii_down(&sc->sc_mii);

	/* Disable interrupts. */
	CSR_WRITE_2(sc, STGE_IntEnable, 0);

	/* Stop receiver, transmitter, and statistics update. */
	CSR_WRITE_4(sc, STGE_MACCtrl,
	    MC_StatisticsDisable | MC_TxDisable | MC_RxDisable);

	/* Wait for pending TX DMA, then clear the ring pointers. */
	stge_dma_wait(sc);
	CSR_WRITE_4(sc, STGE_TFDListPtrHi, 0);
	CSR_WRITE_4(sc, STGE_TFDListPtrLo, 0);
	CSR_WRITE_4(sc, STGE_RFDListPtrHi, 0);
	CSR_WRITE_4(sc, STGE_RFDListPtrLo, 0);

	/* Release any queued transmit buffers. */
	for (i = 0; i < STGE_NTXDESC; i++) {
		ds = &sc->sc_txsoft[i];
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}

	if (disable)
		stge_rxdrain(sc);
}
1464
1465 static int
1466 stge_eeprom_wait(struct stge_softc *sc)
1467 {
1468 int i;
1469
1470 for (i = 0; i < STGE_TIMEOUT; i++) {
1471 delay(1000);
1472 if ((CSR_READ_2(sc, STGE_EepromCtrl) & EC_EepromBusy) == 0)
1473 return (0);
1474 }
1475 return (1);
1476 }
1477
1478
1479
1480
1481
1482
/*
 * Read one 16-bit word at `offset` from the serial EEPROM into *data.
 * Complains (but still proceeds) on busy-wait timeouts.
 */
void
stge_read_eeprom(struct stge_softc *sc, int offset, uint16_t *data)
{

	if (stge_eeprom_wait(sc))
		printf("%s: EEPROM failed to come ready\n",
		    sc->sc_dev.dv_xname);

	/* Issue a read command for the requested word. */
	CSR_WRITE_2(sc, STGE_EepromCtrl,
	    EC_EepromAddress(offset) | EC_EepromOpcode(EC_OP_RR));
	if (stge_eeprom_wait(sc))
		printf("%s: EEPROM read timed out\n",
		    sc->sc_dev.dv_xname);
	*data = CSR_READ_2(sc, STGE_EepromData);
}
1498
1499
1500
1501
1502
1503
/*
 * Attach a fresh mbuf cluster to RX descriptor `idx`, loading it into
 * the descriptor's DMA map and reinitializing the hardware descriptor.
 * Returns 0 on success or ENOBUFS if mbuf/cluster allocation fails.
 */
int
stge_add_rxbuf(struct stge_softc *sc, int idx)
{
	struct stge_descsoft *ds = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	/* Offset by 2 so the IP header lands on a 4-byte boundary. */
	m->m_data = m->m_ext.ext_buf + 2;
	m->m_len = MCLBYTES - 2;

	/* Unload the previous buffer, if any, before reusing the map. */
	if (ds->ds_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);

	ds->ds_mbuf = m;

	/*
	 * The whole cluster is mapped (not just the offset region);
	 * a load failure here is considered fatal.
	 */
	error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, error);
		panic("stge_add_rxbuf");
	}

	bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
	    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	STGE_INIT_RXDESC(sc, idx);

	return (0);
}
1544
1545
1546
1547
1548
1549
/*
 * Program the receive filter: unicast plus (as configured) broadcast,
 * promiscuous, all-multicast, or a 64-bit multicast hash table.
 */
void
stge_set_filter(struct stge_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t crc;
	uint32_t mchash[2];

	sc->sc_ReceiveMode = RM_ReceiveUnicast;
	if (ifp->if_flags & IFF_BROADCAST)
		sc->sc_ReceiveMode |= RM_ReceiveBroadcast;

	/*
	 * The ST1023 is forced into promiscuous mode.
	 * NOTE(review): presumably a hardware filtering limitation on
	 * that part — confirm against the ST1023 documentation.
	 */
	if (sc->sc_stge1023)
		ifp->if_flags |= IFF_PROMISC;

	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_ReceiveMode |= RM_ReceiveAllFrames;
		goto allmulti;
	}

	/*
	 * Build the 64-bit multicast hash: one bit per group, indexed
	 * by the low 6 bits of the big-endian CRC32 of the address.
	 * Any address range (lo != hi) forces all-multicast mode.
	 */
	memset(mchash, 0, sizeof(mchash));

	ETHER_FIRST_MULTI(step, ac, enm);
	if (enm == NULL)
		goto done;

	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * A range of addresses can't be represented in
			 * the hash table; accept all multicast instead.
			 */
			goto allmulti;
		}

		crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);

		/* Just want the 6 least significant bits. */
		crc &= 0x3f;

		/* Set the corresponding bit in the 2x32-bit hash. */
		mchash[crc >> 5] |= 1 << (crc & 0x1f);

		ETHER_NEXT_MULTI(step, enm);
	}

	sc->sc_ReceiveMode |= RM_ReceiveMulticastHash;

	ifp->if_flags &= ~IFF_ALLMULTI;
	goto done;

 allmulti:
	ifp->if_flags |= IFF_ALLMULTI;
	sc->sc_ReceiveMode |= RM_ReceiveMulticast;

 done:
	if ((ifp->if_flags & IFF_ALLMULTI) == 0) {
		/* Program the hash table only when it is in use. */
		CSR_WRITE_4(sc, STGE_HashTable0, mchash[0]);
		CSR_WRITE_4(sc, STGE_HashTable1, mchash[1]);
	}

	CSR_WRITE_2(sc, STGE_ReceiveMode, sc->sc_ReceiveMode);
}
1631
1632
1633
1634
1635
1636
1637 int
1638 stge_mii_readreg(struct device *self, int phy, int reg)
1639 {
1640
1641 return (mii_bitbang_readreg(self, &stge_mii_bitbang_ops, phy, reg));
1642 }
1643
1644
1645
1646
1647
1648
1649 void
1650 stge_mii_writereg(struct device *self, int phy, int reg, int val)
1651 {
1652
1653 mii_bitbang_writereg(self, &stge_mii_bitbang_ops, phy, reg, val);
1654 }
1655
1656
1657
1658
1659
1660
1661 void
1662 stge_mii_statchg(struct device *self)
1663 {
1664 struct stge_softc *sc = (struct stge_softc *) self;
1665
1666 if (sc->sc_mii.mii_media_active & IFM_FDX)
1667 sc->sc_MACCtrl |= MC_DuplexSelect;
1668 else
1669 sc->sc_MACCtrl &= ~MC_DuplexSelect;
1670
1671
1672
1673 CSR_WRITE_4(sc, STGE_MACCtrl, sc->sc_MACCtrl);
1674 }
1675
1676
1677
1678
1679
1680
1681 uint32_t
1682 stge_mii_bitbang_read(struct device *self)
1683 {
1684 struct stge_softc *sc = (void *) self;
1685
1686 return (CSR_READ_1(sc, STGE_PhyCtrl));
1687 }
1688
1689
1690
1691
1692
1693
1694 void
1695 stge_mii_bitbang_write(struct device *self, uint32_t val)
1696 {
1697 struct stge_softc *sc = (void *) self;
1698
1699 CSR_WRITE_1(sc, STGE_PhyCtrl, val | sc->sc_PhyCtrl);
1700 }
1701
1702
1703
1704
1705
1706
1707 void
1708 stge_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1709 {
1710 struct stge_softc *sc = ifp->if_softc;
1711
1712 mii_pollstat(&sc->sc_mii);
1713 ifmr->ifm_status = sc->sc_mii.mii_media_status;
1714 ifmr->ifm_active = sc->sc_mii.mii_media_active;
1715 }
1716
1717
1718
1719
1720
1721
1722 int
1723 stge_mediachange(struct ifnet *ifp)
1724 {
1725 struct stge_softc *sc = ifp->if_softc;
1726
1727 if (ifp->if_flags & IFF_UP)
1728 mii_mediachg(&sc->sc_mii);
1729 return (0);
1730 }