This source file includes the following definitions.
- bce_probe
- bce_attach
- bce_ioctl
- bce_start
- bce_watchdog
- bce_intr
- bce_rxintr
- bce_txintr
- bce_init
- bce_add_mac
- bce_add_rxbuf
- bce_rxdrain
- bce_stop
- bce_reset
- bce_set_filter
- bce_mii_read
- bce_mii_write
- bce_statchg
- bce_mediachange
- bce_mediastatus
- bce_tick
#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/timeout.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif
#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/brgphyreg.h>

#include <dev/pci/if_bcereg.h>

#include <uvm/uvm_extern.h>
/* transmit buffer max frags allowed */
#define BCE_NTXFRAGS	16

/* ring descriptor */
struct bce_dma_slot {
	u_int32_t ctrl;
	u_int32_t addr;
};
#define CTRL_BC_MASK	0x1fff		/* buffer byte count */
#define CTRL_EOT	0x10000000	/* end of descriptor table */
#define CTRL_IOC	0x20000000	/* interrupt on completion */
#define CTRL_EOF	0x40000000	/* end of frame */
#define CTRL_SOF	0x80000000	/* start of frame */

/* Packet status is returned in a pre-packet header */
struct rx_pph {
	u_int16_t len;
	u_int16_t flags;
	u_int16_t pad[12];
};

/* bytes the chip reserves ahead of each received packet */
#define BCE_PREPKT_HEADER_SIZE	30

/* receive status flags, reported in the pre-packet header */
#define RXF_NO		0x8
#define RXF_RXER	0x4	/* receive error */
#define RXF_CRC		0x2	/* CRC error */
#define RXF_OV		0x1	/* overflow */

/* number of descriptors used in a ring */
#define BCE_NRXDESC	128
#define BCE_NTXDESC	128

/*
 * Mbuf pointers and DMA maps tracked per descriptor slot in the
 * transmit and receive rings.
 */
struct bce_chain_data {
	struct mbuf	*bce_tx_chain[BCE_NTXDESC];
	struct mbuf	*bce_rx_chain[BCE_NRXDESC];
	bus_dmamap_t	bce_tx_map[BCE_NTXDESC];
	bus_dmamap_t	bce_rx_map[BCE_NRXDESC];
};

#define BCE_TIMEOUT	100	/* max loop iterations waiting on the MII */
/* Per-device driver state. */
struct bce_softc {
	struct device		bce_dev;
	bus_space_tag_t		bce_btag;
	bus_space_handle_t	bce_bhandle;
	bus_dma_tag_t		bce_dmatag;
	struct arpcom		bce_ac;		/* interface info */
	void			*bce_intrhand;
	struct pci_attach_args	bce_pa;
	struct mii_data		bce_mii;
	u_int32_t		bce_phy;	/* phy address */
	struct bce_dma_slot	*bce_rx_ring;	/* receive ring */
	struct bce_dma_slot	*bce_tx_ring;	/* transmit ring */
	struct bce_chain_data	bce_cdata;	/* mbufs */
	bus_dmamap_t		bce_ring_map;
	u_int32_t		bce_intmask;	/* current interrupt mask */
	u_int32_t		bce_rxin;	/* last rx descriptor seen */
	u_int32_t		bce_txin;	/* last tx descriptor seen */
	int			bce_txsfree;	/* no. tx slots available */
	int			bce_txsnext;	/* next available tx slot */
	struct timeout		bce_timeout;
};
#define BCE_RXBUF_LEN	(MCLBYTES - 4)

/* Point rx descriptor 'x' at its (already loaded) mbuf. */
#define BCE_INIT_RXDESC(sc, x)						\
do {									\
	struct bce_dma_slot *__bced = &sc->bce_rx_ring[x];		\
									\
	*mtod(sc->bce_cdata.bce_rx_chain[x], u_int32_t *) = 0;		\
	__bced->addr =							\
	    htole32(sc->bce_cdata.bce_rx_map[x]->dm_segs[0].ds_addr	\
	    + 0x40000000);						\
	if (x != (BCE_NRXDESC - 1))					\
		__bced->ctrl = htole32(BCE_RXBUF_LEN);			\
	else								\
		__bced->ctrl = htole32(BCE_RXBUF_LEN | CTRL_EOT);	\
	bus_dmamap_sync(sc->bce_dmatag, sc->bce_ring_map,		\
	    sizeof(struct bce_dma_slot) * x,				\
	    sizeof(struct bce_dma_slot),				\
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);		\
} while (0)
int	bce_probe(struct device *, void *, void *);
void	bce_attach(struct device *, struct device *, void *);
int	bce_ioctl(struct ifnet *, u_long, caddr_t);
void	bce_start(struct ifnet *);
void	bce_watchdog(struct ifnet *);
int	bce_intr(void *);
void	bce_rxintr(struct bce_softc *);
void	bce_txintr(struct bce_softc *);
int	bce_init(struct ifnet *);
void	bce_add_mac(struct bce_softc *, u_int8_t *, unsigned long);
int	bce_add_rxbuf(struct bce_softc *, int);
void	bce_rxdrain(struct bce_softc *);
void	bce_stop(struct ifnet *, int);
void	bce_reset(struct bce_softc *);
void	bce_set_filter(struct ifnet *);
int	bce_mii_read(struct device *, int, int);
void	bce_mii_write(struct device *, int, int, int);
void	bce_statchg(struct device *);
int	bce_mediachange(struct ifnet *);
void	bce_mediastatus(struct ifnet *, struct ifmediareq *);
void	bce_tick(void *);
#ifdef BCE_DEBUG
#define DPRINTF(x)	do {		\
	if (bcedebug)			\
		printf x;		\
} while (0)
#define DPRINTFN(n,x)	do {		\
	if (bcedebug >= (n))		\
		printf x;		\
} while (0)
int	bcedebug = 0;
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

struct cfattach bce_ca = {
	sizeof(struct bce_softc), bce_probe, bce_attach
};
struct cfdriver bce_cd = {
	NULL, "bce", DV_IFNET
};

const struct pci_matchid bce_devices[] = {
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4401 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4401B0 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4401B1 }
};
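/* Match any of the supported BCM4401 variants by PCI vendor/product ID. */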
int
bce_probe(struct device *parent, void *match, void *aux)
{
	return (pci_matchbyid((struct pci_attach_args *)aux, bce_devices,
	    sizeof(bce_devices) / sizeof(bce_devices[0])));
}
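/*
 * Map the device registers, wake it from power save, allocate the
 * descriptor rings and DMA maps, read the station address, and attach
 * the network interface and PHYs.
 */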
void
bce_attach(struct device *parent, struct device *self, void *aux)
{
	struct bce_softc *sc = (struct bce_softc *) self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	caddr_t kva;
	bus_dma_segment_t seg;
	int rseg;
	struct ifnet *ifp;
	pcireg_t memtype;
	bus_addr_t memaddr;
	bus_size_t memsize;
	int pmreg;
	pcireg_t pmode;
	int error;
	int i;

	sc->bce_pa = *pa;
	sc->bce_dmatag = pa->pa_dmat;

	/* map control/status registers */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BCE_PCI_BAR0);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		if (pci_mapreg_map(pa, BCE_PCI_BAR0, memtype, 0, &sc->bce_btag,
		    &sc->bce_bhandle, &memaddr, &memsize, 0) == 0)
			break;
	default:
		printf("%s: unable to find mem space\n",
		    sc->bce_dev.dv_xname);
		return;
	}

	/* Get it out of power save mode if needed. */
	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
		pmode = pci_conf_read(pc, pa->pa_tag, pmreg + 4) & 0x3;
		if (pmode == 3) {
			/*
			 * The card has lost all configuration data in
			 * this state, so punt.
			 */
			printf("%s: unable to wake up from power state D3\n",
			    sc->bce_dev.dv_xname);
			return;
		}
		if (pmode != 0) {
			printf("%s: waking up from power state D%d\n",
			    sc->bce_dev.dv_xname, pmode);
			pci_conf_write(pc, pa->pa_tag, pmreg + 4, 0);
		}
	}

	if (pci_intr_map(pa, &ih)) {
		printf("%s: couldn't map interrupt\n",
		    sc->bce_dev.dv_xname);
		return;
	}
	intrstr = pci_intr_string(pc, ih);

	sc->bce_intrhand = pci_intr_establish(pc, ih, IPL_NET, bce_intr, sc,
	    self->dv_xname);

	if (sc->bce_intrhand == NULL) {
		printf("%s: couldn't establish interrupt",
		    sc->bce_dev.dv_xname);
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}

	/* reset the chip */
	bce_reset(sc);

	/*
	 * Allocate DMA-safe memory for the ring descriptors.
	 * The receive and transmit rings each take one page and must
	 * not cross a page boundary.
	 */
	if ((error = bus_dmamem_alloc(sc->bce_dmatag,
	    2 * PAGE_SIZE, PAGE_SIZE, 2 * PAGE_SIZE,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT))) {
		printf("%s: unable to alloc space for ring descriptors, "
		    "error = %d\n", sc->bce_dev.dv_xname, error);
		return;
	}

	/* map ring space to kernel */
	if ((error = bus_dmamem_map(sc->bce_dmatag, &seg, rseg,
	    2 * PAGE_SIZE, &kva, BUS_DMA_NOWAIT))) {
		printf("%s: unable to map DMA buffers, error = %d\n",
		    sc->bce_dev.dv_xname, error);
		bus_dmamem_free(sc->bce_dmatag, &seg, rseg);
		return;
	}

	/* create a DMA map for the ring memory */
	if ((error = bus_dmamap_create(sc->bce_dmatag,
	    2 * PAGE_SIZE, 1, 2 * PAGE_SIZE, 0, BUS_DMA_NOWAIT,
	    &sc->bce_ring_map))) {
		printf("%s: unable to create ring DMA map, error = %d\n",
		    sc->bce_dev.dv_xname, error);
		bus_dmamem_unmap(sc->bce_dmatag, kva, 2 * PAGE_SIZE);
		bus_dmamem_free(sc->bce_dmatag, &seg, rseg);
		return;
	}

	/* connect the ring space to the DMA map */
	if (bus_dmamap_load(sc->bce_dmatag, sc->bce_ring_map, kva,
	    2 * PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) {
		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_ring_map);
		bus_dmamem_unmap(sc->bce_dmatag, kva, 2 * PAGE_SIZE);
		bus_dmamem_free(sc->bce_dmatag, &seg, rseg);
		return;
	}

	/* save the ring space in the softc */
	sc->bce_rx_ring = (struct bce_dma_slot *) kva;
	sc->bce_tx_ring = (struct bce_dma_slot *) (kva + PAGE_SIZE);

	/* Create the transmit buffer DMA maps. */
	for (i = 0; i < BCE_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->bce_dmatag, MCLBYTES,
		    BCE_NTXFRAGS, MCLBYTES, 0, 0,
		    &sc->bce_cdata.bce_tx_map[i])) != 0) {
			printf("%s: unable to create tx DMA map, error = %d\n",
			    sc->bce_dev.dv_xname, error);
		}
		sc->bce_cdata.bce_tx_chain[i] = NULL;
	}

	/* Create the receive buffer DMA maps. */
	for (i = 0; i < BCE_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->bce_dmatag, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->bce_cdata.bce_rx_map[i])) != 0) {
			printf("%s: unable to create rx DMA map, error = %d\n",
			    sc->bce_dev.dv_xname, error);
		}
		sc->bce_cdata.bce_rx_chain[i] = NULL;
	}

	/* Set up ifnet structure. */
	ifp = &sc->bce_ac.ac_if;
	strlcpy(ifp->if_xname, sc->bce_dev.dv_xname, IF_NAMESIZE);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = bce_ioctl;
	ifp->if_start = bce_start;
	ifp->if_watchdog = bce_watchdog;
	ifp->if_init = bce_init;
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	/* Read the station address from the chip. */
	sc->bce_ac.ac_enaddr[0] =
	    bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET0);
	sc->bce_ac.ac_enaddr[1] =
	    bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET1);
	sc->bce_ac.ac_enaddr[2] =
	    bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET2);
	sc->bce_ac.ac_enaddr[3] =
	    bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET3);
	sc->bce_ac.ac_enaddr[4] =
	    bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET4);
	sc->bce_ac.ac_enaddr[5] =
	    bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET5);
	printf(": %s, address %s\n", intrstr,
	    ether_sprintf(sc->bce_ac.ac_enaddr));

	/* Initialize our media structures and probe the MII. */
	sc->bce_mii.mii_ifp = ifp;
	sc->bce_mii.mii_readreg = bce_mii_read;
	sc->bce_mii.mii_writereg = bce_mii_write;
	sc->bce_mii.mii_statchg = bce_statchg;
	ifmedia_init(&sc->bce_mii.mii_media, 0, bce_mediachange,
	    bce_mediastatus);
	mii_attach(&sc->bce_dev, &sc->bce_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->bce_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->bce_mii.mii_media, IFM_ETHER | IFM_NONE,
		    0, NULL);
		ifmedia_set(&sc->bce_mii.mii_media, IFM_ETHER | IFM_NONE);
	} else
		ifmedia_set(&sc->bce_mii.mii_media, IFM_ETHER | IFM_AUTO);

	/* get the phy address from the device */
	sc->bce_phy = bus_space_read_1(sc->bce_btag, sc->bce_bhandle,
	    BCE_PHY) & 0x1f;

	/* enable activity led */
	bce_mii_write((struct device *) sc, 1, 26,
	    bce_mii_read((struct device *) sc, 1, 26) & 0x7fff);

	/* enable traffic meter led mode */
	bce_mii_write((struct device *) sc, 1, 27,
	    bce_mii_read((struct device *) sc, 1, 27) | (1 << 6));

	/* Attach the interface. */
	if_attach(ifp);
	ether_ifattach(ifp);
	timeout_set(&sc->bce_timeout, bce_tick, sc);
}
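/* handle ioctl requests */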
int
bce_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct bce_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct ifaddr *ifa = (struct ifaddr *) data;
	int s, error = 0;

	s = splnet();

	if ((error = ether_ioctl(ifp, &sc->bce_ac, cmd, data)) > 0) {
		splx(s);
		return (error);
	}

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;

		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			bce_init(ifp);
			arp_ifinit(&sc->bce_ac, ifa);
			break;
#endif
		default:
			bce_init(ifp);
			break;
		}
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu)
			ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				bce_set_filter(ifp);
			else
				bce_init(ifp);
		} else if (ifp->if_flags & IFF_RUNNING)
			bce_stop(ifp, 0);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->bce_ac) :
		    ether_delmulti(ifr, &sc->bce_ac);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware
			 * filter accordingly.
			 */
			if (ifp->if_flags & IFF_RUNNING)
				bce_set_filter(ifp);
			error = 0;
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->bce_mii.mii_media, cmd);
		break;
	default:
		error = ENOTTY;
		break;
	}

	if (error == 0) {
		/* Try to get more packets going. */
		bce_start(ifp);
	}

	splx(s);
	return (error);
}
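/* Start packet transmission on the interface. */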
void
bce_start(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;
	struct mbuf *m0;
	bus_dmamap_t dmamap;
	int txstart;
	int txsfree;
	int newpkts = 0;
	int error;

	/*
	 * Do nothing if the interface is not running or the output
	 * queue is already marked active.
	 */
	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/* determine the number of free transmit slots */
	if (sc->bce_txsnext >= sc->bce_txin)
		txsfree = BCE_NTXDESC - 1 + sc->bce_txin - sc->bce_txsnext;
	else
		txsfree = sc->bce_txin - sc->bce_txsnext - 1;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (txsfree > 0) {
		int seg;

		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		/* get the transmit slot dma map */
		dmamap = sc->bce_cdata.bce_tx_map[sc->bce_txsnext];

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  If the packet will not fit,
		 * it will be dropped.  If short on resources, it will
		 * be tried again later.
		 */
		error = bus_dmamap_load_mbuf(sc->bce_dmatag, dmamap, m0,
		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
		if (error == EFBIG) {
			printf("%s: Tx packet consumes too many DMA segments, "
			    "dropping...\n", sc->bce_dev.dv_xname);
			IFQ_DEQUEUE(&ifp->if_snd, m0);
			m_freem(m0);
			ifp->if_oerrors++;
			continue;
		} else if (error) {
			/* short on resources, come back later */
			printf("%s: unable to load Tx buffer, error = %d\n",
			    sc->bce_dev.dv_xname, error);
			break;
		}

		/* If not enough descriptors are available, try later. */
		if (dmamap->dm_nsegs > txsfree) {
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->bce_dmatag, dmamap);
			break;
		}

		/* We are now committed to transmitting the packet. */

		/* So take it off the queue. */
		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/* save the pointer so it can be freed later */
		sc->bce_cdata.bce_tx_chain[sc->bce_txsnext] = m0;

		/* Sync the data DMA map. */
		bus_dmamap_sync(sc->bce_dmatag, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/* Initialize the transmit descriptor(s). */
		txstart = sc->bce_txsnext;
		for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
			u_int32_t ctrl;

			ctrl = dmamap->dm_segs[seg].ds_len & CTRL_BC_MASK;
			if (seg == 0)
				ctrl |= CTRL_SOF;
			if (seg == dmamap->dm_nsegs - 1)
				ctrl |= CTRL_EOF;
			if (sc->bce_txsnext == BCE_NTXDESC - 1)
				ctrl |= CTRL_EOT;
			ctrl |= CTRL_IOC;
			sc->bce_tx_ring[sc->bce_txsnext].ctrl = htole32(ctrl);
			sc->bce_tx_ring[sc->bce_txsnext].addr =
			    htole32(dmamap->dm_segs[seg].ds_addr
			    + 0x40000000);
			if (sc->bce_txsnext + 1 > BCE_NTXDESC - 1)
				sc->bce_txsnext = 0;
			else
				sc->bce_txsnext++;
			txsfree--;
		}

		/* sync the descriptors we're using */
		bus_dmamap_sync(sc->bce_dmatag, sc->bce_ring_map,
		    sizeof(struct bce_dma_slot) * txstart + PAGE_SIZE,
		    sizeof(struct bce_dma_slot) * dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Give the packet to the chip. */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_DPTR,
		    sc->bce_txsnext * sizeof(struct bce_dma_slot));

		newpkts++;

#if NBPFILTER > 0
		/* Pass the packet to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
#endif
	}
	if (txsfree == 0) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}
	if (newpkts) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
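/* Watchdog timer handler. */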
void
bce_watchdog(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;

	printf("%s: device timeout\n", sc->bce_dev.dv_xname);
	ifp->if_oerrors++;

	(void) bce_init(ifp);

	/* Try to get more packets going. */
	bce_start(ifp);
}
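/*
 * Interrupt service routine: read and acknowledge the interrupt
 * causes, dispatch the rx/tx handlers, and reinitialize on error.
 */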
int
bce_intr(void *xsc)
{
	struct bce_softc *sc;
	struct ifnet *ifp;
	u_int32_t intstatus;
	int wantinit;
	int handled = 0;

	sc = xsc;
	ifp = &sc->bce_ac.ac_if;

	for (wantinit = 0; wantinit == 0;) {
		intstatus = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_INT_STS);

		/* ignore if not ours, or unsolicited interrupts */
		intstatus &= sc->bce_intmask;
		if (intstatus == 0)
			break;

		handled = 1;

		/* Ack interrupt */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_INT_STS,
		    intstatus);

		/* Receive interrupts. */
		if (intstatus & I_RI)
			bce_rxintr(sc);
		/* Transmit interrupts. */
		if (intstatus & I_XI)
			bce_txintr(sc);
		/* Error interrupts */
		if (intstatus & ~(I_RI | I_XI)) {
			if (intstatus & I_XU)
				printf("%s: transmit fifo underflow\n",
				    sc->bce_dev.dv_xname);
			if (intstatus & I_RO) {
				printf("%s: receive fifo overflow\n",
				    sc->bce_dev.dv_xname);
				ifp->if_ierrors++;
			}
			if (intstatus & I_RU)
				printf("%s: receive descriptor underflow\n",
				    sc->bce_dev.dv_xname);
			if (intstatus & I_DE)
				printf("%s: descriptor protocol error\n",
				    sc->bce_dev.dv_xname);
			if (intstatus & I_PD)
				printf("%s: data error\n",
				    sc->bce_dev.dv_xname);
			if (intstatus & I_PC)
				printf("%s: descriptor error\n",
				    sc->bce_dev.dv_xname);
			if (intstatus & I_TO)
				printf("%s: general purpose timeout\n",
				    sc->bce_dev.dv_xname);
			wantinit = 1;
		}
	}

	if (handled) {
		if (wantinit)
			bce_init(ifp);
		/* Try to get more packets going. */
		bce_start(ifp);
	}
	return (handled);
}
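/* Receive interrupt handler. */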
void
bce_rxintr(struct bce_softc *sc)
{
	struct ifnet *ifp = &sc->bce_ac.ac_if;
	struct rx_pph *pph;
	struct mbuf *m;
	int curr;
	int len;
	int i;

	/* get pointer to active receive slot */
	curr = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
	    BCE_DMA_RXSTATUS) & RS_CD_MASK;
	curr = curr / sizeof(struct bce_dma_slot);
	if (curr >= BCE_NRXDESC)
		curr = BCE_NRXDESC - 1;

	/* process packets up to but not including the current slot */
	for (i = sc->bce_rxin; i != curr;
	    i + 1 > BCE_NRXDESC - 1 ? i = 0 : i++) {
		/* complete any post dma memory ops on packet */
		bus_dmamap_sync(sc->bce_dmatag, sc->bce_cdata.bce_rx_map[i],
		    0, sc->bce_cdata.bce_rx_map[i]->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);

		/*
		 * If the packet had an error, simply recycle the buffer,
		 * resetting the len and flags.
		 */
		pph = mtod(sc->bce_cdata.bce_rx_chain[i], struct rx_pph *);
		if (pph->flags & (RXF_NO | RXF_RXER | RXF_CRC | RXF_OV)) {
			ifp->if_ierrors++;
			pph->len = 0;
			pph->flags = 0;
			continue;
		}
		/* receive the packet */
		len = pph->len;
		if (len == 0)
			continue;	/* no packet if empty */
		pph->len = 0;
		pph->flags = 0;
		/* bump past pre header to packet */
		sc->bce_cdata.bce_rx_chain[i]->m_data +=
		    BCE_PREPKT_HEADER_SIZE;

		/*
		 * The chip includes the CRC with every packet; trim
		 * it off here.
		 */
		len -= ETHER_CRC_LEN;

		/*
		 * If the packet is small enough to fit in a single
		 * header mbuf, allocate one and copy the data into it.
		 * This greatly reduces memory consumption when
		 * receiving lots of small packets.
		 *
		 * Otherwise, add a new buffer to the receive chain.
		 * If this fails, drop the packet and recycle the old
		 * buffer.
		 */
		if (len <= (MHLEN - 2)) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL)
				goto dropit;
			m->m_data += 2;
			memcpy(mtod(m, caddr_t),
			    mtod(sc->bce_cdata.bce_rx_chain[i], caddr_t), len);
			sc->bce_cdata.bce_rx_chain[i]->m_data -=
			    BCE_PREPKT_HEADER_SIZE;
		} else {
			m = sc->bce_cdata.bce_rx_chain[i];
			if (bce_add_rxbuf(sc, i) != 0) {
		dropit:
				ifp->if_ierrors++;
				/* continue to use old buffer */
				sc->bce_cdata.bce_rx_chain[i]->m_data -=
				    BCE_PREPKT_HEADER_SIZE;
				bus_dmamap_sync(sc->bce_dmatag,
				    sc->bce_cdata.bce_rx_map[i], 0,
				    sc->bce_cdata.bce_rx_map[i]->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
				continue;
			}
		}

		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;
		ifp->if_ipackets++;

#if NBPFILTER > 0
		/* Pass this up to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif

		/* Pass it on. */
		ether_input_mbuf(ifp, m);

		/* re-check current in case it changed */
		curr = (bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_DMA_RXSTATUS) & RS_CD_MASK) /
		    sizeof(struct bce_dma_slot);
		if (curr >= BCE_NRXDESC)
			curr = BCE_NRXDESC - 1;
	}
	sc->bce_rxin = curr;
}
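/* Transmit interrupt handler. */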
void
bce_txintr(struct bce_softc *sc)
{
	struct ifnet *ifp = &sc->bce_ac.ac_if;
	int curr;
	int i;

	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through the Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	curr = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
	    BCE_DMA_TXSTATUS) & RS_CD_MASK;
	curr = curr / sizeof(struct bce_dma_slot);
	if (curr >= BCE_NTXDESC)
		curr = BCE_NTXDESC - 1;
	for (i = sc->bce_txin; i != curr;
	    i + 1 > BCE_NTXDESC - 1 ? i = 0 : i++) {
		/* do any post dma memory ops on transmit data */
		if (sc->bce_cdata.bce_tx_chain[i] == NULL)
			continue;
		bus_dmamap_sync(sc->bce_dmatag, sc->bce_cdata.bce_tx_map[i],
		    0, sc->bce_cdata.bce_tx_map[i]->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->bce_dmatag, sc->bce_cdata.bce_tx_map[i]);
		m_freem(sc->bce_cdata.bce_tx_chain[i]);
		sc->bce_cdata.bce_tx_chain[i] = NULL;
		ifp->if_opackets++;
	}
	sc->bce_txin = curr;

	/*
	 * If there are no more pending transmissions, cancel the
	 * watchdog timer.
	 */
	if (sc->bce_txsnext == sc->bce_txin)
		ifp->if_timer = 0;
}
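/* initialize the interface */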
int
bce_init(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;
	u_int32_t reg_win;
	int error;
	int i;

	/* Cancel any pending I/O. */
	bce_stop(ifp, 0);

	/*
	 * Enable activity of the ethernet core through the Sonics
	 * bus interface.  Save the current register window first.
	 */
	reg_win = pci_conf_read(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag,
	    BCE_REG_WIN);

	/* switch the register window to the Sonics registers */
	pci_conf_write(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag, BCE_REG_WIN,
	    BCE_SONICS_WIN);

	/* enable SB to PCI interrupt */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBINTVEC,
	    bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBINTVEC) |
	    SBIV_ENET0);

	/* enable prefetch and bursts for sonics-to-pci translation 2 */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SPCI_TR2,
	    bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SPCI_TR2) |
	    SBTOPCI_PREF | SBTOPCI_BURST);

	/* restore to ethernet register space */
	pci_conf_write(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag, BCE_REG_WIN,
	    reg_win);

	/* Reset the chip to a known state. */
	bce_reset(sc);

	/* Initialize transmit descriptors */
	memset(sc->bce_tx_ring, 0, BCE_NTXDESC * sizeof(struct bce_dma_slot));
	sc->bce_txsnext = 0;
	sc->bce_txin = 0;

	/* enable crc32 generation and the led */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MACCTL,
	    bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_MACCTL) |
	    BCE_EMC_CRC32_ENAB | BCE_EMC_LED);

	/* take the MAC out of power down */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MACCTL,
	    bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_MACCTL) &
	    ~BCE_EMC_PDOWN);

	/* setup DMA interrupt control */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMAI_CTL,
	    1 << 24);

	/* setup packet filter */
	bce_set_filter(ifp);

	/* set max frame length, account for possible VLAN tag */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_RX_MAX,
	    ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_MAX,
	    ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);

	/* set tx watermark */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_WATER, 56);

	/* enable transmit and give the chip the tx ring address */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_TXCTL, XC_XE);
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_TXADDR,
	    sc->bce_ring_map->dm_segs[0].ds_addr + PAGE_SIZE + 0x40000000);

	/*
	 * Give the receive ring to the chip, and start the receive
	 * DMA engine.
	 */
	sc->bce_rxin = 0;

	/* clear the rx descriptor ring */
	memset(sc->bce_rx_ring, 0, BCE_NRXDESC * sizeof(struct bce_dma_slot));
	/* enable receive */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXCTL,
	    BCE_PREPKT_HEADER_SIZE << 1 | XC_XE);
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXADDR,
	    sc->bce_ring_map->dm_segs[0].ds_addr + 0x40000000);

	/* Initialize receive descriptors */
	for (i = 0; i < BCE_NRXDESC; i++) {
		if (sc->bce_cdata.bce_rx_chain[i] == NULL) {
			if ((error = bce_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx(%d) "
				    "mbuf, error = %d\n",
				    sc->bce_dev.dv_xname, i, error);
				bce_rxdrain(sc);
				return (error);
			}
		} else
			BCE_INIT_RXDESC(sc, i);
	}

	/* Enable interrupts */
	sc->bce_intmask =
	    I_XI | I_RI | I_XU | I_RO | I_RU | I_DE | I_PD | I_PC | I_TO;
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_INT_MASK,
	    sc->bce_intmask);

	/* start the receive dma */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXDPTR,
	    BCE_NRXDESC * sizeof(struct bce_dma_slot));

	/* set media */
	mii_mediachg(&sc->bce_mii);

	/* turn on the ethernet mac */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL,
	    bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
	    BCE_ENET_CTL) | EC_EE);

	/* start timer */
	timeout_add(&sc->bce_timeout, hz);

	/* mark as running, and no outputs active */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return (0);
}
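/* add a mac address to the hardware packet filter */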
void
bce_add_mac(struct bce_softc *sc, u_int8_t *mac, unsigned long idx)
{
	int i;
	u_int32_t rval;

	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_LOW,
	    mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5]);
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_HI,
	    mac[0] << 8 | mac[1] | 0x10000);
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_CTL,
	    idx << 16 | 8);

	/* wait for write to complete */
	for (i = 0; i < 100; i++) {
		rval = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_FILT_CTL);
		if (!(rval & 0x80000000))
			break;
		delay(10);
	}
	if (i == 100) {
		printf("%s: timed out writing pkt filter ctl\n",
		    sc->bce_dev.dv_xname);
	}
}
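/* Add a receive buffer to the indicated descriptor. */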
int
bce_add_rxbuf(struct bce_softc *sc, int idx)
{
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	if (sc->bce_cdata.bce_rx_chain[idx] != NULL)
		bus_dmamap_unload(sc->bce_dmatag,
		    sc->bce_cdata.bce_rx_map[idx]);

	sc->bce_cdata.bce_rx_chain[idx] = m;

	error = bus_dmamap_load(sc->bce_dmatag, sc->bce_cdata.bce_rx_map[idx],
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
	    BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error)
		return (error);

	bus_dmamap_sync(sc->bce_dmatag, sc->bce_cdata.bce_rx_map[idx], 0,
	    sc->bce_cdata.bce_rx_map[idx]->dm_mapsize, BUS_DMASYNC_PREREAD);

	BCE_INIT_RXDESC(sc, idx);

	return (0);
}
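/* Drain the receive queue. */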
void
bce_rxdrain(struct bce_softc *sc)
{
	int i;

	for (i = 0; i < BCE_NRXDESC; i++) {
		if (sc->bce_cdata.bce_rx_chain[i] != NULL) {
			bus_dmamap_unload(sc->bce_dmatag,
			    sc->bce_cdata.bce_rx_map[i]);
			m_freem(sc->bce_cdata.bce_rx_chain[i]);
			sc->bce_cdata.bce_rx_chain[i] = NULL;
		}
	}
}
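/* Stop transmission on the interface. */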
void
bce_stop(struct ifnet *ifp, int disable)
{
	struct bce_softc *sc = ifp->if_softc;
	int i;
	u_int32_t val;

	/* Stop the 1 second timer */
	timeout_del(&sc->bce_timeout);

	/* Mark the interface down and cancel the watchdog timer. */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	/* Down the MII. */
	mii_down(&sc->bce_mii);

	/* Disable interrupts. */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_INT_MASK, 0);
	sc->bce_intmask = 0;
	delay(10);

	/* Disable emac */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL, EC_ED);
	for (i = 0; i < 200; i++) {
		val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_ENET_CTL);
		if (!(val & EC_ED))
			break;
		delay(10);
	}

	/* Stop the DMA */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXCTL, 0);
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_TXCTL, 0);
	delay(10);

	/* Release any queued transmit buffers. */
	for (i = 0; i < BCE_NTXDESC; i++) {
		if (sc->bce_cdata.bce_tx_chain[i] != NULL) {
			bus_dmamap_unload(sc->bce_dmatag,
			    sc->bce_cdata.bce_tx_map[i]);
			m_freem(sc->bce_cdata.bce_tx_chain[i]);
			sc->bce_cdata.bce_tx_chain[i] = NULL;
		}
	}

	/* drain receive queue */
	if (disable)
		bce_rxdrain(sc);
}
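/* reset the chip */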
void
bce_reset(struct bce_softc *sc)
{
	u_int32_t val;
	u_int32_t sbval;
	int i;

	/* if the SB core is up, quiesce it first */
	sbval = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
	    BCE_SBTMSTATELOW);
	if ((sbval & (SBTML_RESET | SBTML_REJ | SBTML_CLK)) == SBTML_CLK) {
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMAI_CTL,
		    0);

		/* disable emac */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL,
		    EC_ED);
		for (i = 0; i < 200; i++) {
			val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
			    BCE_ENET_CTL);
			if (!(val & EC_ED))
				break;
			delay(10);
		}
		if (i == 200)
			printf("%s: timed out disabling ethernet mac\n",
			    sc->bce_dev.dv_xname);

		/* reset the dma engines */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_TXCTL,
		    0);
		val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_DMA_RXSTATUS);
		/* if error on receive, wait to go idle */
		if (val & RS_ERROR) {
			for (i = 0; i < 100; i++) {
				val = bus_space_read_4(sc->bce_btag,
				    sc->bce_bhandle, BCE_DMA_RXSTATUS);
				if (val & RS_DMA_IDLE)
					break;
				delay(10);
			}
			if (i == 100)
				printf("%s: receive dma did not go idle after"
				    " error\n", sc->bce_dev.dv_xname);
		}
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_DMA_RXSTATUS, 0);

		/* reset ethernet mac */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL,
		    EC_ES);
		for (i = 0; i < 200; i++) {
			val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
			    BCE_ENET_CTL);
			if (!(val & EC_ES))
				break;
			delay(10);
		}
		if (i == 200)
			printf("%s: timed out resetting ethernet mac\n",
			    sc->bce_dev.dv_xname);
	} else {
		u_int32_t reg_win;

		/* remap the pci registers to the Sonics config registers */
		reg_win = pci_conf_read(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag,
		    BCE_REG_WIN);
		/* switch the register window to the Sonics registers */
		pci_conf_write(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag,
		    BCE_REG_WIN, BCE_SONICS_WIN);

		/* enable SB to PCI interrupt */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBINTVEC,
		    bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_SBINTVEC) | SBIV_ENET0);

		/* enable prefetch and bursts for sonics-to-pci translation 2 */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SPCI_TR2,
		    bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_SPCI_TR2) | SBTOPCI_PREF | SBTOPCI_BURST);

		/* restore to ethernet register space */
		pci_conf_write(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag,
		    BCE_REG_WIN, reg_win);
	}

	/* disable the SB core if not already in reset */
	if (!(sbval & SBTML_RESET)) {
		/* set the reject bit */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_SBTMSTATELOW, SBTML_REJ | SBTML_CLK);
		for (i = 0; i < 200; i++) {
			val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
			    BCE_SBTMSTATELOW);
			if (val & SBTML_REJ)
				break;
			delay(1);
		}
		if (i == 200)
			printf("%s: while resetting core, reject did not set\n",
			    sc->bce_dev.dv_xname);

		/* wait until busy is clear */
		for (i = 0; i < 200; i++) {
			val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
			    BCE_SBTMSTATEHI);
			if (!(val & 0x4))
				break;
			delay(1);
		}
		if (i == 200)
			printf("%s: while resetting core, busy did not clear\n",
			    sc->bce_dev.dv_xname);

		/* set reset and reject while enabling the clocks */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_SBTMSTATELOW,
		    SBTML_FGC | SBTML_CLK | SBTML_REJ | SBTML_RESET);
		val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_SBTMSTATELOW);
		delay(10);
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_SBTMSTATELOW, SBTML_REJ | SBTML_RESET);
		delay(1);
	}

	/* enable the clock with reset still asserted */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW,
	    SBTML_FGC | SBTML_CLK | SBTML_RESET);
	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
	    BCE_SBTMSTATELOW);
	delay(1);

	/* clear any error bits that may be on */
	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATEHI);
	if (val & 1)
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_SBTMSTATEHI, 0);
	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBIMSTATE);
	if (val & SBIM_ERRORBITS)
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBIMSTATE,
		    val & ~SBIM_ERRORBITS);

	/* clear reset and allow it to propagate throughout the core */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW,
	    SBTML_FGC | SBTML_CLK);
	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
	    BCE_SBTMSTATELOW);
	delay(1);

	/* leave clock enabled */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW,
	    SBTML_CLK);
	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
	    BCE_SBTMSTATELOW);
	delay(1);

	/* set up the MII management interface */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_CTL, 0x8d);

	/* enable the phy; the sequence differs for internal and external */
	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_DEVCTL);
	if (!(val & BCE_DC_IP)) {
		/* select external phy */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL,
		    EC_EP);
	} else if (val & BCE_DC_ER) {
		/* internal phy in reset: take it out of reset */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DEVCTL,
		    val & ~BCE_DC_ER);
		delay(100);
	}
}
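/* Set up the receive filter. */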
void
bce_set_filter(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_PROMISC) {
		ifp->if_flags |= IFF_ALLMULTI;
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_RX_CTL,
		    bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_RX_CTL) | ERC_PE);
	} else {
		ifp->if_flags &= ~IFF_ALLMULTI;

		/* turn off promiscuous */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_RX_CTL,
		    bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_RX_CTL) & ~ERC_PE);

		/* enable/disable broadcast */
		if (ifp->if_flags & IFF_BROADCAST)
			bus_space_write_4(sc->bce_btag, sc->bce_bhandle,
			    BCE_RX_CTL, bus_space_read_4(sc->bce_btag,
			    sc->bce_bhandle, BCE_RX_CTL) & ~ERC_DB);
		else
			bus_space_write_4(sc->bce_btag, sc->bce_bhandle,
			    BCE_RX_CTL, bus_space_read_4(sc->bce_btag,
			    sc->bce_bhandle, BCE_RX_CTL) | ERC_DB);

		/* disable the filter */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_CTL,
		    0);

		/* add our own address */
		bce_add_mac(sc, sc->bce_ac.ac_enaddr, 0);

		/* for now accept all multicast */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_RX_CTL,
		    bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_RX_CTL) | ERC_AM);
		ifp->if_flags |= IFF_ALLMULTI;

		/* enable the filter */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_CTL,
		    bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_FILT_CTL) | 1);
	}
}
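/* Read a PHY register on the MII. */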
int
bce_mii_read(struct device *self, int phy, int reg)
{
	struct bce_softc *sc = (struct bce_softc *) self;
	int i;
	u_int32_t val;

	/* clear mii_int */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_STS,
	    BCE_MIINTR);

	/* Read the PHY register */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_COMM,
	    (MII_COMMAND_READ << 28) | (MII_COMMAND_START << 30) |
	    (MII_COMMAND_ACK << 16) | BCE_MIPHY(phy) | BCE_MIREG(reg));

	for (i = 0; i < BCE_TIMEOUT; i++) {
		val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_MI_STS);
		if (val & BCE_MIINTR)
			break;
		delay(10);
	}
	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_COMM);
	if (i == BCE_TIMEOUT) {
		printf("%s: PHY read timed out reading phy %d, reg %d, val = "
		    "0x%08x\n", sc->bce_dev.dv_xname, phy, reg, val);
		return (0);
	}
	return (val & BCE_MICOMM_DATA);
}
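/* Write a PHY register on the MII. */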
void
bce_mii_write(struct device *self, int phy, int reg, int val)
{
	struct bce_softc *sc = (struct bce_softc *) self;
	int i;
	u_int32_t rval;

	/* clear mii_int */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_STS,
	    BCE_MIINTR);

	/* Write the PHY register */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_COMM,
	    (MII_COMMAND_WRITE << 28) | (MII_COMMAND_START << 30) |
	    (MII_COMMAND_ACK << 16) | (val & BCE_MICOMM_DATA) |
	    BCE_MIPHY(phy) | BCE_MIREG(reg));

	/* wait for write to complete */
	for (i = 0; i < BCE_TIMEOUT; i++) {
		rval = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_MI_STS);
		if (rval & BCE_MIINTR)
			break;
		delay(10);
	}
	rval = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_COMM);
	if (i == BCE_TIMEOUT) {
		printf("%s: PHY timed out writing phy %d, reg %d, val "
		    "= 0x%08x\n", sc->bce_dev.dv_xname, phy, reg, val);
	}
}
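/* sync hardware duplex mode to software state */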
void
bce_statchg(struct device *self)
{
	struct bce_softc *sc = (struct bce_softc *) self;
	u_int32_t reg;

	/* if needed, change register to match duplex mode */
	reg = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_CTL);
	if (sc->bce_mii.mii_media_active & IFM_FDX && !(reg & EXC_FD))
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_CTL,
		    reg | EXC_FD);
	else if (!(sc->bce_mii.mii_media_active & IFM_FDX) && reg & EXC_FD)
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_CTL,
		    reg & ~EXC_FD);

	/* enable activity led */
	bce_mii_write((struct device *) sc, 1, 26,
	    bce_mii_read((struct device *) sc, 1, 26) & 0x7fff);

	/*
	 * Enable traffic meter led mode.  The original code wrote the
	 * value back to register 26; register 27 matches the identical
	 * sequence in bce_attach() and is presumably what was intended.
	 */
	bce_mii_write((struct device *) sc, 1, 27,
	    bce_mii_read((struct device *) sc, 1, 27) | (1 << 6));
}
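/* Set hardware to the newly-selected media. */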
int
bce_mediachange(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_UP)
		mii_mediachg(&sc->bce_mii);
	return (0);
}
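/* Get the current interface media status. */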
void
bce_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct bce_softc *sc = ifp->if_softc;

	mii_pollstat(&sc->bce_mii);
	ifmr->ifm_active = sc->bce_mii.mii_media_active;
	ifmr->ifm_status = sc->bce_mii.mii_media_status;
}
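/* One second timer, used to check link status. */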
void
bce_tick(void *v)
{
	struct bce_softc *sc = v;

	/* Tick the MII. */
	mii_tick(&sc->bce_mii);

	timeout_add(&sc->bce_timeout, hz);
}