This source file includes the following definitions:
- re_set_bufaddr
- re_eeprom_putbyte
- re_eeprom_getword
- re_read_eeprom
- re_gmii_readreg
- re_gmii_writereg
- re_miibus_readreg
- re_miibus_writereg
- re_miibus_statchg
- re_setmulti
- re_setpromisc
- re_reset
- re_diag
- re_attach
- re_newbuf
- re_tx_list_init
- re_rx_list_init
- re_rxeof
- re_txeof
- re_tick
- re_intr
- re_encap
- re_start
- re_init
- re_ifmedia_upd
- re_ifmedia_sts
- re_ioctl
- re_watchdog
- re_stop
#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/timeout.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif

#if NVLAN > 0
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/ic/rtl81x9reg.h>
#include <dev/ic/revar.h>

#ifdef RE_DEBUG
int redebug = 0;
#define DPRINTF(x)	do { if (redebug) printf x; } while (0)
#else
#define DPRINTF(x)
#endif

static inline void re_set_bufaddr(struct rl_desc *, bus_addr_t);

int	re_encap(struct rl_softc *, struct mbuf *, int *);

int	re_newbuf(struct rl_softc *, int, struct mbuf *);
int	re_rx_list_init(struct rl_softc *);
int	re_tx_list_init(struct rl_softc *);
void	re_rxeof(struct rl_softc *);
void	re_txeof(struct rl_softc *);
void	re_tick(void *);
void	re_start(struct ifnet *);
int	re_ioctl(struct ifnet *, u_long, caddr_t);
void	re_watchdog(struct ifnet *);
int	re_ifmedia_upd(struct ifnet *);
void	re_ifmedia_sts(struct ifnet *, struct ifmediareq *);

void	re_eeprom_putbyte(struct rl_softc *, int);
void	re_eeprom_getword(struct rl_softc *, int, u_int16_t *);
void	re_read_eeprom(struct rl_softc *, caddr_t, int, int);

int	re_gmii_readreg(struct device *, int, int);
void	re_gmii_writereg(struct device *, int, int, int);

int	re_miibus_readreg(struct device *, int, int);
void	re_miibus_writereg(struct device *, int, int, int);
void	re_miibus_statchg(struct device *);

void	re_setmulti(struct rl_softc *);
void	re_setpromisc(struct rl_softc *);
void	re_reset(struct rl_softc *);

#ifdef RE_DIAG
int	re_diag(struct rl_softc *);
#endif

struct cfdriver re_cd = {
	0, "re", DV_IFNET
};

#define EE_SET(x)					\
	CSR_WRITE_1(sc, RL_EECMD,			\
	    CSR_READ_1(sc, RL_EECMD) | x)

#define EE_CLR(x)					\
	CSR_WRITE_1(sc, RL_EECMD,			\
	    CSR_READ_1(sc, RL_EECMD) & ~x)

static const struct re_revision {
	u_int32_t		re_chipid;
	const char		*re_name;
} re_revisions[] = {
	{ RL_HWREV_8169,		"RTL8169" },
	{ RL_HWREV_8110S,		"RTL8110S" },
	{ RL_HWREV_8169S,		"RTL8169S" },
	{ RL_HWREV_8169_8110SB,		"RTL8169/8110SB" },
	{ RL_HWREV_8169_8110SCd,	"RTL8169/8110SCd" },
	{ RL_HWREV_8168_SPIN1,		"RTL8168 1" },
	{ RL_HWREV_8100E_SPIN1,		"RTL8100E 1" },
	{ RL_HWREV_8101E,		"RTL8101E" },
	{ RL_HWREV_8168_SPIN2,		"RTL8168 2" },
	{ RL_HWREV_8168_SPIN3,		"RTL8168 3" },
	{ RL_HWREV_8100E_SPIN2,		"RTL8100E 2" },
	{ RL_HWREV_8139CPLUS,		"RTL8139C+" },
	{ RL_HWREV_8101,		"RTL8101" },
	{ RL_HWREV_8100,		"RTL8100" },
	{ RL_HWREV_8169_8110SCe,	"RTL8169/8110SCe" },

	{ 0, NULL }
};

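/*
 * Store a buffer address in a descriptor.  The hardware wants the
 * 64-bit address split into two 32-bit little-endian words.
 */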
static inline void
re_set_bufaddr(struct rl_desc *d, bus_addr_t addr)
{
	d->rl_bufaddr_lo = htole32((uint32_t)addr);
	if (sizeof(bus_addr_t) == sizeof(uint64_t))
		d->rl_bufaddr_hi = htole32((uint64_t)addr >> 32);
	else
		d->rl_bufaddr_hi = 0;
}

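/*
 * Clock the read command and address out to the EEPROM, one bit
 * at a time.
 */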
void
re_eeprom_putbyte(struct rl_softc *sc, int addr)
{
	int	d, i;

	d = addr | (RL_9346_READ << sc->rl_eewidth);

	for (i = 1 << (sc->rl_eewidth + 3); i; i >>= 1) {
		if (d & i)
			EE_SET(RL_EE_DATAIN);
		else
			EE_CLR(RL_EE_DATAIN);
		DELAY(100);
		EE_SET(RL_EE_CLK);
		DELAY(150);
		EE_CLR(RL_EE_CLK);
		DELAY(100);
	}
}

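/*
 * Read a word of data stored in the EEPROM at address 'addr'.
 */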
void
re_eeprom_getword(struct rl_softc *sc, int addr, u_int16_t *dest)
{
	int		i;
	u_int16_t	word = 0;

	re_eeprom_putbyte(sc, addr);

	for (i = 0x8000; i; i >>= 1) {
		EE_SET(RL_EE_CLK);
		DELAY(100);
		if (CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT)
			word |= i;
		EE_CLR(RL_EE_CLK);
		DELAY(100);
	}

	*dest = word;
}

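/*
 * Read a sequence of words from the EEPROM.
 */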
void
re_read_eeprom(struct rl_softc *sc, caddr_t dest, int off, int cnt)
{
	int		i;
	u_int16_t	word = 0, *ptr;

	CSR_SETBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM);

	DELAY(100);

	for (i = 0; i < cnt; i++) {
		CSR_SETBIT_1(sc, RL_EECMD, RL_EE_SEL);
		re_eeprom_getword(sc, off + i, &word);
		CSR_CLRBIT_1(sc, RL_EECMD, RL_EE_SEL);
		ptr = (u_int16_t *)(dest + (i * 2));
		*ptr = word;
	}

	CSR_CLRBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM);
}

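/*
 * Read a PHY register through the GMII access register (PHYAR)
 * used by the 8169/8110 family.
 */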
int
re_gmii_readreg(struct device *self, int phy, int reg)
{
	struct rl_softc	*sc = (struct rl_softc *)self;
	u_int32_t	rval;
	int		i;

	if (phy != 7)
		return (0);

	if (reg == RL_GMEDIASTAT) {
		rval = CSR_READ_1(sc, RL_GMEDIASTAT);
		return (rval);
	}

	CSR_WRITE_4(sc, RL_PHYAR, reg << 16);
	DELAY(1000);

	for (i = 0; i < RL_TIMEOUT; i++) {
		rval = CSR_READ_4(sc, RL_PHYAR);
		if (rval & RL_PHYAR_BUSY)
			break;
		DELAY(100);
	}

	if (i == RL_TIMEOUT) {
		printf("%s: PHY read failed\n", sc->sc_dev.dv_xname);
		return (0);
	}

	return (rval & RL_PHYAR_PHYDATA);
}

void
re_gmii_writereg(struct device *dev, int phy, int reg, int data)
{
	struct rl_softc	*sc = (struct rl_softc *)dev;
	u_int32_t	rval;
	int		i;

	CSR_WRITE_4(sc, RL_PHYAR, (reg << 16) |
	    (data & RL_PHYAR_PHYDATA) | RL_PHYAR_BUSY);
	DELAY(1000);

	for (i = 0; i < RL_TIMEOUT; i++) {
		rval = CSR_READ_4(sc, RL_PHYAR);
		if (!(rval & RL_PHYAR_BUSY))
			break;
		DELAY(100);
	}

	if (i == RL_TIMEOUT)
		printf("%s: PHY write failed\n", sc->sc_dev.dv_xname);
}

int
re_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct rl_softc	*sc = (struct rl_softc *)dev;
	u_int16_t	rval = 0;
	u_int16_t	re8139_reg = 0;
	int		s;

	s = splnet();

	if (sc->rl_type == RL_8169) {
		rval = re_gmii_readreg(dev, phy, reg);
		splx(s);
		return (rval);
	}

	if (phy) {
		splx(s);
		return (0);
	}
	switch (reg) {
	case MII_BMCR:
		re8139_reg = RL_BMCR;
		break;
	case MII_BMSR:
		re8139_reg = RL_BMSR;
		break;
	case MII_ANAR:
		re8139_reg = RL_ANAR;
		break;
	case MII_ANER:
		re8139_reg = RL_ANER;
		break;
	case MII_ANLPAR:
		re8139_reg = RL_LPAR;
		break;
	case MII_PHYIDR1:
	case MII_PHYIDR2:
		splx(s);
		return (0);

	case RL_MEDIASTAT:
		rval = CSR_READ_1(sc, RL_MEDIASTAT);
		splx(s);
		return (rval);
	default:
		printf("%s: bad phy register %x\n", sc->sc_dev.dv_xname, reg);
		splx(s);
		return (0);
	}
	rval = CSR_READ_2(sc, re8139_reg);
	if (sc->rl_type == RL_8139CPLUS && re8139_reg == RL_BMCR) {
		rval &= ~(BMCR_LOOP | BMCR_ISO);
	}
	splx(s);
	return (rval);
}

void
re_miibus_writereg(struct device *dev, int phy, int reg, int data)
{
	struct rl_softc	*sc = (struct rl_softc *)dev;
	u_int16_t	re8139_reg = 0;
	int		s;

	s = splnet();

	if (sc->rl_type == RL_8169) {
		re_gmii_writereg(dev, phy, reg, data);
		splx(s);
		return;
	}

	if (phy) {
		splx(s);
		return;
	}
	switch (reg) {
	case MII_BMCR:
		re8139_reg = RL_BMCR;
		if (sc->rl_type == RL_8139CPLUS) {
			data &= ~(BMCR_LOOP | BMCR_ISO);
		}
		break;
	case MII_BMSR:
		re8139_reg = RL_BMSR;
		break;
	case MII_ANAR:
		re8139_reg = RL_ANAR;
		break;
	case MII_ANER:
		re8139_reg = RL_ANER;
		break;
	case MII_ANLPAR:
		re8139_reg = RL_LPAR;
		break;
	case MII_PHYIDR1:
	case MII_PHYIDR2:
		splx(s);
		return;
		break;
	default:
		printf("%s: bad phy register %x\n", sc->sc_dev.dv_xname, reg);
		splx(s);
		return;
	}
	CSR_WRITE_2(sc, re8139_reg, data);
	splx(s);
}

void
re_miibus_statchg(struct device *dev)
{
}

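/*
 * Program the 64-bit multicast hash filter.
 */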
void
re_setmulti(struct rl_softc *sc)
{
	struct ifnet	*ifp;
	int		h = 0;
	u_int32_t	hashes[2] = { 0, 0 };
	u_int32_t	hwrev, rxfilt;
	int		mcnt = 0;
	struct arpcom	*ac = &sc->sc_arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;

	ifp = &sc->sc_arpcom.ac_if;

	rxfilt = CSR_READ_4(sc, RL_RXCFG);

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		rxfilt |= RL_RXCFG_RX_MULTI;
		CSR_WRITE_4(sc, RL_RXCFG, rxfilt);
		CSR_WRITE_4(sc, RL_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, RL_MAR4, 0xFFFFFFFF);
		return;
	}

	CSR_WRITE_4(sc, RL_MAR0, 0);
	CSR_WRITE_4(sc, RL_MAR4, 0);

	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			ifp->if_flags |= IFF_ALLMULTI;
			mcnt = MAX_NUM_MULTICAST_ADDRESSES;
		}
		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
			break;

		h = (ether_crc32_be(enm->enm_addrlo,
		    ETHER_ADDR_LEN) >> 26) & 0x0000003F;
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}

	if (mcnt)
		rxfilt |= RL_RXCFG_RX_MULTI;
	else
		rxfilt &= ~RL_RXCFG_RX_MULTI;

	CSR_WRITE_4(sc, RL_RXCFG, rxfilt);

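	/*
	 * On some chip revisions (8100E, 8101E, 8168) the multicast hash
	 * registers apparently have the opposite layout, so the two hash
	 * words are written swapped and byte-reversed there.
	 */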
	hwrev = CSR_READ_4(sc, RL_TXCFG) & RL_TXCFG_HWREV;
	if (hwrev == RL_HWREV_8100E_SPIN1 || hwrev == RL_HWREV_8100E_SPIN2 ||
	    hwrev == RL_HWREV_8101E || hwrev == RL_HWREV_8168_SPIN1 ||
	    hwrev == RL_HWREV_8168_SPIN2) {
		CSR_WRITE_4(sc, RL_MAR0, swap32(hashes[1]));
		CSR_WRITE_4(sc, RL_MAR4, swap32(hashes[0]));
	} else {
		CSR_WRITE_4(sc, RL_MAR0, hashes[0]);
		CSR_WRITE_4(sc, RL_MAR4, hashes[1]);
	}
}

void
re_setpromisc(struct rl_softc *sc)
{
	struct ifnet	*ifp;
	u_int32_t	rxcfg = 0;

	ifp = &sc->sc_arpcom.ac_if;

	rxcfg = CSR_READ_4(sc, RL_RXCFG);
	if (ifp->if_flags & IFF_PROMISC)
		rxcfg |= RL_RXCFG_RX_ALLPHYS;
	else
		rxcfg &= ~RL_RXCFG_RX_ALLPHYS;
	CSR_WRITE_4(sc, RL_RXCFG, rxcfg);
}

void
re_reset(struct rl_softc *sc)
{
	int	i;

	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET);

	for (i = 0; i < RL_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET))
			break;
	}
	if (i == RL_TIMEOUT)
		printf("%s: reset never completed!\n", sc->sc_dev.dv_xname);

	CSR_WRITE_1(sc, 0x82, 1);
}

#ifdef RE_DIAG

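/*
 * Loopback diagnostic run at attach time on the 8169.  Put the PHY in
 * loopback mode, transmit a frame to ourselves and verify that it is
 * received intact.  As the warning printfs below explain, this is meant
 * to catch defective 32-bit NICs plugged into 64-bit PCI slots, which
 * can silently corrupt DMA transfers.
 */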
int
re_diag(struct rl_softc *sc)
{
	struct ifnet		*ifp = &sc->sc_arpcom.ac_if;
	struct mbuf		*m0;
	struct ether_header	*eh;
	struct rl_rxsoft	*rxs;
	struct rl_desc		*cur_rx;
	bus_dmamap_t		dmamap;
	u_int16_t		status;
	u_int32_t		rxstat;
	int			total_len, i, s, error = 0, phyaddr;
	u_int8_t		dst[] = { 0x00, 'h', 'e', 'l', 'l', 'o' };
	u_int8_t		src[] = { 0x00, 'w', 'o', 'r', 'l', 'd' };

	DPRINTF(("inside re_diag\n"));

	MGETHDR(m0, M_DONTWAIT, MT_DATA);
	if (m0 == NULL)
		return (ENOBUFS);

	ifp->if_flags |= IFF_PROMISC;
	sc->rl_testmode = 1;
	re_reset(sc);
	re_init(ifp);
	sc->rl_link = 1;
	if (sc->rl_type == RL_8169)
		phyaddr = 1;
	else
		phyaddr = 0;

	re_miibus_writereg((struct device *)sc, phyaddr, MII_BMCR,
	    BMCR_RESET);
	for (i = 0; i < RL_TIMEOUT; i++) {
		status = re_miibus_readreg((struct device *)sc,
		    phyaddr, MII_BMCR);
		if (!(status & BMCR_RESET))
			break;
	}

	re_miibus_writereg((struct device *)sc, phyaddr, MII_BMCR,
	    BMCR_LOOP);
	CSR_WRITE_2(sc, RL_ISR, RL_INTRS);

	DELAY(100000);

	eh = mtod(m0, struct ether_header *);
	bcopy((char *)&dst, eh->ether_dhost, ETHER_ADDR_LEN);
	bcopy((char *)&src, eh->ether_shost, ETHER_ADDR_LEN);
	eh->ether_type = htons(ETHERTYPE_IP);
	m0->m_pkthdr.len = m0->m_len = ETHER_MIN_LEN - ETHER_CRC_LEN;

	CSR_WRITE_2(sc, RL_ISR, 0xFFFF);
	s = splnet();
	IFQ_ENQUEUE(&ifp->if_snd, m0, NULL, error);
	re_start(ifp);
	splx(s);
	m0 = NULL;

	DPRINTF(("re_diag: transmission started\n"));

	DELAY(100000);
	for (i = 0; i < RL_TIMEOUT; i++) {
		status = CSR_READ_2(sc, RL_ISR);
		CSR_WRITE_2(sc, RL_ISR, status);
		if ((status & (RL_ISR_TIMEOUT_EXPIRED|RL_ISR_RX_OK)) ==
		    (RL_ISR_TIMEOUT_EXPIRED|RL_ISR_RX_OK))
			break;
		DELAY(10);
	}
	if (i == RL_TIMEOUT) {
		printf("%s: diagnostic failed, failed to receive packet "
		    "in loopback mode\n", sc->sc_dev.dv_xname);
		error = EIO;
		goto done;
	}

	rxs = &sc->rl_ldata.rl_rxsoft[0];
	dmamap = rxs->rxs_dmamap;
	bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->sc_dmat, dmamap);

	m0 = rxs->rxs_mbuf;
	rxs->rxs_mbuf = NULL;
	eh = mtod(m0, struct ether_header *);

	RL_RXDESCSYNC(sc, 0, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	cur_rx = &sc->rl_ldata.rl_rx_list[0];
	rxstat = letoh32(cur_rx->rl_cmdstat);
	total_len = rxstat & sc->rl_rxlenmask;

	if (total_len != ETHER_MIN_LEN) {
		printf("%s: diagnostic failed, received short packet\n",
		    sc->sc_dev.dv_xname);
		error = EIO;
		goto done;
	}

	DPRINTF(("re_diag: packet received\n"));

	if (bcmp((char *)&eh->ether_dhost, (char *)&dst, ETHER_ADDR_LEN) ||
	    bcmp((char *)&eh->ether_shost, (char *)&src, ETHER_ADDR_LEN) ||
	    ntohs(eh->ether_type) != ETHERTYPE_IP) {
		printf("%s: WARNING, DMA FAILURE!\n", sc->sc_dev.dv_xname);
		printf("%s: expected TX data: %s",
		    sc->sc_dev.dv_xname, ether_sprintf(dst));
		printf("/%s/0x%x\n", ether_sprintf(src), ETHERTYPE_IP);
		printf("%s: received RX data: %s",
		    sc->sc_dev.dv_xname,
		    ether_sprintf(eh->ether_dhost));
		printf("/%s/0x%x\n", ether_sprintf(eh->ether_shost),
		    ntohs(eh->ether_type));
		printf("%s: You may have a defective 32-bit NIC plugged "
		    "into a 64-bit PCI slot.\n", sc->sc_dev.dv_xname);
		printf("%s: Please re-install the NIC in a 32-bit slot "
		    "for proper operation.\n", sc->sc_dev.dv_xname);
		printf("%s: Read the re(4) man page for more details.\n",
		    sc->sc_dev.dv_xname);
		error = EIO;
	}

done:
	sc->rl_testmode = 0;
	sc->rl_link = 0;
	ifp->if_flags &= ~IFF_PROMISC;
	re_stop(ifp, 1);
	if (m0 != NULL)
		m_freem(m0);
	DPRINTF(("leaving re_diag\n"));

	return (error);
}

#endif

#ifdef __armish__

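/*
 * Some armish boards carry a placeholder MAC address (00:14:fd:10:00:00)
 * in the EEPROM.  In that case re_attach() falls back to the address
 * found in the ID registers, presumably left there by the boot loader;
 * it is captured once here and shared by all instances.
 */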
union {
	u_int32_t	eaddr_word[2];
	u_char		eaddr[ETHER_ADDR_LEN];
} boot_eaddr;
int boot_eaddr_valid;
#endif

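/*
 * Attach the interface.  Called by the bus front-end once the chip has
 * been mapped; reads the station address, allocates the DMA descriptor
 * rings and DMA maps, and attaches the PHY and the network interface.
 */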
int
re_attach(struct rl_softc *sc, const char *intrstr)
{
	u_char		eaddr[ETHER_ADDR_LEN];
	u_int16_t	as[ETHER_ADDR_LEN / 2];
	struct ifnet	*ifp;
	u_int16_t	re_did = 0;
	int		error = 0, i;
	u_int32_t	hwrev;
	const struct re_revision *rr;
	const char	*re_name = NULL;

	re_reset(sc);

	sc->rl_eewidth = RL_9356_ADDR_LEN;
	re_read_eeprom(sc, (caddr_t)&re_did, 0, 1);
	if (re_did != 0x8129)
		sc->rl_eewidth = RL_9346_ADDR_LEN;

	re_read_eeprom(sc, (caddr_t)as, RL_EE_EADDR, 3);
	for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
		as[i] = letoh16(as[i]);
	bcopy(as, eaddr, sizeof(eaddr));

#ifdef __armish__
	if (eaddr[0] == 0x00 && eaddr[1] == 0x14 && eaddr[2] == 0xfd &&
	    eaddr[3] == 0x10 && eaddr[4] == 0x00 && eaddr[5] == 0x00) {
		if (boot_eaddr_valid == 0) {
			boot_eaddr.eaddr_word[1] =
			    letoh32(CSR_READ_4(sc, RL_IDR4));
			boot_eaddr.eaddr_word[0] =
			    letoh32(CSR_READ_4(sc, RL_IDR0));
			boot_eaddr_valid = 1;
		}

		bcopy(boot_eaddr.eaddr, eaddr, sizeof(eaddr));
		eaddr[5] += sc->sc_dev.dv_unit;
	}
#endif

	if (sc->rl_type == RL_8169) {
		sc->rl_rxlenmask = RL_RDESC_STAT_GFRAGLEN;
		sc->rl_txstart = RL_GTXSTART;
		sc->rl_ldata.rl_tx_desc_cnt = RL_TX_DESC_CNT_8169;
	} else {
		sc->rl_rxlenmask = RL_RDESC_STAT_FRAGLEN;
		sc->rl_txstart = RL_TXSTART;
		sc->rl_ldata.rl_tx_desc_cnt = RL_TX_DESC_CNT_8139;
	}

	bcopy(eaddr, (char *)&sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);

	hwrev = CSR_READ_4(sc, RL_TXCFG) & RL_TXCFG_HWREV;
	for (rr = re_revisions; rr->re_name != NULL; rr++) {
		if (rr->re_chipid == hwrev)
			re_name = rr->re_name;
	}

	if (re_name == NULL)
		printf(": unknown ASIC (0x%04x)", hwrev >> 16);
	else
		printf(": %s (0x%04x)", re_name, hwrev >> 16);

	printf(", %s, address %s\n", intrstr,
	    ether_sprintf(sc->sc_arpcom.ac_enaddr));

	if (sc->rl_ldata.rl_tx_desc_cnt >
	    PAGE_SIZE / sizeof(struct rl_desc)) {
		sc->rl_ldata.rl_tx_desc_cnt =
		    PAGE_SIZE / sizeof(struct rl_desc);
	}

	if ((error = bus_dmamem_alloc(sc->sc_dmat, RL_TX_LIST_SZ(sc),
	    RL_RING_ALIGN, 0, &sc->rl_ldata.rl_tx_listseg, 1,
	    &sc->rl_ldata.rl_tx_listnseg, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: can't allocate tx listseg, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->rl_ldata.rl_tx_listseg,
	    sc->rl_ldata.rl_tx_listnseg, RL_TX_LIST_SZ(sc),
	    (caddr_t *)&sc->rl_ldata.rl_tx_list,
	    BUS_DMA_COHERENT | BUS_DMA_NOWAIT)) != 0) {
		printf("%s: can't map tx list, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}
	memset(sc->rl_ldata.rl_tx_list, 0, RL_TX_LIST_SZ(sc));

	if ((error = bus_dmamap_create(sc->sc_dmat, RL_TX_LIST_SZ(sc), 1,
	    RL_TX_LIST_SZ(sc), 0, 0,
	    &sc->rl_ldata.rl_tx_list_map)) != 0) {
		printf("%s: can't create tx list map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat,
	    sc->rl_ldata.rl_tx_list_map, sc->rl_ldata.rl_tx_list,
	    RL_TX_LIST_SZ(sc), NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: can't load tx list, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	for (i = 0; i < RL_TX_QLEN; i++) {
		error = bus_dmamap_create(sc->sc_dmat,
		    RL_JUMBO_FRAMELEN,
		    RL_TX_DESC_CNT(sc) - RL_NTXDESC_RSVD, RL_TDESC_CMD_FRAGLEN,
		    0, 0, &sc->rl_ldata.rl_txq[i].txq_dmamap);
		if (error) {
			printf("%s: can't create DMA map for TX\n",
			    sc->sc_dev.dv_xname);
			goto fail_4;
		}
	}

	if ((error = bus_dmamem_alloc(sc->sc_dmat, RL_RX_DMAMEM_SZ,
	    RL_RING_ALIGN, 0, &sc->rl_ldata.rl_rx_listseg, 1,
	    &sc->rl_ldata.rl_rx_listnseg, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: can't allocate rx listnseg, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_4;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->rl_ldata.rl_rx_listseg,
	    sc->rl_ldata.rl_rx_listnseg, RL_RX_DMAMEM_SZ,
	    (caddr_t *)&sc->rl_ldata.rl_rx_list,
	    BUS_DMA_COHERENT | BUS_DMA_NOWAIT)) != 0) {
		printf("%s: can't map rx list, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_5;
	}
	memset(sc->rl_ldata.rl_rx_list, 0, RL_RX_DMAMEM_SZ);

	if ((error = bus_dmamap_create(sc->sc_dmat, RL_RX_DMAMEM_SZ, 1,
	    RL_RX_DMAMEM_SZ, 0, 0,
	    &sc->rl_ldata.rl_rx_list_map)) != 0) {
		printf("%s: can't create rx list map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_6;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat,
	    sc->rl_ldata.rl_rx_list_map, sc->rl_ldata.rl_rx_list,
	    RL_RX_DMAMEM_SZ, NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: can't load rx list, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_7;
	}

	for (i = 0; i < RL_RX_DESC_CNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, 0, &sc->rl_ldata.rl_rxsoft[i].rxs_dmamap);
		if (error) {
			printf("%s: can't create DMA map for RX\n",
			    sc->sc_dev.dv_xname);
			goto fail_8;
		}
	}

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = re_ioctl;
	ifp->if_start = re_start;
	ifp->if_watchdog = re_watchdog;
	ifp->if_init = re_init;
	if (sc->rl_type == RL_8169)
		ifp->if_hardmtu = RL_JUMBO_MTU;
	IFQ_SET_MAXLEN(&ifp->if_snd, RL_TX_QLEN);
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_IPv4 |
	    IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;

#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

	timeout_set(&sc->timer_handle, re_tick, sc);

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = re_miibus_readreg;
	sc->sc_mii.mii_writereg = re_miibus_writereg;
	sc->sc_mii.mii_statchg = re_miibus_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, re_ifmedia_upd,
	    re_ifmedia_sts);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, MIIF_DOPAUSE);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_mii.mii_media,
		    IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media,
		    IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	re_reset(sc);
	if_attach(ifp);
	ether_ifattach(ifp);

#ifdef RE_DIAG
	if (sc->rl_type == RL_8169) {
		error = re_diag(sc);
		if (error) {
			printf("%s: attach aborted due to hardware diag failure\n",
			    sc->sc_dev.dv_xname);
			ether_ifdetach(ifp);
			goto fail_8;
		}
	}
#endif

	return (0);

fail_8:
	for (i = 0; i < RL_RX_DESC_CNT; i++) {
		if (sc->rl_ldata.rl_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->rl_ldata.rl_rxsoft[i].rxs_dmamap);
	}

	bus_dmamap_unload(sc->sc_dmat, sc->rl_ldata.rl_rx_list_map);
fail_7:
	bus_dmamap_destroy(sc->sc_dmat, sc->rl_ldata.rl_rx_list_map);
fail_6:
	bus_dmamem_unmap(sc->sc_dmat,
	    (caddr_t)sc->rl_ldata.rl_rx_list, RL_RX_DMAMEM_SZ);
fail_5:
	bus_dmamem_free(sc->sc_dmat,
	    &sc->rl_ldata.rl_rx_listseg, sc->rl_ldata.rl_rx_listnseg);

fail_4:
	for (i = 0; i < RL_TX_QLEN; i++) {
		if (sc->rl_ldata.rl_txq[i].txq_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->rl_ldata.rl_txq[i].txq_dmamap);
	}

	bus_dmamap_unload(sc->sc_dmat, sc->rl_ldata.rl_tx_list_map);
fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->rl_ldata.rl_tx_list_map);
fail_2:
	bus_dmamem_unmap(sc->sc_dmat,
	    (caddr_t)sc->rl_ldata.rl_tx_list, RL_TX_LIST_SZ(sc));
fail_1:
	bus_dmamem_free(sc->sc_dmat,
	    &sc->rl_ldata.rl_tx_listseg, sc->rl_ldata.rl_tx_listnseg);
fail_0:
	return (1);
}

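/*
 * Set up an RX descriptor: allocate a fresh mbuf cluster if none was
 * passed in, load it into the slot's DMA map and hand the descriptor
 * back to the chip.
 */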
int
re_newbuf(struct rl_softc *sc, int idx, struct mbuf *m)
{
	struct mbuf	*n = NULL;
	bus_dmamap_t	map;
	struct rl_desc	*d;
	struct rl_rxsoft *rxs;
	u_int32_t	cmdstat;
	int		error;

	if (m == NULL) {
		MGETHDR(n, M_DONTWAIT, MT_DATA);
		if (n == NULL)
			return (ENOBUFS);

		MCLGET(n, M_DONTWAIT);
		if (!(n->m_flags & M_EXT)) {
			m_freem(n);
			return (ENOBUFS);
		}
		m = n;
	} else
		m->m_data = m->m_ext.ext_buf;

	m->m_len = m->m_pkthdr.len = RE_RX_DESC_BUFLEN;
	m->m_data += RE_ETHER_ALIGN;

	rxs = &sc->rl_ldata.rl_rxsoft[idx];
	map = rxs->rxs_dmamap;
	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);

	if (error)
		goto out;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	d = &sc->rl_ldata.rl_rx_list[idx];
	RL_RXDESCSYNC(sc, idx, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	cmdstat = letoh32(d->rl_cmdstat);
	RL_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD);
	if (cmdstat & RL_RDESC_STAT_OWN) {
		printf("%s: tried to map busy RX descriptor\n",
		    sc->sc_dev.dv_xname);
		goto out;
	}

	rxs->rxs_mbuf = m;

	d->rl_vlanctl = 0;
	cmdstat = map->dm_segs[0].ds_len;
	if (idx == (RL_RX_DESC_CNT - 1))
		cmdstat |= RL_RDESC_CMD_EOR;
	re_set_bufaddr(d, map->dm_segs[0].ds_addr);
	d->rl_cmdstat = htole32(cmdstat);
	RL_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	cmdstat |= RL_RDESC_CMD_OWN;
	d->rl_cmdstat = htole32(cmdstat);
	RL_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	return (0);
out:
	if (n != NULL)
		m_freem(n);
	return (ENOMEM);
}

int
re_tx_list_init(struct rl_softc *sc)
{
	int	i;

	memset(sc->rl_ldata.rl_tx_list, 0, RL_TX_LIST_SZ(sc));
	for (i = 0; i < RL_TX_QLEN; i++) {
		sc->rl_ldata.rl_txq[i].txq_mbuf = NULL;
	}

	bus_dmamap_sync(sc->sc_dmat,
	    sc->rl_ldata.rl_tx_list_map, 0,
	    sc->rl_ldata.rl_tx_list_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	sc->rl_ldata.rl_txq_prodidx = 0;
	sc->rl_ldata.rl_txq_considx = 0;
	sc->rl_ldata.rl_tx_free = RL_TX_DESC_CNT(sc);
	sc->rl_ldata.rl_tx_nextfree = 0;

	return (0);
}

int
re_rx_list_init(struct rl_softc *sc)
{
	int	i;

	memset((char *)sc->rl_ldata.rl_rx_list, 0, RL_RX_LIST_SZ);

	for (i = 0; i < RL_RX_DESC_CNT; i++) {
		if (re_newbuf(sc, i, NULL) == ENOBUFS)
			return (ENOBUFS);
	}

	sc->rl_ldata.rl_rx_prodidx = 0;
	sc->rl_head = sc->rl_tail = NULL;

	return (0);
}

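/*
 * RX handler.  Walk the RX ring and pass completed frames up the stack,
 * handling frames that span multiple descriptors, hardware checksum
 * results and receive errors along the way.
 */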
void
re_rxeof(struct rl_softc *sc)
{
	struct mbuf	*m;
	struct ifnet	*ifp;
	int		i, total_len;
	struct rl_desc	*cur_rx;
	struct rl_rxsoft *rxs;
	u_int32_t	rxstat;

	ifp = &sc->sc_arpcom.ac_if;

	for (i = sc->rl_ldata.rl_rx_prodidx;; i = RL_NEXT_RX_DESC(sc, i)) {
		cur_rx = &sc->rl_ldata.rl_rx_list[i];
		RL_RXDESCSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		rxstat = letoh32(cur_rx->rl_cmdstat);
		RL_RXDESCSYNC(sc, i, BUS_DMASYNC_PREREAD);
		if ((rxstat & RL_RDESC_STAT_OWN) != 0)
			break;
		total_len = rxstat & sc->rl_rxlenmask;
		rxs = &sc->rl_ldata.rl_rxsoft[i];
		m = rxs->rxs_mbuf;

		bus_dmamap_sync(sc->sc_dmat,
		    rxs->rxs_dmamap, 0, rxs->rxs_dmamap->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

		if (!(rxstat & RL_RDESC_STAT_EOF)) {
			m->m_len = RE_RX_DESC_BUFLEN;
			if (sc->rl_head == NULL)
				sc->rl_head = sc->rl_tail = m;
			else {
				m->m_flags &= ~M_PKTHDR;
				sc->rl_tail->m_next = m;
				sc->rl_tail = m;
			}
			re_newbuf(sc, i, NULL);
			continue;
		}

		if (sc->rl_type == RL_8169)
			rxstat >>= 1;

		if (rxstat & RL_RDESC_STAT_RXERRSUM && !(total_len > 8191 &&
		    (rxstat & RL_RDESC_STAT_ERRS) == RL_RDESC_STAT_GIANT)) {
			ifp->if_ierrors++;

			if (sc->rl_head != NULL) {
				m_freem(sc->rl_head);
				sc->rl_head = sc->rl_tail = NULL;
			}
			re_newbuf(sc, i, m);
			continue;
		}

		if (re_newbuf(sc, i, NULL)) {
			ifp->if_ierrors++;
			if (sc->rl_head != NULL) {
				m_freem(sc->rl_head);
				sc->rl_head = sc->rl_tail = NULL;
			}
			re_newbuf(sc, i, m);
			continue;
		}

		if (sc->rl_head != NULL) {
			m->m_len = total_len % RE_RX_DESC_BUFLEN;
			if (m->m_len == 0)
				m->m_len = RE_RX_DESC_BUFLEN;

			if (m->m_len <= ETHER_CRC_LEN) {
				sc->rl_tail->m_len -=
				    (ETHER_CRC_LEN - m->m_len);
				m_freem(m);
			} else {
				m->m_len -= ETHER_CRC_LEN;
				m->m_flags &= ~M_PKTHDR;
				sc->rl_tail->m_next = m;
			}
			m = sc->rl_head;
			sc->rl_head = sc->rl_tail = NULL;
			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
		} else
			m->m_pkthdr.len = m->m_len =
			    (total_len - ETHER_CRC_LEN);

		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;

		if ((rxstat & RL_RDESC_STAT_PROTOID) &&
		    !(rxstat & RL_RDESC_STAT_IPSUMBAD))
			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;

		if ((RL_TCPPKT(rxstat) &&
		    !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) ||
		    (RL_UDPPKT(rxstat) &&
		    !(rxstat & RL_RDESC_STAT_UDPSUMBAD)))
			m->m_pkthdr.csum_flags |=
			    M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif
		ether_input_mbuf(ifp, m);
	}

	sc->rl_ldata.rl_rx_prodidx = i;
}

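/*
 * TX completion handler.  Reclaim descriptors for frames the chip has
 * finished transmitting, free the associated mbufs and DMA maps, and
 * update the interface statistics.
 */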
void
re_txeof(struct rl_softc *sc)
{
	struct ifnet	*ifp;
	struct rl_txq	*txq;
	uint32_t	txstat;
	int		idx, descidx;

	ifp = &sc->sc_arpcom.ac_if;

	for (idx = sc->rl_ldata.rl_txq_considx;; idx = RL_NEXT_TXQ(sc, idx)) {
		txq = &sc->rl_ldata.rl_txq[idx];

		if (txq->txq_mbuf == NULL) {
			KASSERT(idx == sc->rl_ldata.rl_txq_prodidx);
			break;
		}

		descidx = txq->txq_descidx;
		RL_TXDESCSYNC(sc, descidx,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		txstat =
		    letoh32(sc->rl_ldata.rl_tx_list[descidx].rl_cmdstat);
		RL_TXDESCSYNC(sc, descidx, BUS_DMASYNC_PREREAD);
		KASSERT((txstat & RL_TDESC_CMD_EOF) != 0);
		if (txstat & RL_TDESC_CMD_OWN)
			break;

		sc->rl_ldata.rl_tx_free += txq->txq_nsegs;
		KASSERT(sc->rl_ldata.rl_tx_free <= RL_TX_DESC_CNT(sc));
		bus_dmamap_sync(sc->sc_dmat, txq->txq_dmamap,
		    0, txq->txq_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txq->txq_dmamap);
		m_freem(txq->txq_mbuf);
		txq->txq_mbuf = NULL;

		if (txstat & (RL_TDESC_STAT_EXCESSCOL | RL_TDESC_STAT_COLCNT))
			ifp->if_collisions++;
		if (txstat & RL_TDESC_STAT_TXERRSUM)
			ifp->if_oerrors++;
		else
			ifp->if_opackets++;
	}

	sc->rl_ldata.rl_txq_considx = idx;

	if (sc->rl_ldata.rl_tx_free > RL_NTXDESC_RSVD)
		ifp->if_flags &= ~IFF_OACTIVE;

	if (sc->rl_ldata.rl_tx_free < RL_TX_DESC_CNT(sc)) {
		CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);

		CSR_WRITE_4(sc, RL_TIMERCNT, 1);
	} else
		ifp->if_timer = 0;
}

void
re_tick(void *xsc)
{
	struct rl_softc	*sc = xsc;
	struct mii_data	*mii;
	struct ifnet	*ifp;
	int		s;

	ifp = &sc->sc_arpcom.ac_if;
	mii = &sc->sc_mii;

	s = splnet();

	mii_tick(mii);
	if (sc->rl_link) {
		if (!(mii->mii_media_status & IFM_ACTIVE))
			sc->rl_link = 0;
	} else {
		if (mii->mii_media_status & IFM_ACTIVE &&
		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
			sc->rl_link = 1;
			if (!IFQ_IS_EMPTY(&ifp->if_snd))
				re_start(ifp);
		}
	}
	splx(s);

	timeout_add(&sc->timer_handle, hz);
}

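/*
 * Interrupt handler.  Acknowledge and dispatch RX, TX, link-change and
 * error interrupts; returns non-zero if the interrupt was ours.
 */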
int
re_intr(void *arg)
{
	struct rl_softc	*sc = arg;
	struct ifnet	*ifp;
	u_int16_t	status;
	int		claimed = 0;

	ifp = &sc->sc_arpcom.ac_if;

	if (!(ifp->if_flags & IFF_UP))
		return (0);

	for (;;) {
		status = CSR_READ_2(sc, RL_ISR);

		if (status == 0xffff)
			break;
		if (status)
			CSR_WRITE_2(sc, RL_ISR, status);

		if ((status & RL_INTRS_CPLUS) == 0)
			break;

		if (status & (RL_ISR_RX_OK | RL_ISR_RX_ERR)) {
			re_rxeof(sc);
			claimed = 1;
		}

		if (status & (RL_ISR_TIMEOUT_EXPIRED | RL_ISR_TX_ERR |
		    RL_ISR_TX_DESC_UNAVAIL)) {
			re_txeof(sc);
			claimed = 1;
		}

		if (status & RL_ISR_SYSTEM_ERR) {
			re_reset(sc);
			re_init(ifp);
			claimed = 1;
		}

		if (status & RL_ISR_LINKCHG) {
			timeout_del(&sc->timer_handle);
			re_tick(sc);
			claimed = 1;
		}
	}

	if (claimed && !IFQ_IS_EMPTY(&ifp->if_snd))
		re_start(ifp);

	return (claimed);
}

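/*
 * Map an outgoing frame onto the TX ring.  The frame is loaded into the
 * DMA map of the next free TX queue slot and one descriptor is filled
 * in per DMA segment; *idx is advanced on success.
 */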
int
re_encap(struct rl_softc *sc, struct mbuf *m, int *idx)
{
	bus_dmamap_t	map;
	int		error, seg, nsegs, uidx, startidx, curidx, lastidx, pad;
	struct rl_desc	*d;
	u_int32_t	cmdstat, rl_flags = 0;
	struct rl_txq	*txq;
#if NVLAN > 0
	struct ifvlan	*ifv = NULL;

	if ((m->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) &&
	    m->m_pkthdr.rcvif != NULL)
		ifv = m->m_pkthdr.rcvif->if_softc;
#endif

	if (sc->rl_ldata.rl_tx_free <= RL_NTXDESC_RSVD)
		return (EFBIG);

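	/*
	 * Set up checksum offload.  Note that frames no longer than
	 * RL_IP4CSUMTX_PADLEN get an extra pad descriptor appended below
	 * when IP checksum offload is requested; the hardware apparently
	 * miscomputes the checksum on very short frames otherwise.
	 */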
	if ((m->m_pkthdr.csum_flags &
	    (M_IPV4_CSUM_OUT|M_TCPV4_CSUM_OUT|M_UDPV4_CSUM_OUT)) != 0) {
		rl_flags |= RL_TDESC_CMD_IPCSUM;
		if (m->m_pkthdr.csum_flags & M_TCPV4_CSUM_OUT)
			rl_flags |= RL_TDESC_CMD_TCPCSUM;
		if (m->m_pkthdr.csum_flags & M_UDPV4_CSUM_OUT)
			rl_flags |= RL_TDESC_CMD_UDPCSUM;
	}

	txq = &sc->rl_ldata.rl_txq[*idx];
	map = txq->txq_dmamap;
	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
	    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't map mbuf (error %d)\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}

	nsegs = map->dm_nsegs;
	pad = 0;
	if (m->m_pkthdr.len <= RL_IP4CSUMTX_PADLEN &&
	    (rl_flags & RL_TDESC_CMD_IPCSUM) != 0) {
		pad = 1;
		nsegs++;
	}

	if (nsegs > sc->rl_ldata.rl_tx_free - RL_NTXDESC_RSVD) {
		error = EFBIG;
		goto fail_unload;
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	curidx = startidx = sc->rl_ldata.rl_tx_nextfree;
	lastidx = -1;
	for (seg = 0; seg < map->dm_nsegs;
	    seg++, curidx = RL_NEXT_TX_DESC(sc, curidx)) {
		d = &sc->rl_ldata.rl_tx_list[curidx];
		RL_TXDESCSYNC(sc, curidx,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		cmdstat = letoh32(d->rl_cmdstat);
		RL_TXDESCSYNC(sc, curidx, BUS_DMASYNC_PREREAD);
		if (cmdstat & RL_TDESC_STAT_OWN) {
			printf("%s: tried to map busy TX descriptor\n",
			    sc->sc_dev.dv_xname);
			for (; seg > 0; seg--) {
				uidx = (curidx + RL_TX_DESC_CNT(sc) - seg) %
				    RL_TX_DESC_CNT(sc);
				sc->rl_ldata.rl_tx_list[uidx].rl_cmdstat = 0;
				RL_TXDESCSYNC(sc, uidx,
				    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			}
			error = ENOBUFS;
			goto fail_unload;
		}

		d->rl_vlanctl = 0;
		re_set_bufaddr(d, map->dm_segs[seg].ds_addr);
		cmdstat = rl_flags | map->dm_segs[seg].ds_len;
		if (seg == 0)
			cmdstat |= RL_TDESC_CMD_SOF;
		else
			cmdstat |= RL_TDESC_CMD_OWN;
		if (curidx == (RL_TX_DESC_CNT(sc) - 1))
			cmdstat |= RL_TDESC_CMD_EOR;
		if (seg == nsegs - 1) {
			cmdstat |= RL_TDESC_CMD_EOF;
			lastidx = curidx;
		}
		d->rl_cmdstat = htole32(cmdstat);
		RL_TXDESCSYNC(sc, curidx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	if (pad) {
		bus_addr_t paddaddr;

		d = &sc->rl_ldata.rl_tx_list[curidx];
		d->rl_vlanctl = 0;
		paddaddr = RL_TXPADDADDR(sc);
		re_set_bufaddr(d, paddaddr);
		cmdstat = rl_flags |
		    RL_TDESC_CMD_OWN | RL_TDESC_CMD_EOF |
		    (RL_IP4CSUMTX_PADLEN + 1 - m->m_pkthdr.len);
		if (curidx == (RL_TX_DESC_CNT(sc) - 1))
			cmdstat |= RL_TDESC_CMD_EOR;
		d->rl_cmdstat = htole32(cmdstat);
		RL_TXDESCSYNC(sc, curidx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		lastidx = curidx;
		curidx = RL_NEXT_TX_DESC(sc, curidx);
	}
	KASSERT(lastidx != -1);

#if NVLAN > 0
	if (ifv != NULL) {
		sc->rl_ldata.rl_tx_list[startidx].rl_vlanctl =
		    htole32(swap16(ifv->ifv_tag) |
		    RL_TDESC_VLANCTL_TAG);
	}
#endif

	sc->rl_ldata.rl_tx_list[startidx].rl_cmdstat |=
	    htole32(RL_TDESC_CMD_OWN);
	RL_TXDESCSYNC(sc, startidx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	txq->txq_mbuf = m;
	txq->txq_descidx = lastidx;
	txq->txq_nsegs = nsegs;

	sc->rl_ldata.rl_tx_free -= nsegs;
	sc->rl_ldata.rl_tx_nextfree = curidx;

	*idx = RL_NEXT_TXQ(sc, *idx);

	return (0);

fail_unload:
	bus_dmamap_unload(sc->sc_dmat, map);

	return (error);
}

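/*
 * Main transmit routine.  Dequeue packets from the interface send queue,
 * encapsulate them onto the TX ring and kick the chip.
 */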
void
re_start(struct ifnet *ifp)
{
	struct rl_softc	*sc;
	int		idx, queued = 0;

	sc = ifp->if_softc;

	if (!sc->rl_link || ifp->if_flags & IFF_OACTIVE)
		return;

	idx = sc->rl_ldata.rl_txq_prodidx;
	for (;;) {
		struct mbuf *m;
		int error;

		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;

		if (sc->rl_ldata.rl_txq[idx].txq_mbuf != NULL) {
			KASSERT(idx == sc->rl_ldata.rl_txq_considx);
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		error = re_encap(sc, m, &idx);
		if (error == EFBIG &&
		    sc->rl_ldata.rl_tx_free == RL_TX_DESC_CNT(sc)) {
			IFQ_DEQUEUE(&ifp->if_snd, m);
			m_freem(m);
			ifp->if_oerrors++;
			continue;
		}
		if (error) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		queued++;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
	}

	if (queued == 0) {
		if (sc->rl_ldata.rl_tx_free != RL_TX_DESC_CNT(sc))
			CSR_WRITE_4(sc, RL_TIMERCNT, 1);
		return;
	}

	sc->rl_ldata.rl_txq_prodidx = idx;

	CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);

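	/*
	 * Re-arm the on-chip timer.  TX completions are collected via the
	 * timeout-expired interrupt (see re_txeof() and the handling of
	 * RL_ISR_TIMEOUT_EXPIRED in re_intr()) rather than per-packet
	 * TX-OK interrupts, and this write apparently restarts the timer.
	 */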
	CSR_WRITE_4(sc, RL_TIMERCNT, 1);

	ifp->if_timer = 5;
}

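/*
 * Initialize the hardware: program the station address, set up the
 * descriptor rings, enable the transmitter and receiver, and program
 * the RX filter and the interrupt mask.
 */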
int
re_init(struct ifnet *ifp)
{
	struct rl_softc	*sc = ifp->if_softc;
	u_int32_t	rxcfg = 0;
	int		s;
	union {
		u_int32_t align_dummy;
		u_char eaddr[ETHER_ADDR_LEN];
	} eaddr;

	s = splnet();

	re_stop(ifp, 0);

	CSR_WRITE_2(sc, RL_CPLUS_CMD, RL_CPLUSCMD_RXENB|
	    RL_CPLUSCMD_TXENB|RL_CPLUSCMD_PCI_MRW|
	    RL_CPLUSCMD_RXCSUM_ENB);

	bcopy(sc->sc_arpcom.ac_enaddr, eaddr.eaddr, ETHER_ADDR_LEN);
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);
	CSR_WRITE_4(sc, RL_IDR4,
	    htole32(*(u_int32_t *)(&eaddr.eaddr[4])));
	CSR_WRITE_4(sc, RL_IDR0,
	    htole32(*(u_int32_t *)(&eaddr.eaddr[0])));
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);

	re_rx_list_init(sc);
	re_tx_list_init(sc);

	CSR_WRITE_4(sc, RL_RXLIST_ADDR_HI,
	    RL_ADDR_HI(sc->rl_ldata.rl_rx_list_map->dm_segs[0].ds_addr));
	CSR_WRITE_4(sc, RL_RXLIST_ADDR_LO,
	    RL_ADDR_LO(sc->rl_ldata.rl_rx_list_map->dm_segs[0].ds_addr));

	CSR_WRITE_4(sc, RL_TXLIST_ADDR_HI,
	    RL_ADDR_HI(sc->rl_ldata.rl_tx_list_map->dm_segs[0].ds_addr));
	CSR_WRITE_4(sc, RL_TXLIST_ADDR_LO,
	    RL_ADDR_LO(sc->rl_ldata.rl_tx_list_map->dm_segs[0].ds_addr));

	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB);

	if (sc->rl_testmode) {
		if (sc->rl_type == RL_8169)
			CSR_WRITE_4(sc, RL_TXCFG,
			    RL_TXCFG_CONFIG|RL_LOOPTEST_ON);
		else
			CSR_WRITE_4(sc, RL_TXCFG,
			    RL_TXCFG_CONFIG|RL_LOOPTEST_ON_CPLUS);
	} else
		CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG);

	CSR_WRITE_1(sc, RL_EARLY_TX_THRESH, 16);

	CSR_WRITE_4(sc, RL_RXCFG, RL_RXCFG_CONFIG);

	rxcfg = CSR_READ_4(sc, RL_RXCFG);
	rxcfg |= RL_RXCFG_RX_INDIV;

	if (ifp->if_flags & IFF_BROADCAST)
		rxcfg |= RL_RXCFG_RX_BROAD;
	else
		rxcfg &= ~RL_RXCFG_RX_BROAD;

	CSR_WRITE_4(sc, RL_RXCFG, rxcfg);

	re_setpromisc(sc);

	re_setmulti(sc);

	if (sc->rl_testmode)
		CSR_WRITE_2(sc, RL_IMR, 0);
	else
		CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS);
	CSR_WRITE_2(sc, RL_ISR, RL_INTRS_CPLUS);

	CSR_WRITE_4(sc, RL_MISSEDPKT, 0);
#ifdef notdef
	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB);
#endif

	if (sc->rl_type == RL_8169)
		CSR_WRITE_4(sc, RL_TIMERINT_8169, 0x800);
	else
		CSR_WRITE_4(sc, RL_TIMERINT, 0x400);

	if (sc->rl_type == RL_8169)
		CSR_WRITE_2(sc, RL_MAXRXPKTLEN, 16383);

	if (sc->rl_testmode) {
		splx(s);
		return (0);
	}

	mii_mediachg(&sc->sc_mii);

	CSR_WRITE_1(sc, RL_CFG1, CSR_READ_1(sc, RL_CFG1) | RL_CFG1_DRVLOAD);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	splx(s);

	sc->rl_link = 0;

	timeout_add(&sc->timer_handle, hz);

	return (0);
}

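/*
 * Set media options.
 */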
int
re_ifmedia_upd(struct ifnet *ifp)
{
	struct rl_softc	*sc;

	sc = ifp->if_softc;

	return (mii_mediachg(&sc->sc_mii));
}

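/*
 * Report current media status.
 */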
void
re_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct rl_softc	*sc;

	sc = ifp->if_softc;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
}

int
re_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct rl_softc	*sc = ifp->if_softc;
	struct ifreq	*ifr = (struct ifreq *)data;
	struct ifaddr	*ifa = (struct ifaddr *)data;
	int		s, error = 0;

	s = splnet();

	if ((error = ether_ioctl(ifp, &sc->sc_arpcom, command,
	    data)) > 0) {
		splx(s);
		return (error);
	}

	switch (command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			re_init(ifp);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_arpcom, ifa);
#endif
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ifp->if_hardmtu)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu)
			ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING &&
			    ((ifp->if_flags ^ sc->if_flags) &
			    IFF_PROMISC)) {
				re_setpromisc(sc);
			} else {
				if (!(ifp->if_flags & IFF_RUNNING))
					re_init(ifp);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				re_stop(ifp, 1);
		}
		sc->if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (command == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_arpcom) :
		    ether_delmulti(ifr, &sc->sc_arpcom);
		if (error == ENETRESET) {
			if (ifp->if_flags & IFF_RUNNING)
				re_setmulti(sc);
			error = 0;
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media,
		    command);
		break;
	default:
		error = EINVAL;
		break;
	}

	splx(s);

	return (error);
}

void
re_watchdog(struct ifnet *ifp)
{
	struct rl_softc	*sc;
	int		s;

	sc = ifp->if_softc;
	s = splnet();
	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;

	re_txeof(sc);
	re_rxeof(sc);

	re_init(ifp);

	splx(s);
}

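/*
 * Stop the adapter and free any mbufs allocated to the RX and TX rings.
 */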
void
re_stop(struct ifnet *ifp, int disable)
{
	struct rl_softc	*sc;
	int		i;

	sc = ifp->if_softc;

	ifp->if_timer = 0;
	sc->rl_link = 0;

	timeout_del(&sc->timer_handle);
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	mii_down(&sc->sc_mii);

	CSR_WRITE_1(sc, RL_COMMAND, 0x00);
	CSR_WRITE_2(sc, RL_IMR, 0x0000);
	CSR_WRITE_2(sc, RL_ISR, 0xFFFF);

	if (sc->rl_head != NULL) {
		m_freem(sc->rl_head);
		sc->rl_head = sc->rl_tail = NULL;
	}

	for (i = 0; i < RL_TX_QLEN; i++) {
		if (sc->rl_ldata.rl_txq[i].txq_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat,
			    sc->rl_ldata.rl_txq[i].txq_dmamap);
			m_freem(sc->rl_ldata.rl_txq[i].txq_mbuf);
			sc->rl_ldata.rl_txq[i].txq_mbuf = NULL;
		}
	}

	for (i = 0; i < RL_RX_DESC_CNT; i++) {
		if (sc->rl_ldata.rl_rxsoft[i].rxs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat,
			    sc->rl_ldata.rl_rxsoft[i].rxs_dmamap);
			m_freem(sc->rl_ldata.rl_rxsoft[i].rxs_mbuf);
			sc->rl_ldata.rl_rxsoft[i].rxs_mbuf = NULL;
		}
	}
}