This source file includes the following definitions.
- mtd_attach
- mtd_ifmedia_upd
- mtd_ifmedia_sts
- mtd_mii_command
- mtd_miibus_readreg
- mtd_miibus_writereg
- mtd_miibus_statchg
- mtd_setmulti
- mtd_encap
- mtd_list_tx_init
- mtd_list_rx_init
- mtd_newbuf
- mtd_reset
- mtd_ioctl
- mtd_init
- mtd_start
- mtd_stop
- mtd_watchdog
- mtd_intr
- mtd_rxeof
- mtd_rx_resync
- mtd_txeof
#include "bpfilter.h"

#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/socket.h>
#include <sys/ioctl.h>

#include <net/if.h>
#include <net/if_media.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#include <machine/bus.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcidevs.h>
#include <dev/pci/pcivar.h>

#include <dev/ic/mtd8xxreg.h>
#include <dev/ic/mtd8xxvar.h>

static int mtd_ifmedia_upd(struct ifnet *);
static void mtd_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static u_int32_t mtd_mii_command(struct mtd_softc *, int, int, int);
static int mtd_miibus_readreg(struct device *, int, int);
static void mtd_miibus_writereg(struct device *, int, int, int);
static void mtd_miibus_statchg(struct device *);
static void mtd_setmulti(struct mtd_softc *);

static int mtd_encap(struct mtd_softc *, struct mbuf *, u_int32_t *);
static int mtd_list_rx_init(struct mtd_softc *);
static void mtd_list_tx_init(struct mtd_softc *);
static int mtd_newbuf(struct mtd_softc *, int, struct mbuf *);

static void mtd_reset(struct mtd_softc *sc);
static int mtd_ioctl(struct ifnet *, u_long, caddr_t);
static void mtd_init(struct ifnet *);
static void mtd_start(struct ifnet *);
static void mtd_stop(struct ifnet *);
static void mtd_watchdog(struct ifnet *);

static void mtd_rxeof(struct mtd_softc *);
static int mtd_rx_resync(struct mtd_softc *);
static void mtd_txeof(struct mtd_softc *);
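
/*
 * Attach routine: reset the chip, set up DMA'able memory for the
 * descriptor lists, read the station address and hook the interface
 * into the network stack.
 */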
void
mtd_attach(struct mtd_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	u_int32_t enaddr[2];
	int i;

	mtd_reset(sc);

	if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct mtd_list_data),
	    PAGE_SIZE, 0, sc->sc_listseg, 1, &sc->sc_listnseg,
	    BUS_DMA_NOWAIT) != 0) {
		printf(": can't alloc list mem\n");
		return;
	}
	if (bus_dmamem_map(sc->sc_dmat, sc->sc_listseg, sc->sc_listnseg,
	    sizeof(struct mtd_list_data), &sc->sc_listkva,
	    BUS_DMA_NOWAIT) != 0) {
		printf(": can't map list mem\n");
		return;
	}
	if (bus_dmamap_create(sc->sc_dmat, sizeof(struct mtd_list_data), 1,
	    sizeof(struct mtd_list_data), 0, BUS_DMA_NOWAIT,
	    &sc->sc_listmap) != 0) {
		printf(": can't alloc list map\n");
		return;
	}
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_listmap, sc->sc_listkva,
	    sizeof(struct mtd_list_data), NULL, BUS_DMA_NOWAIT) != 0) {
		printf(": can't load list map\n");
		return;
	}
	sc->mtd_ldata = (struct mtd_list_data *)sc->sc_listkva;
	bzero(sc->mtd_ldata, sizeof(struct mtd_list_data));

	for (i = 0; i < MTD_RX_LIST_CNT; i++) {
		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT,
		    &sc->mtd_cdata.mtd_rx_chain[i].sd_map) != 0) {
			printf(": can't create rx map\n");
			return;
		}
	}
	if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
	    BUS_DMA_NOWAIT, &sc->sc_rx_sparemap) != 0) {
		printf(": can't create rx spare map\n");
		return;
	}

	for (i = 0; i < MTD_TX_LIST_CNT; i++) {
		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    MTD_TX_LIST_CNT - 5, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->mtd_cdata.mtd_tx_chain[i].sd_map) != 0) {
			printf(": can't create tx map\n");
			return;
		}
	}
	if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, MTD_TX_LIST_CNT - 5,
	    MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_tx_sparemap) != 0) {
		printf(": can't create tx spare map\n");
		return;
	}
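
	/* Get station address from the chip. */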
	enaddr[0] = letoh32(CSR_READ_4(MTD_PAR0));
	enaddr[1] = letoh32(CSR_READ_4(MTD_PAR4));
	bcopy(enaddr, sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
	printf(" address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));
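
	/* Initialize interface. */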
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = mtd_ioctl;
	ifp->if_start = mtd_start;
	ifp->if_watchdog = mtd_watchdog;
	ifp->if_baudrate = 10000000;
	IFQ_SET_READY(&ifp->if_snd);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;
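
	/* Initialize MII/media info. */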
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = mtd_miibus_readreg;
	sc->sc_mii.mii_writereg = mtd_miibus_writereg;
	sc->sc_mii.mii_statchg = mtd_miibus_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, 0, mtd_ifmedia_upd,
	    mtd_ifmedia_sts);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_NONE, 0,
		    NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);
}
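
/*
 * Set media options.
 */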
static int
mtd_ifmedia_upd(struct ifnet *ifp)
{
	struct mtd_softc *sc = ifp->if_softc;

	return (mii_mediachg(&sc->sc_mii));
}
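
/*
 * Report current media status.
 */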
static void
mtd_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct mtd_softc *sc = ifp->if_softc;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
}
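
/*
 * Sync the MII management interface by shifting out 32 one bits, then
 * shift out the given opcode, PHY number and register address.
 */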
static u_int32_t
mtd_mii_command(struct mtd_softc *sc, int opcode, int phy, int reg)
{
	u_int32_t miir, mask, data;
	int i;

	miir = (CSR_READ_4(MTD_MIIMGT) & ~MIIMGT_MASK) | MIIMGT_WRITE |
	    MIIMGT_MDO;

	for (i = 0; i < 32; i++) {
		miir &= ~MIIMGT_MDC;
		CSR_WRITE_4(MTD_MIIMGT, miir);
		miir |= MIIMGT_MDC;
		CSR_WRITE_4(MTD_MIIMGT, miir);
	}

	data = opcode | (phy << 7) | (reg << 2);

	for (mask = 0x8000; mask; mask >>= 1) {
		miir &= ~(MIIMGT_MDC | MIIMGT_MDO);
		if (mask & data)
			miir |= MIIMGT_MDO;
		CSR_WRITE_4(MTD_MIIMGT, miir);
		miir |= MIIMGT_MDC;
		CSR_WRITE_4(MTD_MIIMGT, miir);
		DELAY(30);

		if (mask == 0x4 && opcode == MII_OPCODE_RD)
			miir &= ~MIIMGT_WRITE;
	}
	return (miir);
}
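
/*
 * Read a PHY register through the MII.
 */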
static int
mtd_miibus_readreg(struct device *self, int phy, int reg)
{
	struct mtd_softc *sc = (void *)self;

	if (sc->sc_devid == PCI_PRODUCT_MYSON_MTD803)
		return (phy ? 0 : (int)CSR_READ_2(MTD_PHYCSR + (reg << 1)));
	else {
		u_int32_t miir, mask, data;

		miir = mtd_mii_command(sc, MII_OPCODE_RD, phy, reg);
		for (mask = 0x8000, data = 0; mask; mask >>= 1) {
			miir &= ~MIIMGT_MDC;
			CSR_WRITE_4(MTD_MIIMGT, miir);
			miir = CSR_READ_4(MTD_MIIMGT);
			if (miir & MIIMGT_MDI)
				data |= mask;
			miir |= MIIMGT_MDC;
			CSR_WRITE_4(MTD_MIIMGT, miir);
			DELAY(30);
		}
		miir &= ~MIIMGT_MDC;
		CSR_WRITE_4(MTD_MIIMGT, miir);

		return ((int)data);
	}
}
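
/*
 * Write a value to a PHY register through the MII.
 */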
static void
mtd_miibus_writereg(struct device *self, int phy, int reg, int val)
{
	struct mtd_softc *sc = (void *)self;

	if (sc->sc_devid == PCI_PRODUCT_MYSON_MTD803) {
		if (!phy)
			CSR_WRITE_2(MTD_PHYCSR + (reg << 1), val);
	} else {
		u_int32_t miir, mask;

		miir = mtd_mii_command(sc, MII_OPCODE_WR, phy, reg);
		for (mask = 0x8000; mask; mask >>= 1) {
			miir &= ~(MIIMGT_MDC | MIIMGT_MDO);
			if (mask & (u_int32_t)val)
				miir |= MIIMGT_MDO;
			CSR_WRITE_4(MTD_MIIMGT, miir);
			miir |= MIIMGT_MDC;
			CSR_WRITE_4(MTD_MIIMGT, miir);
			DELAY(1);
		}
		miir &= ~MIIMGT_MDC;
		CSR_WRITE_4(MTD_MIIMGT, miir);
	}
}
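
/*
 * MII status change callback.  Nothing to do here.
 */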
static void
mtd_miibus_statchg(struct device *self)
{
}
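
/*
 * Program the 64-bit multicast hash filter.
 */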
static void
mtd_setmulti(struct mtd_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	u_int32_t rxfilt, crc, hash[2] = { 0, 0 };
	struct ether_multistep step;
	struct ether_multi *enm;
	int mcnt = 0;

allmulti:
	rxfilt = CSR_READ_4(MTD_TCRRCR) & ~RCR_AM;
	if (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) {
		rxfilt |= RCR_AM;
		CSR_WRITE_4(MTD_TCRRCR, rxfilt);
		CSR_WRITE_4(MTD_MAR0, 0xffffffff);
		CSR_WRITE_4(MTD_MAR4, 0xffffffff);
		return;
	}

	CSR_WRITE_4(MTD_MAR0, 0);
	CSR_WRITE_4(MTD_MAR4, 0);

	ETHER_FIRST_MULTI(step, &sc->sc_arpcom, enm);
	while (enm != NULL) {
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			ifp->if_flags |= IFF_ALLMULTI;
			goto allmulti;
		}
		crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) >> 26;
		hash[crc >> 5] |= 1 << (crc & 0x1f);
		++mcnt;
		ETHER_NEXT_MULTI(step, enm);
	}

	if (mcnt)
		rxfilt |= RCR_AM;
	CSR_WRITE_4(MTD_MAR0, hash[0]);
	CSR_WRITE_4(MTD_MAR4, hash[1]);
	CSR_WRITE_4(MTD_TCRRCR, rxfilt);
}
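
/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the descriptor pointers.
 */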
static int
mtd_encap(struct mtd_softc *sc, struct mbuf *m_head, u_int32_t *txidx)
{
	struct mtd_tx_desc *f = NULL;
	int frag, cur, cnt = 0, i, total_len = 0;
	bus_dmamap_t map;

	map = sc->sc_tx_sparemap;

	if (bus_dmamap_load_mbuf(sc->sc_dmat, map,
	    m_head, BUS_DMA_NOWAIT) != 0)
		return (1);

	cur = frag = *txidx;

	for (i = 0; i < map->dm_nsegs; i++) {
		if ((MTD_TX_LIST_CNT -
		    (sc->mtd_cdata.mtd_tx_cnt + cnt)) < 5) {
			bus_dmamap_unload(sc->sc_dmat, map);
			return (1);
		}

		f = &sc->mtd_ldata->mtd_tx_list[frag];
		f->td_tcw = htole32(map->dm_segs[i].ds_len);
		total_len += map->dm_segs[i].ds_len;
		if (cnt == 0) {
			f->td_tsw = 0;
			f->td_tcw |= htole32(TCW_FD | TCW_CRC | TCW_PAD);
		} else
			f->td_tsw = htole32(TSW_OWN);
		f->td_buf = htole32(map->dm_segs[i].ds_addr);
		cur = frag;
		frag = (frag + 1) % MTD_TX_LIST_CNT;
		cnt++;
	}

	sc->mtd_cdata.mtd_tx_cnt += cnt;
	sc->mtd_cdata.mtd_tx_chain[cur].sd_mbuf = m_head;
	sc->sc_tx_sparemap = sc->mtd_cdata.mtd_tx_chain[cur].sd_map;
	sc->mtd_cdata.mtd_tx_chain[cur].sd_map = map;
	sc->mtd_ldata->mtd_tx_list[cur].td_tcw |= htole32(TCW_LD | TCW_IC);
	if (sc->sc_devid == PCI_PRODUCT_MYSON_MTD891)
		sc->mtd_ldata->mtd_tx_list[cur].td_tcw |=
		    htole32(TCW_EIC | TCW_RTLC);

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	sc->mtd_ldata->mtd_tx_list[*txidx].td_tsw = htole32(TSW_OWN);
	sc->mtd_ldata->mtd_tx_list[*txidx].td_tcw |=
	    htole32(total_len << TCW_PKTS_SHIFT);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    offsetof(struct mtd_list_data, mtd_tx_list[0]),
	    sizeof(struct mtd_tx_desc) * MTD_TX_LIST_CNT,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	*txidx = frag;

	return (0);
}
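
/*
 * Initialize the transmit descriptors.
 */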
static void
mtd_list_tx_init(struct mtd_softc *sc)
{
	struct mtd_chain_data *cd;
	struct mtd_list_data *ld;
	int i;

	cd = &sc->mtd_cdata;
	ld = sc->mtd_ldata;
	for (i = 0; i < MTD_TX_LIST_CNT; i++) {
		cd->mtd_tx_chain[i].sd_mbuf = NULL;
		ld->mtd_tx_list[i].td_tsw = 0;
		ld->mtd_tx_list[i].td_tcw = 0;
		ld->mtd_tx_list[i].td_buf = 0;
		ld->mtd_tx_list[i].td_next = htole32(
		    sc->sc_listmap->dm_segs[0].ds_addr +
		    offsetof(struct mtd_list_data,
		    mtd_tx_list[(i + 1) % MTD_TX_LIST_CNT]));
	}

	cd->mtd_tx_prod = cd->mtd_tx_cons = cd->mtd_tx_cnt = 0;
}
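
/*
 * Initialize the RX descriptors and allocate mbufs for them.  Note that
 * the descriptors are arranged in a closed ring, so the last descriptor
 * points back to the first.
 */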
static int
mtd_list_rx_init(struct mtd_softc *sc)
{
	struct mtd_list_data *ld;
	int i;

	ld = sc->mtd_ldata;

	for (i = 0; i < MTD_RX_LIST_CNT; i++) {
		if (mtd_newbuf(sc, i, NULL))
			return (1);
		ld->mtd_rx_list[i].rd_next = htole32(
		    sc->sc_listmap->dm_segs[0].ds_addr +
		    offsetof(struct mtd_list_data,
		    mtd_rx_list[(i + 1) % MTD_RX_LIST_CNT])
		);
	}

	sc->mtd_cdata.mtd_rx_prod = 0;

	return (0);
}
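
/*
 * Initialize an RX descriptor and attach an mbuf cluster.
 */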
static int
mtd_newbuf(struct mtd_softc *sc, int i, struct mbuf *m)
{
	struct mbuf *m_new = NULL;
	struct mtd_rx_desc *c;
	bus_dmamap_t map;

	c = &sc->mtd_ldata->mtd_rx_list[i];

	if (m == NULL) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return (1);

		MCLGET(m_new, M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			m_freem(m_new);
			return (1);
		}
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		if (bus_dmamap_load(sc->sc_dmat, sc->sc_rx_sparemap,
		    mtod(m_new, caddr_t), MCLBYTES, NULL,
		    BUS_DMA_NOWAIT) != 0) {
			m_freem(m_new);
			return (1);
		}
		map = sc->mtd_cdata.mtd_rx_chain[i].sd_map;
		sc->mtd_cdata.mtd_rx_chain[i].sd_map = sc->sc_rx_sparemap;
		sc->sc_rx_sparemap = map;
	} else {
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	m_adj(m_new, sizeof(u_int64_t));

	bus_dmamap_sync(sc->sc_dmat, sc->mtd_cdata.mtd_rx_chain[i].sd_map, 0,
	    sc->mtd_cdata.mtd_rx_chain[i].sd_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	sc->mtd_cdata.mtd_rx_chain[i].sd_mbuf = m_new;
	c->rd_buf = htole32(
	    sc->mtd_cdata.mtd_rx_chain[i].sd_map->dm_segs[0].ds_addr +
	    sizeof(u_int64_t));
	c->rd_rcw = htole32(ETHER_MAX_DIX_LEN);
	c->rd_rsr = htole32(RSR_OWN);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    offsetof(struct mtd_list_data, mtd_rx_list[i]),
	    sizeof(struct mtd_rx_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}
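
/*
 * Issue a software reset and wait for the chip to come out of it.
 */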
static void
mtd_reset(struct mtd_softc *sc)
{
	int i;

	CSR_WRITE_4(MTD_BCR, BCR_SWR);

	for (i = 0; i < MTD_TIMEOUT; ++i) {
		DELAY(10);
		if (!(CSR_READ_4(MTD_BCR) & BCR_SWR)) {
			DELAY(1000);
			return;
		}
	}

	printf("%s: reset never completed!\n", sc->sc_dev.dv_xname);
}
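
/*
 * Process an ioctl request.
 */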
static int
mtd_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct mtd_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error;

	s = splnet();
	if ((error = ether_ioctl(ifp, &sc->sc_arpcom, command, data)) > 0) {
		splx(s);
		return (error);
	}

	switch (command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		mtd_init(ifp);
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			arp_ifinit(&sc->sc_arpcom, ifa);
			break;
#endif
		}
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu >= ETHERMIN && ifr->ifr_mtu <= ETHERMTU)
			ifp->if_mtu = ifr->ifr_mtu;
		else
			error = EINVAL;
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP)
			mtd_init(ifp);
		else {
			if (ifp->if_flags & IFF_RUNNING)
				mtd_stop(ifp);
		}
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (command == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_arpcom) :
		    ether_delmulti(ifr, &sc->sc_arpcom);

		if (error == ENETRESET) {
			if (ifp->if_flags & IFF_RUNNING)
				mtd_setmulti(sc);
			error = 0;
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
		break;
	default:
		error = EINVAL;
		break;
	}

	splx(s);
	return (error);
}
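
/*
 * Initialize the hardware and bring the interface up.
 */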
static void
mtd_init(struct ifnet *ifp)
{
	struct mtd_softc *sc = ifp->if_softc;
	int s;

	s = splnet();

	mtd_stop(ifp);

	CSR_WRITE_4(MTD_BCR, BCR_PBL8);
	CSR_WRITE_4(MTD_TCRRCR, TCR_TFTSF | RCR_RBLEN | RCR_RPBL512);
	if (sc->sc_devid == PCI_PRODUCT_MYSON_MTD891) {
		CSR_SETBIT(MTD_BCR, BCR_PROG);
		CSR_SETBIT(MTD_TCRRCR, TCR_ENHANCED);
	}

	if (ifp->if_flags & IFF_PROMISC)
		CSR_SETBIT(MTD_TCRRCR, RCR_PROM);
	else
		CSR_CLRBIT(MTD_TCRRCR, RCR_PROM);

	if (ifp->if_flags & IFF_BROADCAST)
		CSR_SETBIT(MTD_TCRRCR, RCR_AB);
	else
		CSR_CLRBIT(MTD_TCRRCR, RCR_AB);

	mtd_setmulti(sc);

	if (mtd_list_rx_init(sc)) {
		printf("%s: can't allocate memory for rx buffers\n",
		    sc->sc_dev.dv_xname);
		splx(s);
		return;
	}
	mtd_list_tx_init(sc);

	CSR_WRITE_4(MTD_RXLBA, sc->sc_listmap->dm_segs[0].ds_addr +
	    offsetof(struct mtd_list_data, mtd_rx_list[0]));
	CSR_WRITE_4(MTD_TXLBA, sc->sc_listmap->dm_segs[0].ds_addr +
	    offsetof(struct mtd_list_data, mtd_tx_list[0]));

	CSR_WRITE_4(MTD_IMR, IMR_INTRS);
	CSR_WRITE_4(MTD_ISR, 0xffffffff);

	CSR_SETBIT(MTD_TCRRCR, TCR_TE | RCR_RE);
	CSR_WRITE_4(MTD_RXPDR, 0xffffffff);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	splx(s);
}
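
/*
 * Main transmit routine.  To avoid having to do mbuf copies, we put
 * pointers to the mbuf data regions into the transmit descriptors.
 */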
static void
mtd_start(struct ifnet *ifp)
{
	struct mtd_softc *sc = ifp->if_softc;
	struct mbuf *m_head = NULL;
	int idx;

	if (sc->mtd_cdata.mtd_tx_cnt) {
		ifp->if_flags |= IFF_OACTIVE;
		return;
	}

	idx = sc->mtd_cdata.mtd_tx_prod;
	while (sc->mtd_cdata.mtd_tx_chain[idx].sd_mbuf == NULL) {
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (mtd_encap(sc, m_head, &idx)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif
	}

	if (idx == sc->mtd_cdata.mtd_tx_prod)
		return;

	sc->mtd_cdata.mtd_tx_prod = idx;
	CSR_WRITE_4(MTD_TXPDR, 0xffffffff);

	ifp->if_timer = 5;
}
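
/*
 * Stop the adapter and free any mbufs allocated to the RX and TX lists.
 */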
static void
mtd_stop(struct ifnet *ifp)
{
	struct mtd_softc *sc = ifp->if_softc;
	int i;

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	CSR_CLRBIT(MTD_TCRRCR, (RCR_RE | TCR_TE));
	CSR_WRITE_4(MTD_IMR, 0);
	CSR_WRITE_4(MTD_TXLBA, 0);
	CSR_WRITE_4(MTD_RXLBA, 0);

	for (i = 0; i < MTD_RX_LIST_CNT; i++) {
		if (sc->mtd_cdata.mtd_rx_chain[i].sd_map->dm_nsegs != 0) {
			bus_dmamap_t map = sc->mtd_cdata.mtd_rx_chain[i].sd_map;

			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, map);
		}
		if (sc->mtd_cdata.mtd_rx_chain[i].sd_mbuf != NULL) {
			m_freem(sc->mtd_cdata.mtd_rx_chain[i].sd_mbuf);
			sc->mtd_cdata.mtd_rx_chain[i].sd_mbuf = NULL;
		}
	}
	bzero((char *)&sc->mtd_ldata->mtd_rx_list,
	    sizeof(sc->mtd_ldata->mtd_rx_list));

	for (i = 0; i < MTD_TX_LIST_CNT; i++) {
		if (sc->mtd_cdata.mtd_tx_chain[i].sd_map->dm_nsegs != 0) {
			bus_dmamap_t map = sc->mtd_cdata.mtd_tx_chain[i].sd_map;

			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, map);
		}
		if (sc->mtd_cdata.mtd_tx_chain[i].sd_mbuf != NULL) {
			m_freem(sc->mtd_cdata.mtd_tx_chain[i].sd_mbuf);
			sc->mtd_cdata.mtd_tx_chain[i].sd_mbuf = NULL;
		}
	}

	bzero((char *)&sc->mtd_ldata->mtd_tx_list,
	    sizeof(sc->mtd_ldata->mtd_tx_list));
}
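
/*
 * Watchdog timeout: a transmit failed to complete in time, so reset
 * the chip and restart the interface.
 */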
static void
mtd_watchdog(struct ifnet *ifp)
{
	struct mtd_softc *sc = ifp->if_softc;

	ifp->if_oerrors++;
	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);

	mtd_stop(ifp);
	mtd_reset(sc);
	mtd_init(ifp);

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		mtd_start(ifp);
}
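
/*
 * Interrupt handler.
 */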
int
mtd_intr(void *xsc)
{
	struct mtd_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	u_int32_t status;
	int claimed = 0;

	if (!(ifp->if_flags & IFF_RUNNING)) {
		if (CSR_READ_4(MTD_ISR) & ISR_INTRS)
			mtd_stop(ifp);
		return (claimed);
	}

	CSR_WRITE_4(MTD_IMR, 0);

	while ((status = CSR_READ_4(MTD_ISR)) & ISR_INTRS) {
		claimed = 1;

		CSR_WRITE_4(MTD_ISR, status);

		if (status & ISR_RI) {
			int curpkts = ifp->if_ipackets;

			mtd_rxeof(sc);
			if (curpkts == ifp->if_ipackets)
				while (mtd_rx_resync(sc))
					mtd_rxeof(sc);
		}

		if (status & (ISR_RXERI | ISR_RBU))
			ifp->if_ierrors++;

		if (status & (ISR_TI | ISR_ETI | ISR_TBU))
			mtd_txeof(sc);

		if (status & ISR_FBE) {
			mtd_reset(sc);
			mtd_start(ifp);
		}
	}

	CSR_WRITE_4(MTD_IMR, IMR_INTRS);

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		mtd_start(ifp);

	return (claimed);
}
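
/*
 * A frame has been uploaded: pass the resulting mbuf chain up to the
 * higher level protocols.
 */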
static void
mtd_rxeof(struct mtd_softc *sc)
{
	struct mbuf *m;
	struct ifnet *ifp;
	struct mtd_rx_desc *cur_rx;
	int i, total_len = 0;
	u_int32_t rxstat;

	ifp = &sc->sc_arpcom.ac_if;
	i = sc->mtd_cdata.mtd_rx_prod;

	while (!(sc->mtd_ldata->mtd_rx_list[i].rd_rsr & htole32(RSR_OWN))) {
		struct mbuf *m0 = NULL;

		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
		    offsetof(struct mtd_list_data, mtd_rx_list[i]),
		    sizeof(struct mtd_rx_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur_rx = &sc->mtd_ldata->mtd_rx_list[i];
		rxstat = letoh32(cur_rx->rd_rsr);
		m = sc->mtd_cdata.mtd_rx_chain[i].sd_mbuf;
		total_len = RSR_FLNG_GET(rxstat);

		sc->mtd_cdata.mtd_rx_chain[i].sd_mbuf = NULL;

		if (rxstat & RSR_RXER) {
			ifp->if_ierrors++;
			mtd_newbuf(sc, i, m);
			if (rxstat & RSR_CRC) {
				i = (i + 1) % MTD_RX_LIST_CNT;
				continue;
			} else {
				mtd_init(ifp);
				return;
			}
		}

		total_len -= ETHER_CRC_LEN;

		bus_dmamap_sync(sc->sc_dmat, sc->mtd_cdata.mtd_rx_chain[i].sd_map,
		    0, sc->mtd_cdata.mtd_rx_chain[i].sd_map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);

		m0 = m_devget(mtod(m, char *) - ETHER_ALIGN, total_len + ETHER_ALIGN,
		    0, ifp, NULL);
		mtd_newbuf(sc, i, m);
		i = (i + 1) % MTD_RX_LIST_CNT;
		if (m0 == NULL) {
			ifp->if_ierrors++;
			continue;
		}
		m_adj(m0, ETHER_ALIGN);
		m = m0;

		ifp->if_ipackets++;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif
		ether_input_mbuf(ifp, m);
	}

	sc->mtd_cdata.mtd_rx_prod = i;
}
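
/*
 * This routine searches the RX ring for dirty descriptors in the event
 * that the rxeof routine falls out of sync with the chip's current
 * descriptor pointer.  This may happen as a result of a "no RX buffer
 * available" condition, when the chip consumes all of the RX buffers
 * before the driver has had a chance to process the ring.
 */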
static int
mtd_rx_resync(struct mtd_softc *sc)
{
	int i, pos;
	struct mtd_rx_desc *cur_rx;

	pos = sc->mtd_cdata.mtd_rx_prod;

	for (i = 0; i < MTD_RX_LIST_CNT; i++) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
		    offsetof(struct mtd_list_data, mtd_rx_list[pos]),
		    sizeof(struct mtd_rx_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur_rx = &sc->mtd_ldata->mtd_rx_list[pos];
		if (!(cur_rx->rd_rsr & htole32(RSR_OWN)))
			break;
		pos = (pos + 1) % MTD_RX_LIST_CNT;
	}

	if (i == MTD_RX_LIST_CNT)
		return (0);

	sc->mtd_cdata.mtd_rx_prod = pos;

	return (EAGAIN);
}
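
/*
 * A frame was downloaded to the chip.  It's safe for us to clean up
 * the list buffers.
 */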
static void
mtd_txeof(struct mtd_softc *sc)
{
	struct mtd_tx_desc *cur_tx = NULL;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int idx;

	ifp->if_timer = 0;

	idx = sc->mtd_cdata.mtd_tx_cons;
	while (idx != sc->mtd_cdata.mtd_tx_prod) {
		u_int32_t txstat;

		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
		    offsetof(struct mtd_list_data, mtd_tx_list[idx]),
		    sizeof(struct mtd_tx_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur_tx = &sc->mtd_ldata->mtd_tx_list[idx];
		txstat = letoh32(cur_tx->td_tsw);

		if (txstat & TSW_OWN || txstat == TSW_UNSENT)
			break;

		if (!(cur_tx->td_tcw & htole32(TCW_LD))) {
			sc->mtd_cdata.mtd_tx_cnt--;
			idx = (idx + 1) % MTD_TX_LIST_CNT;
			continue;
		}

		if (CSR_READ_4(MTD_TCRRCR) & TCR_ENHANCED)
			ifp->if_collisions += TSR_NCR_GET(CSR_READ_4(MTD_TSR));
		else {
			if (txstat & TSW_TXERR) {
				ifp->if_oerrors++;
				if (txstat & TSW_EC)
					ifp->if_collisions++;
				if (txstat & TSW_LC)
					ifp->if_collisions++;
			}
			ifp->if_collisions += TSW_NCR_GET(txstat);
		}

		ifp->if_opackets++;
		if (sc->mtd_cdata.mtd_tx_chain[idx].sd_map->dm_nsegs != 0) {
			bus_dmamap_t map =
			    sc->mtd_cdata.mtd_tx_chain[idx].sd_map;
			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, map);
		}
		if (sc->mtd_cdata.mtd_tx_chain[idx].sd_mbuf != NULL) {
			m_freem(sc->mtd_cdata.mtd_tx_chain[idx].sd_mbuf);
			sc->mtd_cdata.mtd_tx_chain[idx].sd_mbuf = NULL;
		}
		sc->mtd_cdata.mtd_tx_cnt--;
		idx = (idx + 1) % MTD_TX_LIST_CNT;
	}

	if (cur_tx != NULL) {
		ifp->if_flags &= ~IFF_OACTIVE;
		sc->mtd_cdata.mtd_tx_cons = idx;
	} else
		if (sc->mtd_ldata->mtd_tx_list[idx].td_tsw ==
		    htole32(TSW_UNSENT)) {
			sc->mtd_ldata->mtd_tx_list[idx].td_tsw =
			    htole32(TSW_OWN);
			ifp->if_timer = 5;
			CSR_WRITE_4(MTD_TXPDR, 0xffffffff);
		}
}

struct cfdriver mtd_cd = {
	0, "mtd", DV_IFNET
};