This source file includes the following definitions:
- qematch
- qeattach
- qe_get
- qe_put
- qe_read
- qestart
- qestop
- qereset
- qewatchdog
- qeintr
- qe_tint
- qe_rint
- qe_eint
- qeioctl
- qeinit
- qe_mcreset
- qe_ifmedia_sts
- qe_ifmedia_upd

#define QEDEBUG

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/syslog.h>
#include <sys/device.h>
#include <sys/malloc.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/netisr.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/autoconf.h>

#include <dev/sbus/sbusvar.h>
#include <dev/sbus/qecreg.h>
#include <dev/sbus/qecvar.h>
#include <dev/sbus/qereg.h>

struct qe_softc {
	struct device	sc_dev;		/* base device */
	bus_space_tag_t	sc_bustag;	/* bus space tag */
	bus_dma_tag_t	sc_dmatag;	/* DMA tag */
	bus_dmamap_t	sc_dmamap;	/* DMA map for the ring/buffer area */
	struct arpcom	sc_arpcom;	/* ethernet common part */
	struct ifmedia	sc_ifmedia;	/* interface media */

	struct qec_softc *sc_qec;	/* QEC parent */

	bus_space_handle_t sc_qr;	/* QEC registers */
	bus_space_handle_t sc_mr;	/* MACE registers */
	bus_space_handle_t sc_cr;	/* channel registers */

	int		sc_channel;	/* channel number */
	u_int		sc_rev;		/* MACE chip revision */

	int		sc_burst;	/* DMA burst size, from the QEC */

	struct qec_ring	sc_rb;		/* receive/transmit ring state */

#ifdef QEDEBUG
	int		sc_debug;
#endif
};

int	qematch(struct device *, void *, void *);
void	qeattach(struct device *, struct device *, void *);

void	qeinit(struct qe_softc *);
void	qestart(struct ifnet *);
void	qestop(struct qe_softc *);
void	qewatchdog(struct ifnet *);
int	qeioctl(struct ifnet *, u_long, caddr_t);
void	qereset(struct qe_softc *);

int	qeintr(void *);
int	qe_eint(struct qe_softc *, u_int32_t);
int	qe_rint(struct qe_softc *);
int	qe_tint(struct qe_softc *);
void	qe_mcreset(struct qe_softc *);

int	qe_put(struct qe_softc *, int, struct mbuf *);
void	qe_read(struct qe_softc *, int, int);
struct mbuf *qe_get(struct qe_softc *, int, int);

void	qe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
int	qe_ifmedia_upd(struct ifnet *);

struct cfattach qe_ca = {
	sizeof(struct qe_softc), qematch, qeattach
};

struct cfdriver qe_cd = {
	NULL, "qe", DV_IFNET
};

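/*
 * Match a QE channel: the QEC parent enumerates its SBus children and we
 * accept any node whose name matches our driver name ("qe").
 */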
int
qematch(parent, vcf, aux)
	struct device *parent;
	void *vcf;
	void *aux;
{
	struct cfdata *cf = vcf;
	struct sbus_attach_args *sa = aux;

	return (strcmp(cf->cf_driver->cd_name, sa->sa_name) == 0);
}

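/*
 * Attach a QE channel: map the channel and MACE registers, hook up the
 * interrupt, allocate the shared descriptor/buffer DMA area and register
 * the network interface.
 */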
void
qeattach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct sbus_attach_args *sa = aux;
	struct qec_softc *qec = (struct qec_softc *)parent;
	struct qe_softc *sc = (struct qe_softc *)self;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int node = sa->sa_node;
	bus_dma_tag_t dmatag = sa->sa_dmatag;
	bus_dma_segment_t seg;
	bus_size_t size;
	int rseg, error;
	extern void myetheraddr(u_char *);

	sc->sc_bustag = sa->sa_bustag;
	sc->sc_dmatag = sa->sa_dmatag;

	if (sa->sa_nreg < 2) {
		printf("%s: only %d register sets\n",
		    self->dv_xname, sa->sa_nreg);
		return;
	}

	if (sbus_bus_map(sa->sa_bustag, sa->sa_reg[0].sbr_slot,
	    (bus_addr_t)sa->sa_reg[0].sbr_offset,
	    (bus_size_t)sa->sa_reg[0].sbr_size, 0, 0, &sc->sc_cr) != 0) {
		printf("%s: cannot map registers\n", self->dv_xname);
		return;
	}

	if (sbus_bus_map(sa->sa_bustag, sa->sa_reg[1].sbr_slot,
	    (bus_addr_t)sa->sa_reg[1].sbr_offset,
	    (bus_size_t)sa->sa_reg[1].sbr_size, 0, 0, &sc->sc_mr) != 0) {
		printf("%s: cannot map registers\n", self->dv_xname);
		return;
	}

	sc->sc_rev = getpropint(node, "mace-version", -1);
	printf(" rev %x", sc->sc_rev);

	sc->sc_qec = qec;
	sc->sc_qr = qec->sc_regs;

	sc->sc_channel = getpropint(node, "channel#", -1);
	sc->sc_burst = qec->sc_burst;

	qestop(sc);

	/* Establish the interrupt handler. */
	if (bus_intr_establish(sa->sa_bustag, 0, IPL_NET, 0, qeintr, sc,
	    self->dv_xname) == NULL) {
		printf(": no interrupt established\n");
		return;
	}

	myetheraddr(sc->sc_arpcom.ac_enaddr);

	/*
	 * Allocate descriptor rings and packet buffers; for now, use as
	 * many buffers as there are ring descriptors.
	 */
	sc->sc_rb.rb_ntbuf = QEC_XD_RING_MAXSIZE;
	sc->sc_rb.rb_nrbuf = QEC_XD_RING_MAXSIZE;

	size =
	    QEC_XD_RING_MAXSIZE * sizeof(struct qec_xd) +
	    QEC_XD_RING_MAXSIZE * sizeof(struct qec_xd) +
	    sc->sc_rb.rb_ntbuf * QE_PKT_BUF_SZ +
	    sc->sc_rb.rb_nrbuf * QE_PKT_BUF_SZ;

	/* Get a DMA map covering the whole area. */
	if ((error = bus_dmamap_create(dmatag, size, 1, size, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
		printf("%s: DMA map create error %d\n", self->dv_xname, error);
		return;
	}

	/* Allocate DMA-safe memory for the rings and buffers. */
	if ((error = bus_dmamem_alloc(dmatag, size, 0, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: DMA buffer alloc error %d\n",
		    self->dv_xname, error);
		return;
	}

	/* Map the memory into kernel virtual address space. */
	if ((error = bus_dmamem_map(dmatag, &seg, rseg, size,
	    &sc->sc_rb.rb_membase,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("%s: DMA buffer map error %d\n",
		    self->dv_xname, error);
		bus_dmamem_free(dmatag, &seg, rseg);
		return;
	}

	/* Load the buffer into the DMA map to obtain its bus address. */
	if ((error = bus_dmamap_load(dmatag, sc->sc_dmamap,
	    sc->sc_rb.rb_membase, size, NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: DMA buffer map load error %d\n",
		    self->dv_xname, error);
		bus_dmamem_unmap(dmatag, sc->sc_rb.rb_membase, size);
		bus_dmamem_free(dmatag, &seg, rseg);
		return;
	}
	sc->sc_rb.rb_dmabase = sc->sc_dmamap->dm_segs[0].ds_addr;

	/* Initialize media support. */
	ifmedia_init(&sc->sc_ifmedia, 0, qe_ifmedia_upd, qe_ifmedia_sts);
	ifmedia_add(&sc->sc_ifmedia,
	    IFM_MAKEWORD(IFM_ETHER,IFM_10_T,0,0), 0, NULL);
	ifmedia_add(&sc->sc_ifmedia,
	    IFM_MAKEWORD(IFM_ETHER,IFM_10_5,0,0), 0, NULL);
	ifmedia_add(&sc->sc_ifmedia,
	    IFM_MAKEWORD(IFM_ETHER,IFM_AUTO,0,0), 0, NULL);
	ifmedia_set(&sc->sc_ifmedia, IFM_ETHER|IFM_AUTO);

	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_start = qestart;
	ifp->if_ioctl = qeioctl;
	ifp->if_watchdog = qewatchdog;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS |
	    IFF_MULTICAST;
	IFQ_SET_READY(&ifp->if_snd);

	/* Attach the interface. */
	if_attach(ifp);
	ether_ifattach(ifp);

	printf(" address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));
}

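/*
 * Pull a received packet out of the receive buffer at descriptor index
 * `idx' and copy its `totlen' bytes into a freshly allocated mbuf chain.
 * Returns NULL if no mbufs (or clusters) are available.
 */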
struct mbuf *
qe_get(sc, idx, totlen)
	struct qe_softc *sc;
	int idx, totlen;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mbuf *m;
	struct mbuf *top, **mp;
	int len, pad, boff = 0;
	caddr_t bp;

	bp = sc->sc_rb.rb_rxbuf + (idx % sc->sc_rb.rb_nrbuf) * QE_PKT_BUF_SZ;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = totlen;
	pad = ALIGN(sizeof(struct ether_header)) - sizeof(struct ether_header);
	m->m_data += pad;
	len = MHLEN - pad;
	top = NULL;
	mp = &top;

	while (totlen > 0) {
		if (top) {
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				m_freem(top);
				return (NULL);
			}
			len = MLEN;
		}
		if (top && totlen >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if (m->m_flags & M_EXT)
				len = MCLBYTES;
		}
		m->m_len = len = min(totlen, len);
		bcopy(bp + boff, mtod(m, caddr_t), len);
		boff += len;
		totlen -= len;
		*mp = m;
		mp = &m->m_next;
	}

	return (top);
}

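/*
 * Copy an mbuf chain into the transmit buffer associated with descriptor
 * index `idx', freeing the mbufs as we go.  Returns the total number of
 * bytes copied.
 */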
__inline__ int
qe_put(sc, idx, m)
	struct qe_softc *sc;
	int idx;
	struct mbuf *m;
{
	struct mbuf *n;
	int len, tlen = 0, boff = 0;
	caddr_t bp;

	bp = sc->sc_rb.rb_txbuf + (idx % sc->sc_rb.rb_ntbuf) * QE_PKT_BUF_SZ;

	for (; m; m = n) {
		len = m->m_len;
		if (len == 0) {
			MFREE(m, n);
			continue;
		}
		bcopy(mtod(m, caddr_t), bp+boff, len);
		boff += len;
		tlen += len;
		MFREE(m, n);
	}
	return (tlen);
}

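/*
 * Pass a received packet at descriptor index `idx' up to the network
 * stack, after a basic sanity check on its length.
 */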
__inline__ void
qe_read(sc, idx, len)
	struct qe_softc *sc;
	int idx, len;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mbuf *m;

	if (len <= sizeof(struct ether_header) ||
	    len > ETHERMTU + sizeof(struct ether_header)) {

		printf("%s: invalid packet size %d; dropping\n",
		    ifp->if_xname, len);

		ifp->if_ierrors++;
		return;
	}

	/* Pull the packet off the interface. */
	m = qe_get(sc, idx, len);
	if (m == NULL) {
		ifp->if_ierrors++;
		return;
	}
	ifp->if_ipackets++;

#if NBPFILTER > 0
	/* Hand the packet to any attached BPF listeners. */
	if (ifp->if_bpf)
		bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif

	ether_input_mbuf(ifp, m);
}

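/*
 * Start output on the interface: copy packets from the send queue into
 * the transmit ring until the ring is full, handing each descriptor to
 * the chip and poking the transmitter.
 */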
void
qestart(ifp)
	struct ifnet *ifp;
{
	struct qe_softc *sc = (struct qe_softc *)ifp->if_softc;
	struct qec_xd *txd = sc->sc_rb.rb_txd;
	struct mbuf *m;
	unsigned int bix, len;
	unsigned int ntbuf = sc->sc_rb.rb_ntbuf;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	bix = sc->sc_rb.rb_tdhead;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;

		IFQ_DEQUEUE(&ifp->if_snd, m);

#if NBPFILTER > 0
		/* Tap off here if there is a BPF listener. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		/* Copy the mbuf chain into the transmit buffer. */
		len = qe_put(sc, bix, m);

		/*
		 * Initialize the transmit descriptor and kick the
		 * transmitter.
		 */
		txd[bix].xd_flags = QEC_XD_OWN | QEC_XD_SOP | QEC_XD_EOP |
		    (len & QEC_XD_LENGTH);
		bus_space_write_4(sc->sc_bustag, sc->sc_cr, QE_CRI_CTRL,
		    QE_CR_CTRL_TWAKEUP);

		if (++bix == QEC_XD_RING_MAXSIZE)
			bix = 0;

		if (++sc->sc_rb.rb_td_nbusy == ntbuf) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
	}

	sc->sc_rb.rb_tdhead = bix;
}

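/*
 * Stop the transmitter and receiver: reset the MACE chip and this
 * channel's part of the QEC.
 */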
void
qestop(sc)
	struct qe_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mr = sc->sc_mr;
	bus_space_handle_t cr = sc->sc_cr;
	int n;

	/* Stop the MACE: issue a software reset and wait for it to clear. */
	bus_space_write_1(t, mr, QE_MRI_BIUCC, QE_MR_BIUCC_SWRST);
	for (n = 200; n > 0; n--) {
		if ((bus_space_read_1(t, mr, QE_MRI_BIUCC) &
		    QE_MR_BIUCC_SWRST) == 0)
			break;
		DELAY(20);
	}

	/* Stop the QEC channel: assert reset and wait for it to clear. */
	bus_space_write_4(t, cr, QE_CRI_CTRL, QE_CR_CTRL_RESET);
	for (n = 200; n > 0; n--) {
		if ((bus_space_read_4(t, cr, QE_CRI_CTRL) &
		    QE_CR_CTRL_RESET) == 0)
			break;
		DELAY(20);
	}
}

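/*
 * Reset interface: stop it and bring it back up again.
 */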
void
qereset(sc)
	struct qe_softc *sc;
{
	int s;

	s = splnet();
	qestop(sc);
	qeinit(sc);
	splx(s);
}

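/*
 * Watchdog timeout: the transmitter appears wedged, so log it, count an
 * output error and reset the chip.
 */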
void
qewatchdog(ifp)
	struct ifnet *ifp;
{
	struct qe_softc *sc = ifp->if_softc;

	log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;

	qereset(sc);
}

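/*
 * Interrupt handler: check the QEC global status to see whether this
 * channel has anything pending, then dispatch to the error, transmit and
 * receive handlers as indicated by the per-channel status register.
 */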
int
qeintr(arg)
	void *arg;
{
	struct qe_softc *sc = (struct qe_softc *)arg;
	bus_space_tag_t t = sc->sc_bustag;
	u_int32_t qecstat, qestat;
	int r = 0;

	/* Read the global status register of the QEC parent. */
	qecstat = bus_space_read_4(t, sc->sc_qr, QEC_QRI_STAT);
#ifdef QEDEBUG
	if (sc->sc_debug) {
		printf("qe%d: intr: qecstat=%x\n", sc->sc_channel, qecstat);
	}
#endif

	/* Filter out the status bits that belong to this channel. */
	qecstat = qecstat >> (4 * sc->sc_channel);
	if ((qecstat & 0xf) == 0)
		return (r);

	qestat = bus_space_read_4(t, sc->sc_cr, QE_CRI_STAT);

#ifdef QEDEBUG
	if (sc->sc_debug) {
		int i;
		bus_space_tag_t t = sc->sc_bustag;
		bus_space_handle_t mr = sc->sc_mr;

		printf("qe%d: intr: qestat=%b\n", sc->sc_channel,
		    qestat, QE_CR_STAT_BITS);

		printf("MACE registers:\n");
		for (i = 0 ; i < 32; i++) {
			printf(" m[%d]=%x,", i, bus_space_read_1(t, mr, i));
			if (((i+1) & 7) == 0)
				printf("\n");
		}
	}
#endif

	if (qestat & QE_CR_STAT_ALLERRORS) {
#ifdef QEDEBUG
		if (sc->sc_debug)
			printf("qe%d: eint: qestat=%b\n", sc->sc_channel,
			    qestat, QE_CR_STAT_BITS);
#endif
		r |= qe_eint(sc, qestat);
		if (r == -1)
			return (1);
	}

	if (qestat & QE_CR_STAT_TXIRQ)
		r |= qe_tint(sc);

	if (qestat & QE_CR_STAT_RXIRQ)
		r |= qe_rint(sc);

	return (1);
}

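/*
 * Transmit interrupt: reclaim descriptors the chip has finished with and
 * restart output if the ring was full.
 */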
int
qe_tint(sc)
	struct qe_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	unsigned int bix, txflags;

	bix = sc->sc_rb.rb_tdtail;

	for (;;) {
		if (sc->sc_rb.rb_td_nbusy <= 0)
			break;

		txflags = sc->sc_rb.rb_txd[bix].xd_flags;

		if (txflags & QEC_XD_OWN)
			break;

		ifp->if_flags &= ~IFF_OACTIVE;
		ifp->if_opackets++;

		if (++bix == QEC_XD_RING_MAXSIZE)
			bix = 0;

		--sc->sc_rb.rb_td_nbusy;
	}

	if (sc->sc_rb.rb_td_nbusy == 0)
		ifp->if_timer = 0;

	if (sc->sc_rb.rb_tdtail != bix) {
		sc->sc_rb.rb_tdtail = bix;
		if (ifp->if_flags & IFF_OACTIVE) {
			ifp->if_flags &= ~IFF_OACTIVE;
			qestart(ifp);
		}
	}

	return (1);
}

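/*
 * Receive interrupt: hand each completed receive descriptor to qe_read()
 * and give the descriptor back to the chip.
 */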
int
qe_rint(sc)
	struct qe_softc *sc;
{
	struct qec_xd *xd = sc->sc_rb.rb_rxd;
	unsigned int bix, len;
	unsigned int nrbuf = sc->sc_rb.rb_nrbuf;
#ifdef QEDEBUG
	int npackets = 0;
#endif

	bix = sc->sc_rb.rb_rdtail;

	/*
	 * Process all buffers with valid data.
	 */
	for (;;) {
		len = xd[bix].xd_flags;
		if (len & QEC_XD_OWN)
			break;

#ifdef QEDEBUG
		npackets++;
#endif

		len &= QEC_XD_LENGTH;
		len -= 4;
		qe_read(sc, bix, len);

		/* Give the descriptor back to the chip. */
		xd[(bix+nrbuf) % QEC_XD_RING_MAXSIZE].xd_flags =
		    QEC_XD_OWN | (QE_PKT_BUF_SZ & QEC_XD_LENGTH);

		if (++bix == QEC_XD_RING_MAXSIZE)
			bix = 0;
	}
#ifdef QEDEBUG
	if (npackets == 0 && sc->sc_debug)
		printf("%s: rint: no packets; rb index %d; status 0x%x\n",
		    sc->sc_dev.dv_xname, bix, len);
#endif

	sc->sc_rb.rb_rdtail = bix;

	return (1);
}

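/*
 * Error interrupt: decode the error bits, update the interface error
 * counters and decide whether the chip needs a reset.  Returns -1 if a
 * reset was performed, otherwise non-zero if any error bit was handled.
 */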
int
qe_eint(sc, why)
	struct qe_softc *sc;
	u_int32_t why;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int r = 0, rst = 0;

	if (why & QE_CR_STAT_EDEFER) {
		printf("%s: excessive tx defers.\n", sc->sc_dev.dv_xname);
		r |= 1;
		ifp->if_oerrors++;
	}

	if (why & QE_CR_STAT_CLOSS) {
		ifp->if_oerrors++;
		r |= 1;
	}

	if (why & QE_CR_STAT_ERETRIES) {
		printf("%s: excessive tx retries\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_LCOLL) {
		printf("%s: late tx transmission\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_FUFLOW) {
		printf("%s: tx fifo underflow\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_JERROR) {
		printf("%s: jabber seen\n", sc->sc_dev.dv_xname);
		r |= 1;
	}

	if (why & QE_CR_STAT_BERROR) {
		printf("%s: babble seen\n", sc->sc_dev.dv_xname);
		r |= 1;
	}

	if (why & QE_CR_STAT_TCCOFLOW) {
		ifp->if_collisions += 256;
		ifp->if_oerrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_TXDERROR) {
		printf("%s: tx descriptor is bad\n", sc->sc_dev.dv_xname);
		rst = 1;
		r |= 1;
	}

	if (why & QE_CR_STAT_TXLERR) {
		printf("%s: tx late error\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		rst = 1;
		r |= 1;
	}

	if (why & QE_CR_STAT_TXPERR) {
		printf("%s: tx dma parity error\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		rst = 1;
		r |= 1;
	}

	if (why & QE_CR_STAT_TXSERR) {
		printf("%s: tx dma sbus error ack\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		rst = 1;
		r |= 1;
	}

	if (why & QE_CR_STAT_RCCOFLOW) {
		ifp->if_collisions += 256;
		ifp->if_ierrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_RUOFLOW) {
		ifp->if_ierrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_MCOFLOW) {
		ifp->if_ierrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_RXFOFLOW) {
		printf("%s: rx fifo overflow\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		r |= 1;
	}

	if (why & QE_CR_STAT_RLCOLL) {
		printf("%s: rx late collision\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		ifp->if_collisions++;
		r |= 1;
	}

	if (why & QE_CR_STAT_FCOFLOW) {
		ifp->if_ierrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_CECOFLOW) {
		ifp->if_ierrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_RXDROP) {
		printf("%s: rx packet dropped\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		r |= 1;
	}

	if (why & QE_CR_STAT_RXSMALL) {
		printf("%s: rx buffer too small\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_RXLERR) {
		printf("%s: rx late error\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_RXPERR) {
		printf("%s: rx dma parity error\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_RXSERR) {
		printf("%s: rx dma sbus error ack\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		r |= 1;
		rst = 1;
	}

	if (r == 0)
		printf("%s: unexpected interrupt error: %08x\n",
		    sc->sc_dev.dv_xname, why);

	if (rst) {
		printf("%s: resetting...\n", sc->sc_dev.dv_xname);
		qereset(sc);
		return (-1);
	}

	return (r);
}

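/*
 * Process an ioctl request: interface address and flag changes, multicast
 * list updates and media selection.
 */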
int
qeioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	struct qe_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	if ((error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data)) > 0) {
		splx(s);
		return (error);
	}

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			qeinit(sc);
			arp_ifinit(&sc->sc_arpcom, ifa);
			break;
#endif
		default:
			qeinit(sc);
			break;
		}
		break;

	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If interface is marked down and it is running,
			 * stop it.
			 */
			qestop(sc);
			ifp->if_flags &= ~IFF_RUNNING;
		} else if ((ifp->if_flags & IFF_UP) != 0 &&
		    (ifp->if_flags & IFF_RUNNING) == 0) {
			/*
			 * If interface is marked up and it is stopped,
			 * start it.
			 */
			qeinit(sc);
		} else {
			/*
			 * Reset the interface to pick up changes in any
			 * other flags that affect hardware registers.
			 */
			qestop(sc);
			qeinit(sc);
		}
#ifdef QEDEBUG
		sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0;
#endif
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_arpcom):
		    ether_delmulti(ifr, &sc->sc_arpcom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; reprogram the
			 * hardware filter accordingly.
			 */
			if (ifp->if_flags & IFF_RUNNING)
				qe_mcreset(sc);
			error = 0;
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_ifmedia, cmd);
		break;

	default:
		error = EINVAL;
		break;
	}

	splx(s);
	return (error);
}

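/*
 * Initialization of interface: set up the descriptor rings, program the
 * QEC channel and MACE registers, load the station address, program the
 * multicast filter and mark the interface running.
 */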
void
qeinit(sc)
	struct qe_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t cr = sc->sc_cr;
	bus_space_handle_t mr = sc->sc_mr;
	struct qec_softc *qec = sc->sc_qec;
	u_int32_t qecaddr;
	u_int8_t *ea;
	int s;

	s = splnet();

	qestop(sc);

	/* Set up the transmit and receive descriptor rings. */
	qec_meminit(&sc->sc_rb, QE_PKT_BUF_SZ);

	/* Channel registers: */
	bus_space_write_4(t, cr, QE_CRI_RXDS, (u_int32_t)sc->sc_rb.rb_rxddma);
	bus_space_write_4(t, cr, QE_CRI_TXDS, (u_int32_t)sc->sc_rb.rb_txddma);

	bus_space_write_4(t, cr, QE_CRI_RIMASK, 0);
	bus_space_write_4(t, cr, QE_CRI_TIMASK, 0);
	bus_space_write_4(t, cr, QE_CRI_QMASK, 0);
	bus_space_write_4(t, cr, QE_CRI_MMASK, QE_CR_MMASK_RXCOLL);
	bus_space_write_4(t, cr, QE_CRI_CCNT, 0);
	bus_space_write_4(t, cr, QE_CRI_PIPG, 0);

	qecaddr = sc->sc_channel * qec->sc_msize;
	bus_space_write_4(t, cr, QE_CRI_RXWBUF, qecaddr);
	bus_space_write_4(t, cr, QE_CRI_RXRBUF, qecaddr);
	bus_space_write_4(t, cr, QE_CRI_TXWBUF, qecaddr + qec->sc_rsize);
	bus_space_write_4(t, cr, QE_CRI_TXRBUF, qecaddr + qec->sc_rsize);

	/* Dummy read; presumably flushes the preceding channel register writes. */
	bus_space_read_4(t, cr, QE_CRI_QMASK);

	/* MACE registers: */
	bus_space_write_1(t, mr, QE_MRI_PHYCC, QE_MR_PHYCC_ASEL);
	bus_space_write_1(t, mr, QE_MRI_XMTFC, QE_MR_XMTFC_APADXMT);
	bus_space_write_1(t, mr, QE_MRI_RCVFC, 0);

	/*
	 * Mask the MACE receive and collision-error interrupts; receive
	 * completion is signalled through the QEC instead.
	 */
	bus_space_write_1(t, mr, QE_MRI_IMR,
	    QE_MR_IMR_CERRM | QE_MR_IMR_RCVINTM);

	bus_space_write_1(t, mr, QE_MRI_BIUCC,
	    QE_MR_BIUCC_BSWAP | QE_MR_BIUCC_64TS);

	bus_space_write_1(t, mr, QE_MRI_FIFOFC,
	    QE_MR_FIFOCC_TXF16 | QE_MR_FIFOCC_RXF32 |
	    QE_MR_FIFOCC_RFWU | QE_MR_FIFOCC_TFWU);

	bus_space_write_1(t, mr, QE_MRI_PLSCC, QE_MR_PLSCC_TP);

	/*
	 * Station address.
	 */
	ea = sc->sc_arpcom.ac_enaddr;
	bus_space_write_1(t, mr, QE_MRI_IAC,
	    QE_MR_IAC_ADDRCHG | QE_MR_IAC_PHYADDR);
	bus_space_write_multi_1(t, mr, QE_MRI_PADR, ea, 6);

	/* Apply media settings. */
	qe_ifmedia_upd(ifp);

	/*
	 * Clear the logical address filter.
	 */
	bus_space_write_1(t, mr, QE_MRI_IAC,
	    QE_MR_IAC_ADDRCHG | QE_MR_IAC_LOGADDR);
	bus_space_set_multi_1(t, mr, QE_MRI_LADRF, 0, 8);
	bus_space_write_1(t, mr, QE_MRI_IAC, 0);

	/* Clear the missed packet count (register is cleared on read). */
	(void)bus_space_read_1(t, mr, QE_MRI_MPC);

#if 0
	/* Test register: */
	bus_space_write_1(t, mr, QE_MRI_UTR, 0);
#endif

	/* Reset multicast filter; this also enables the transmitter and receiver. */
	qe_mcreset(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	splx(s);
}

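/*
 * Reset multicast filter: program the MACE logical address filter from
 * the interface's multicast list and (re)enable the transmitter and
 * receiver.
 */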
void
qe_mcreset(sc)
	struct qe_softc *sc;
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mr = sc->sc_mr;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int32_t crc;
	u_int16_t hash[4];
	u_int8_t octet, maccc, *ladrp = (u_int8_t *)&hash[0];
	int i, j;

	/* We also enable the transmitter and receiver here. */
	maccc = QE_MR_MACCC_ENXMT | QE_MR_MACCC_ENRCV;

	if (ifp->if_flags & IFF_PROMISC) {
		maccc |= QE_MR_MACCC_PROM;
		bus_space_write_1(t, mr, QE_MRI_MACCC, maccc);
		return;
	}

	if (ifp->if_flags & IFF_ALLMULTI) {
		bus_space_write_1(t, mr, QE_MRI_IAC,
		    QE_MR_IAC_ADDRCHG | QE_MR_IAC_LOGADDR);
		bus_space_set_multi_1(t, mr, QE_MRI_LADRF, 0xff, 8);
		bus_space_write_1(t, mr, QE_MRI_IAC, 0);
		bus_space_write_1(t, mr, QE_MRI_MACCC, maccc);
		return;
	}

	hash[3] = hash[2] = hash[1] = hash[0] = 0;

	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0) {
			/*
			 * We must listen to a range of multicast
			 * addresses.  For now, just accept all
			 * multicasts rather than trying to set only
			 * those filter bits needed to match the range.
			 */
			bus_space_write_1(t, mr, QE_MRI_IAC,
			    QE_MR_IAC_ADDRCHG | QE_MR_IAC_LOGADDR);
			bus_space_set_multi_1(t, mr, QE_MRI_LADRF, 0xff, 8);
			bus_space_write_1(t, mr, QE_MRI_IAC, 0);
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}

		crc = 0xffffffff;

		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			octet = enm->enm_addrlo[i];

			for (j = 0; j < 8; j++) {
				if ((crc & 1) ^ (octet & 1)) {
					crc >>= 1;
					crc ^= MC_POLY_LE;
				}
				else
					crc >>= 1;
				octet >>= 1;
			}
		}

		/* Just want the 6 most significant bits. */
		crc >>= 26;
		hash[crc >> 4] |= 1 << (crc & 0xf);
		ETHER_NEXT_MULTI(step, enm);
	}

	bus_space_write_1(t, mr, QE_MRI_IAC,
	    QE_MR_IAC_ADDRCHG | QE_MR_IAC_LOGADDR);
	bus_space_write_multi_1(t, mr, QE_MRI_LADRF, ladrp, 8);
	bus_space_write_1(t, mr, QE_MRI_IAC, 0);
	bus_space_write_1(t, mr, QE_MRI_MACCC, maccc);
}

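/*
 * Report current media status: we only do 10baseT, and report link state
 * only when the MACE's link test is not disabled.
 */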
void
qe_ifmedia_sts(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	struct qe_softc *sc = ifp->if_softc;
	u_int8_t phycc;

	ifmr->ifm_active = IFM_ETHER | IFM_10_T;
	phycc = bus_space_read_1(sc->sc_bustag, sc->sc_mr, QE_MRI_PHYCC);
	if ((phycc & QE_MR_PHYCC_DLNKTST) == 0) {
		ifmr->ifm_status |= IFM_AVALID;
		if (phycc & QE_MR_PHYCC_LNKFL)
			ifmr->ifm_status &= ~IFM_ACTIVE;
		else
			ifmr->ifm_status |= IFM_ACTIVE;
	}
}

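/*
 * Set media options.  The MACE is run in automatic port-selection mode
 * (see qeinit()), so all we do here is sanity-check the requested media.
 */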
int
qe_ifmedia_upd(ifp)
	struct ifnet *ifp;
{
	struct qe_softc *sc = ifp->if_softc;
	int media = sc->sc_ifmedia.ifm_media;

	if (IFM_TYPE(media) != IFM_ETHER)
		return (EINVAL);

	if (IFM_SUBTYPE(media) != IFM_10_T)
		return (EINVAL);

	return (0);
}