This source file includes the following definitions:
- gem_config
- gem_tick
- gem_bitwait
- gem_reset
- gem_rxdrain
- gem_stop
- gem_reset_rx
- gem_reset_tx
- gem_disable_rx
- gem_disable_tx
- gem_meminit
- gem_ringsize
- gem_init
- gem_init_regs
- gem_rint
- gem_add_rxbuf
- gem_eint
- gem_pint
- gem_intr
- gem_watchdog
- gem_mifinit
- gem_mii_readreg
- gem_mii_writereg
- gem_mii_statchg
- gem_pcs_readreg
- gem_pcs_writereg
- gem_mediachange
- gem_mediastatus
- gem_ioctl
- gem_shutdown
- gem_setladrf
- gem_encap
- gem_tint
- gem_start
#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/timeout.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>

#include <machine/endian.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>

#include <dev/ic/gemreg.h>
#include <dev/ic/gemvar.h>

#define TRIES	10000
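
/*
 * Note: gem_bitwait() below makes up to TRIES reads with DELAY(100)
 * between them, so polled register bits get roughly one second to settle.
 */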

struct cfdriver gem_cd = {
	NULL, "gem", DV_IFNET
};

void		gem_start(struct ifnet *);
void		gem_stop(struct ifnet *, int);
int		gem_ioctl(struct ifnet *, u_long, caddr_t);
void		gem_tick(void *);
void		gem_watchdog(struct ifnet *);
void		gem_shutdown(void *);
int		gem_init(struct ifnet *);
void		gem_init_regs(struct gem_softc *);
int		gem_ringsize(int);
int		gem_meminit(struct gem_softc *);
void		gem_mifinit(struct gem_softc *);
int		gem_bitwait(struct gem_softc *, bus_space_handle_t, int,
		    u_int32_t, u_int32_t);
void		gem_reset(struct gem_softc *);
int		gem_reset_rx(struct gem_softc *);
int		gem_reset_tx(struct gem_softc *);
int		gem_disable_rx(struct gem_softc *);
int		gem_disable_tx(struct gem_softc *);
void		gem_rxdrain(struct gem_softc *);
int		gem_add_rxbuf(struct gem_softc *, int idx);
void		gem_setladrf(struct gem_softc *);
int		gem_encap(struct gem_softc *, struct mbuf *, u_int32_t *);

int		gem_mii_readreg(struct device *, int, int);
void		gem_mii_writereg(struct device *, int, int, int);
void		gem_mii_statchg(struct device *);
int		gem_pcs_readreg(struct device *, int, int);
void		gem_pcs_writereg(struct device *, int, int, int);

int		gem_mediachange(struct ifnet *);
void		gem_mediastatus(struct ifnet *, struct ifmediareq *);

struct mbuf	*gem_get(struct gem_softc *, int, int);
int		gem_eint(struct gem_softc *, u_int);
int		gem_rint(struct gem_softc *);
int		gem_tint(struct gem_softc *, u_int32_t);
int		gem_pint(struct gem_softc *);

#ifdef GEM_DEBUG
#define DPRINTF(sc, x)	if ((sc)->sc_arpcom.ac_if.if_flags & IFF_DEBUG) \
				printf x
#else
#define DPRINTF(sc, x)
#endif

/*
 * gem_config:
 *
 *	Attach a Gem interface to the system.
 */
void
gem_config(struct gem_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *child;
	int i, error, phyad;
	struct ifmedia_entry *ifm;

	/* Make sure the chip is stopped. */
	ifp->if_softc = sc;
	gem_reset(sc);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmatag,
	    sizeof(struct gem_control_data), PAGE_SIZE, 0, &sc->sc_cdseg,
	    1, &sc->sc_cdnseg, 0)) != 0) {
		printf("\n%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg,
	    sizeof(struct gem_control_data), (caddr_t *)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		printf("\n%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmatag,
	    sizeof(struct gem_control_data), 1,
	    sizeof(struct gem_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		printf("\n%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmatag, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct gem_control_data), NULL,
	    0)) != 0) {
		printf("\n%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < GEM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmatag, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			printf("\n%s: unable to create rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < GEM_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmatag, MCLBYTES,
		    GEM_NTXSEGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_txd[i].sd_map)) != 0) {
			printf("\n%s: unable to create tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_6;
		}
		sc->sc_txd[i].sd_mbuf = NULL;
	}

	/*
	 * From this point forward, the attachment cannot fail.  A failure
	 * before this point releases all resources that may have been
	 * allocated.
	 */

	/* Announce ourselves. */
	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/* Get RX FIFO size */
	sc->sc_rxfifosize = 64 *
	    bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_RX_FIFO_SIZE);

	/* Initialize ifnet structure. */
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, sizeof ifp->if_xname);
	ifp->if_softc = sc;
	ifp->if_flags =
	    IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;
	ifp->if_start = gem_start;
	ifp->if_ioctl = gem_ioctl;
	ifp->if_watchdog = gem_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, GEM_NTXDESC - 1);
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	/* Initialize ifmedia structures and MII info. */
	mii->mii_ifp = ifp;
	mii->mii_readreg = gem_mii_readreg;
	mii->mii_writereg = gem_mii_writereg;
	mii->mii_statchg = gem_mii_statchg;

	ifmedia_init(&mii->mii_media, 0, gem_mediachange, gem_mediastatus);

	if (sc->sc_variant != GEM_SUN_ERI)
		bus_space_write_4(sc->sc_bustag, sc->sc_h1,
		    GEM_MII_DATAPATH_MODE, 0);

	gem_mifinit(sc);

	/*
	 * Look for an external PHY.
	 */
	if (sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) {
		sc->sc_mif_config |= GEM_MIF_CONFIG_PHY_SEL;
		bus_space_write_4(sc->sc_bustag, sc->sc_h1,
		    GEM_MIF_CONFIG, sc->sc_mif_config);

		switch (sc->sc_variant) {
		case GEM_SUN_ERI:
			phyad = GEM_PHYAD_EXTERNAL;
			break;
		default:
			phyad = MII_PHY_ANY;
			break;
		}

		mii_attach(&sc->sc_dev, mii, 0xffffffff, phyad,
		    MII_OFFSET_ANY, 0);
	}

	/*
	 * Fall back on an internal PHY if no external PHY was found.
	 */
	child = LIST_FIRST(&mii->mii_phys);
	if (child == NULL && sc->sc_mif_config & GEM_MIF_CONFIG_MDI0) {
		sc->sc_mif_config &= ~GEM_MIF_CONFIG_PHY_SEL;
		bus_space_write_4(sc->sc_bustag, sc->sc_h1,
		    GEM_MIF_CONFIG, sc->sc_mif_config);

		switch (sc->sc_variant) {
		case GEM_SUN_ERI:
		case GEM_APPLE_K2_GMAC:
			phyad = GEM_PHYAD_INTERNAL;
			break;
		case GEM_APPLE_GMAC:
			phyad = GEM_PHYAD_EXTERNAL;
			break;
		default:
			phyad = MII_PHY_ANY;
			break;
		}

		mii_attach(&sc->sc_dev, mii, 0xffffffff, phyad,
		    MII_OFFSET_ANY, 0);
	}

	/*
	 * Try the external PCS SERDES if we didn't find any MII
	 * devices.
	 */
	child = LIST_FIRST(&mii->mii_phys);
	if (child == NULL && sc->sc_variant != GEM_SUN_ERI &&
	    sc->sc_mif_config & (GEM_MIF_CONFIG_MDI0|GEM_MIF_CONFIG_MDI1)) {
		bus_space_write_4(sc->sc_bustag, sc->sc_h1,
		    GEM_MII_DATAPATH_MODE, GEM_MII_DATAPATH_SERDES);

		bus_space_write_4(sc->sc_bustag, sc->sc_h1,
		    GEM_MII_SLINK_CONTROL,
		    GEM_MII_SLINK_LOOPBACK|GEM_MII_SLINK_EN_SYNC_D);

		bus_space_write_4(sc->sc_bustag, sc->sc_h1,
		    GEM_MII_CONFIG, GEM_MII_CONFIG_ENABLE);

		mii->mii_readreg = gem_pcs_readreg;
		mii->mii_writereg = gem_pcs_writereg;

		mii_attach(&sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
		    MII_OFFSET_ANY, MIIF_NOISOLATE);
	}

	child = LIST_FIRST(&mii->mii_phys);
	if (child == NULL) {
		/* No PHY attached. */
		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
	} else {
		/*
		 * PHYs attached; default to autoselect.
		 */
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);
	}

	/* Check if we support GigE media. */
	TAILQ_FOREACH(ifm, &sc->sc_media.ifm_list, ifm_list) {
		if (IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_T ||
		    IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_SX ||
		    IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_LX ||
		    IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_CX) {
			sc->sc_flags |= GEM_GIGABIT;
			break;
		}
	}

	/* Attach the interface. */
	if_attach(ifp);
	ether_ifattach(ifp);

	sc->sc_sh = shutdownhook_establish(gem_shutdown, sc);
	if (sc->sc_sh == NULL)
		panic("gem_config: can't establish shutdownhook");

	timeout_set(&sc->sc_tick_ch, gem_tick, sc);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_6:
	for (i = 0; i < GEM_NTXDESC; i++) {
		if (sc->sc_txd[i].sd_map != NULL)
			bus_dmamap_destroy(sc->sc_dmatag,
			    sc->sc_txd[i].sd_map);
	}
 fail_5:
	for (i = 0; i < GEM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmatag,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmatag, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmatag, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmatag, (caddr_t)sc->sc_control_data,
	    sizeof(struct gem_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg);
 fail_0:
	return;
}

/*
 * One-second timer: gather collision statistics from the MAC
 * counters and tick the MII.
 */
void
gem_tick(void *arg)
{
	struct gem_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_h1;
	int s;

	/* unload collision counters */
	ifp->if_collisions +=
	    bus_space_read_4(t, mac, GEM_MAC_NORM_COLL_CNT) +
	    bus_space_read_4(t, mac, GEM_MAC_FIRST_COLL_CNT) +
	    bus_space_read_4(t, mac, GEM_MAC_EXCESS_COLL_CNT) +
	    bus_space_read_4(t, mac, GEM_MAC_LATE_COLL_CNT);

	/* clear the hardware counters */
	bus_space_write_4(t, mac, GEM_MAC_NORM_COLL_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_FIRST_COLL_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_EXCESS_COLL_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_LATE_COLL_CNT, 0);

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add(&sc->sc_tick_ch, hz);
}

/*
 * Poll register r until all bits in clr are clear and all bits in set
 * are set; returns 1 on success, 0 if the poll budget is exhausted.
 */
int
gem_bitwait(struct gem_softc *sc, bus_space_handle_t h, int r,
    u_int32_t clr, u_int32_t set)
{
	int i;
	u_int32_t reg;

	for (i = TRIES; i--; DELAY(100)) {
		reg = bus_space_read_4(sc->sc_bustag, h, r);
		if ((reg & clr) == 0 && (reg & set) == set)
			return (1);
	}

	return (0);
}
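
/*
 * Note: gem_reset() below stops the two DMA engines individually first,
 * then issues the combined RX/TX software reset and polls until the
 * chip clears the reset bits on its own.
 */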
void
gem_reset(struct gem_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h2;
	int s;

	s = splnet();
	DPRINTF(sc, ("%s: gem_reset\n", sc->sc_dev.dv_xname));
	gem_reset_rx(sc);
	gem_reset_tx(sc);

	/* Do a full reset */
	bus_space_write_4(t, h, GEM_RESET, GEM_RESET_RX|GEM_RESET_TX);
	if (!gem_bitwait(sc, h, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0))
		printf("%s: cannot reset device\n", sc->sc_dev.dv_xname);
	splx(s);
}

/*
 * Drain the receive queue.
 */
void
gem_rxdrain(struct gem_softc *sc)
{
	struct gem_rxsoft *rxs;
	int i;

	for (i = 0; i < GEM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmatag, rxs->rxs_dmamap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
	}
}

/*
 * Stop transmission and reception on the interface.
 */
void
gem_stop(struct ifnet *ifp, int disable)
{
	struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
	struct gem_sxd *sd;
	u_int32_t i;

	DPRINTF(sc, ("%s: gem_stop\n", sc->sc_dev.dv_xname));

	timeout_del(&sc->sc_tick_ch);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	mii_down(&sc->sc_mii);

	gem_reset_rx(sc);
	gem_reset_tx(sc);

	/*
	 * Release any queued transmit buffers.
	 */
	for (i = 0; i < GEM_NTXDESC; i++) {
		sd = &sc->sc_txd[i];
		if (sd->sd_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_dmatag, sd->sd_map, 0,
			    sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmatag, sd->sd_map);
			m_freem(sd->sd_mbuf);
			sd->sd_mbuf = NULL;
		}
	}
	sc->sc_tx_cnt = sc->sc_tx_prod = sc->sc_tx_cons = 0;

	if (disable)
		gem_rxdrain(sc);
}

/*
 * Reset the receiver.
 */
int
gem_reset_rx(struct gem_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h1, h2 = sc->sc_h2;

	/*
	 * Resetting while DMA is in progress can cause a bus hang, so we
	 * disable DMA first.
	 */
	gem_disable_rx(sc);
	bus_space_write_4(t, h, GEM_RX_CONFIG, 0);

	if (!gem_bitwait(sc, h, GEM_RX_CONFIG, 1, 0))
		printf("%s: cannot disable rx dma\n", sc->sc_dev.dv_xname);

	/* Wait 5ms extra. */
	delay(5000);

	/* Finally, reset the ERX. */
	bus_space_write_4(t, h2, GEM_RESET, GEM_RESET_RX);

	if (!gem_bitwait(sc, h2, GEM_RESET, GEM_RESET_RX, 0)) {
		printf("%s: cannot reset receiver\n", sc->sc_dev.dv_xname);
		return (1);
	}
	return (0);
}

/*
 * Reset the transmitter.
 */
int
gem_reset_tx(struct gem_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h1, h2 = sc->sc_h2;

	/*
	 * Resetting while DMA is in progress can cause a bus hang, so we
	 * disable DMA first.
	 */
	gem_disable_tx(sc);
	bus_space_write_4(t, h, GEM_TX_CONFIG, 0);

	if (!gem_bitwait(sc, h, GEM_TX_CONFIG, 1, 0))
		printf("%s: cannot disable tx dma\n", sc->sc_dev.dv_xname);

	/* Wait 5ms extra. */
	delay(5000);

	/* Finally, reset the ETX. */
	bus_space_write_4(t, h2, GEM_RESET, GEM_RESET_TX);

	if (!gem_bitwait(sc, h2, GEM_RESET, GEM_RESET_TX, 0)) {
		printf("%s: cannot reset transmitter\n",
		    sc->sc_dev.dv_xname);
		return (1);
	}
	return (0);
}

/*
 * Disable receiver.
 */
int
gem_disable_rx(struct gem_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h1;
	u_int32_t cfg;

	/* Flip the enable bit */
	cfg = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
	cfg &= ~GEM_MAC_RX_ENABLE;
	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, cfg);

	/* Wait for it to finish */
	return (gem_bitwait(sc, h, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0));
}

/*
 * Disable transmitter.
 */
int
gem_disable_tx(struct gem_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h1;
	u_int32_t cfg;

	/* Flip the enable bit */
	cfg = bus_space_read_4(t, h, GEM_MAC_TX_CONFIG);
	cfg &= ~GEM_MAC_TX_ENABLE;
	bus_space_write_4(t, h, GEM_MAC_TX_CONFIG, cfg);

	/* Wait for it to finish */
	return (gem_bitwait(sc, h, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0));
}

/*
 * Initialize interface data structures.
 */
int
gem_meminit(struct gem_softc *sc)
{
	struct gem_rxsoft *rxs;
	int i, error;

	/*
	 * Initialize the transmit descriptor ring.
	 */
	for (i = 0; i < GEM_NTXDESC; i++) {
		sc->sc_txdescs[i].gd_flags = 0;
		sc->sc_txdescs[i].gd_addr = 0;
	}
	GEM_CDTXSYNC(sc, 0, GEM_NTXDESC,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.
	 */
	for (i = 0; i < GEM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf == NULL) {
			if ((error = gem_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    sc->sc_dev.dv_xname, i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				gem_rxdrain(sc);
				return (1);
			}
		} else
			GEM_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;

	return (0);
}

int
gem_ringsize(int sz)
{
	switch (sz) {
	case 32:
		return GEM_RING_SZ_32;
	case 64:
		return GEM_RING_SZ_64;
	case 128:
		return GEM_RING_SZ_128;
	case 256:
		return GEM_RING_SZ_256;
	case 512:
		return GEM_RING_SZ_512;
	case 1024:
		return GEM_RING_SZ_1024;
	case 2048:
		return GEM_RING_SZ_2048;
	case 4096:
		return GEM_RING_SZ_4096;
	case 8192:
		return GEM_RING_SZ_8192;
	default:
		printf("gem: invalid Receive Descriptor ring size %d\n", sz);
		return GEM_RING_SZ_32;
	}
}
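
/*
 * Note: gem_ringsize() maps a power-of-two descriptor count onto the
 * encoding used by the ring-size field of GEM_TX_CONFIG/GEM_RX_CONFIG;
 * an unsupported count falls back to the 32-entry encoding after a
 * warning.
 */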

/*
 * Initialization of interface; set up initialization block
 * and transmit/receive descriptor rings.
 */
int
gem_init(struct ifnet *ifp)
{
	struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h1;
	int s;
	u_int max_frame_size;
	u_int32_t v;

	s = splnet();

	DPRINTF(sc, ("%s: gem_init: calling stop\n", sc->sc_dev.dv_xname));
	/*
	 * Initialization sequence.  The numbered steps below correspond
	 * to the sequence outlined in the Ethernet I/O manual.
	 */

	/* step 1 & 2. Reset the chip */
	gem_stop(ifp, 0);
	gem_reset(sc);
	DPRINTF(sc, ("%s: gem_init: restarting\n", sc->sc_dev.dv_xname));

	/* Re-initialize the MIF */
	gem_mifinit(sc);

	/* Call MI reset function if any */
	if (sc->sc_hwreset)
		(*sc->sc_hwreset)(sc);

	/* step 3. Setup data structures in host memory */
	gem_meminit(sc);

	/* step 4. TX MAC registers & counters */
	gem_init_regs(sc);
	max_frame_size = ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN;
	v = (max_frame_size) | (0x2000 << 16) /* max burst size */;
	bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME, v);

	/* step 5. RX MAC registers & counters */
	gem_setladrf(sc);

	/* step 6 & 7. Program Descriptor Ring Base Addresses */
	bus_space_write_4(t, h, GEM_TX_RING_PTR_HI,
	    (((uint64_t)GEM_CDTXADDR(sc, 0)) >> 32));
	bus_space_write_4(t, h, GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0));

	bus_space_write_4(t, h, GEM_RX_RING_PTR_HI,
	    (((uint64_t)GEM_CDRXADDR(sc, 0)) >> 32));
	bus_space_write_4(t, h, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));

	/* step 8. Global Configuration & Interrupt Mask */
	bus_space_write_4(t, h, GEM_INTMASK,
	    ~(GEM_INTR_TX_INTME|
	      GEM_INTR_TX_EMPTY|
	      GEM_INTR_RX_DONE|GEM_INTR_RX_NOBUF|
	      GEM_INTR_RX_TAG_ERR|GEM_INTR_PCS|
	      GEM_INTR_MAC_CONTROL|GEM_INTR_MIF|
	      GEM_INTR_BERR));
	bus_space_write_4(t, h, GEM_MAC_RX_MASK,
	    GEM_MAC_RX_DONE|GEM_MAC_RX_FRAME_CNT);
	bus_space_write_4(t, h, GEM_MAC_TX_MASK, 0xffff);
	bus_space_write_4(t, h, GEM_MAC_CONTROL_MASK, 0);

	/* step 9. ETX Configuration: use mostly default values */

	/* Enable DMA */
	v = gem_ringsize(GEM_NTXDESC /* XXX */);
	bus_space_write_4(t, h, GEM_TX_CONFIG,
	    v|GEM_TX_CONFIG_TXDMA_EN|
	    ((0x400<<10)&GEM_TX_CONFIG_TXFIFO_TH));
	bus_space_write_4(t, h, GEM_TX_KICK, 0);

	/* step 10. ERX Configuration */
	v = gem_ringsize(GEM_NRXDESC /* XXX */);

	/* Enable DMA */
	bus_space_write_4(t, h, GEM_RX_CONFIG,
	    v|(GEM_THRSH_1024<<GEM_RX_CONFIG_FIFO_THRS_SHIFT)|
	    (2<<GEM_RX_CONFIG_FBOFF_SHFT)|GEM_RX_CONFIG_RXDMA_EN|
	    (0<<GEM_RX_CONFIG_CXM_START_SHFT));
	/*
	 * The following value is for an OFF Threshold of about 3/4 full
	 * and an ON Threshold of 1/4 full.
	 */
	bus_space_write_4(t, h, GEM_RX_PAUSE_THRESH,
	    (3 * sc->sc_rxfifosize / 256) |
	    ((sc->sc_rxfifosize / 256) << 12));
	bus_space_write_4(t, h, GEM_RX_BLANKING, (6 << 12) | 6);

	/* step 11. Configure Media */
	mii_mediachg(&sc->sc_mii);

	/* step 12. RX_MAC Configuration Register */
	v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
	v |= GEM_MAC_RX_ENABLE | GEM_MAC_RX_STRIP_CRC;
	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v);

	/* Call MI initialization function if any */
	if (sc->sc_hwinit)
		(*sc->sc_hwinit)(sc);

	/* step 15.  Give the receiver a swift kick */
	bus_space_write_4(t, h, GEM_RX_KICK, GEM_NRXDESC-4);

	/* Start the one second timer. */
	timeout_add(&sc->sc_tick_ch, hz);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	ifp->if_timer = 0;
	splx(s);

	return (0);
}

void
gem_init_regs(struct gem_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h1;
	u_int32_t v;

	/* These regs are not cleared on reset */
	sc->sc_inited = 0;
	if (!sc->sc_inited) {
		/* Load recommended values */
		bus_space_write_4(t, h, GEM_MAC_IPG0, 0);
		bus_space_write_4(t, h, GEM_MAC_IPG1, 8);
		bus_space_write_4(t, h, GEM_MAC_IPG2, 4);

		bus_space_write_4(t, h, GEM_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN);
		/* Max frame and max burst size */
		v = ETHER_MAX_LEN | (0x2000 << 16) /* max burst size */;
		bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME, v);

		bus_space_write_4(t, h, GEM_MAC_PREAMBLE_LEN, 0x7);
		bus_space_write_4(t, h, GEM_MAC_JAM_SIZE, 0x4);
		bus_space_write_4(t, h, GEM_MAC_ATTEMPT_LIMIT, 0x10);

		bus_space_write_4(t, h, GEM_MAC_CONTROL_TYPE, 0x8088);
		bus_space_write_4(t, h, GEM_MAC_RANDOM_SEED,
		    ((sc->sc_arpcom.ac_enaddr[5]<<8)|sc->sc_arpcom.ac_enaddr[4])&0x3ff);

		/* Secondary MAC addresses set to 0:0:0:0:0:0 */
		bus_space_write_4(t, h, GEM_MAC_ADDR3, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR4, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR5, 0);

		/* MAC control address: 01:80:c2:00:00:01 */
		bus_space_write_4(t, h, GEM_MAC_ADDR6, 0x0001);
		bus_space_write_4(t, h, GEM_MAC_ADDR7, 0xc200);
		bus_space_write_4(t, h, GEM_MAC_ADDR8, 0x0180);

		/* MAC filter addresses set to 0 */
		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER0, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER1, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER2, 0);

		bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK1_2, 0);
		bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK0, 0);

		sc->sc_inited = 1;
	}

	/* Counters need to be zeroed */
	bus_space_write_4(t, h, GEM_MAC_NORM_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_FIRST_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_EXCESS_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_LATE_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_DEFER_TMR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_PEAK_ATTEMPTS, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_FRAME_COUNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_LEN_ERR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_ALIGN_ERR, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_CRC_ERR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_CODE_VIOL, 0);

	/* Un-pause stuff */
	bus_space_write_4(t, h, GEM_MAC_SEND_PAUSE_CMD, 0);

	/*
	 * Set the station address.
	 */
	bus_space_write_4(t, h, GEM_MAC_ADDR0,
	    (sc->sc_arpcom.ac_enaddr[4]<<8) | sc->sc_arpcom.ac_enaddr[5]);
	bus_space_write_4(t, h, GEM_MAC_ADDR1,
	    (sc->sc_arpcom.ac_enaddr[2]<<8) | sc->sc_arpcom.ac_enaddr[3]);
	bus_space_write_4(t, h, GEM_MAC_ADDR2,
	    (sc->sc_arpcom.ac_enaddr[0]<<8) | sc->sc_arpcom.ac_enaddr[1]);

	/*
	 * Enable MII outputs.  Enable GMII if there is a gigabit PHY.
	 */
	sc->sc_mif_config = bus_space_read_4(t, h, GEM_MIF_CONFIG);
	v = GEM_MAC_XIF_TX_MII_ENA;
	if (sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) {
		v |= GEM_MAC_XIF_FDPLX_LED;
		if (sc->sc_flags & GEM_GIGABIT)
			v |= GEM_MAC_XIF_GMII_MODE;
	}
	bus_space_write_4(t, h, GEM_MAC_XIF_CONFIG, v);
}

/*
 * Receive interrupt.
 */
int
gem_rint(struct gem_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h1;
	struct ether_header *eh;
	struct gem_rxsoft *rxs;
	struct mbuf *m;
	u_int64_t rxstat;
	int i, len;

	for (i = sc->sc_rxptr;; i = GEM_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		GEM_CDRXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		rxstat = GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags);

		if (rxstat & GEM_RD_OWN) {
			/*
			 * We have processed all of the receive buffers.
			 */
			break;
		}

		if (rxstat & GEM_RD_BAD_CRC) {
#ifdef GEM_DEBUG
			printf("%s: receive error: CRC error\n",
			    sc->sc_dev.dv_xname);
#endif
			GEM_INIT_RXDESC(sc, i);
			continue;
		}

		bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
#ifdef GEM_DEBUG
		if (ifp->if_flags & IFF_DEBUG) {
			printf(" rxsoft %p descriptor %d: ", rxs, i);
			printf("gd_flags: 0x%016llx\t", (long long)
			    GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags));
			printf("gd_addr: 0x%016llx\n", (long long)
			    GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_addr));
		}
#endif

		/* No errors; receive the packet. */
		len = GEM_RD_BUFLEN(rxstat);

		/*
		 * Allocate a new mbuf cluster.  If that fails, we are
		 * out of memory, and must drop the packet and recycle
		 * the buffer that's already attached to this descriptor.
		 */
		m = rxs->rxs_mbuf;
		if (gem_add_rxbuf(sc, i) != 0) {
			ifp->if_ierrors++;
			GEM_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
		/*
		 * The chip writes each frame 2 bytes into its buffer (the
		 * FBOFF setting in GEM_RX_CONFIG, see gem_init()), which
		 * keeps the IP header aligned; skip that padding here.
		 */
		m->m_data += 2;

		ifp->if_ipackets++;
		eh = mtod(m, struct ether_header *);
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;

#if NBPFILTER > 0
		/*
		 * Pass this up to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif

		/* Pass it on. */
		ether_input_mbuf(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;
	bus_space_write_4(t, h, GEM_RX_KICK, i);

	DPRINTF(sc, ("gem_rint: done sc->rxptr %d, complete %d\n",
	    sc->sc_rxptr, bus_space_read_4(t, h, GEM_RX_COMPLETION)));

	return (1);
}

/*
 * gem_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 */
int
gem_add_rxbuf(struct gem_softc *sc, int idx)
{
	struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

#ifdef GEM_DEBUG
	/* bzero the packet to check dma */
	memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size);
#endif

	if (rxs->rxs_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmatag, rxs->rxs_dmamap);

	rxs->rxs_mbuf = m;

	error = bus_dmamap_load(sc->sc_dmatag, rxs->rxs_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, error);
		panic("gem_add_rxbuf");
	}

	bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	GEM_INIT_RXDESC(sc, idx);

	return (0);
}
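
/*
 * Note: gem_add_rxbuf() is used both by gem_meminit() to stock the ring
 * initially and by gem_rint() to replace a cluster that was just passed
 * up the stack; an ENOBUFS return lets gem_rint() recycle the old buffer
 * instead of leaving the ring slot empty.
 */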

/*
 * Handle error interrupts.
 */
int
gem_eint(struct gem_softc *sc, u_int status)
{
	if ((status & GEM_INTR_MIF) != 0) {
#ifdef GEM_DEBUG
		printf("%s: link status changed\n", sc->sc_dev.dv_xname);
#endif
		return (1);
	}

	printf("%s: status=%b\n", sc->sc_dev.dv_xname, status, GEM_INTR_BITS);
	return (1);
}

int
gem_pint(struct gem_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t seb = sc->sc_h1;
	u_int32_t status;

	status = bus_space_read_4(t, seb, GEM_MII_INTERRUP_STATUS);
	status |= bus_space_read_4(t, seb, GEM_MII_INTERRUP_STATUS);
#ifdef GEM_DEBUG
	if (status)
		printf("%s: link status changed\n", sc->sc_dev.dv_xname);
#endif
	return (1);
}
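
/*
 * Note: the PCS interrupt status register is read twice above and the
 * results OR-ed together; reading the register appears to acknowledge
 * the interrupt, and the second read catches any bit that latched in
 * between.
 */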

int
gem_intr(void *v)
{
	struct gem_softc *sc = (struct gem_softc *)v;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t seb = sc->sc_h1;
	u_int32_t status;
	int r = 0;

	status = bus_space_read_4(t, seb, GEM_STATUS);
	DPRINTF(sc, ("%s: gem_intr: cplt %x status %b\n",
	    sc->sc_dev.dv_xname, (status >> 19), status, GEM_INTR_BITS));

	if ((status & GEM_INTR_PCS) != 0)
		r |= gem_pint(sc);

	if ((status & (GEM_INTR_RX_TAG_ERR | GEM_INTR_BERR)) != 0)
		r |= gem_eint(sc, status);

	if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0)
		r |= gem_tint(sc, status);

	if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0)
		r |= gem_rint(sc);

	if (status & GEM_INTR_TX_MAC) {
		int txstat = bus_space_read_4(t, seb, GEM_MAC_TX_STATUS);
#ifdef GEM_DEBUG
		if (txstat & ~GEM_MAC_TX_XMIT_DONE)
			printf("%s: MAC tx fault, status %x\n",
			    sc->sc_dev.dv_xname, txstat);
#endif
		if (txstat & (GEM_MAC_TX_UNDERRUN | GEM_MAC_TX_PKT_TOO_LONG))
			gem_init(ifp);
	}
	if (status & GEM_INTR_RX_MAC) {
		int rxstat = bus_space_read_4(t, seb, GEM_MAC_RX_STATUS);
#ifdef GEM_DEBUG
		if (rxstat & ~GEM_MAC_RX_DONE)
			printf("%s: MAC rx fault, status %x\n",
			    sc->sc_dev.dv_xname, rxstat);
#endif
		/*
		 * Reinitialize the chip to recover from a receive
		 * overflow; count the lost packet as an input error.
		 */
		if (rxstat & GEM_MAC_RX_OVERFLOW) {
			ifp->if_ierrors++;
			gem_init(ifp);
		}
#ifdef GEM_DEBUG
		else if (rxstat & ~(GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT))
			printf("%s: MAC rx fault, status %x\n",
			    sc->sc_dev.dv_xname, rxstat);
#endif
	}
	return (r);
}

/*
 * Device timeout/watchdog routine.
 */
void
gem_watchdog(struct ifnet *ifp)
{
	struct gem_softc *sc = ifp->if_softc;

	DPRINTF(sc, ("gem_watchdog: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x "
	    "GEM_MAC_RX_CONFIG %x\n",
	    bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_RX_CONFIG),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_MAC_RX_STATUS),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_MAC_RX_CONFIG)));

	log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
	++ifp->if_oerrors;

	/* Try to get more packets going. */
	gem_init(ifp);
}

/*
 * Initialize the MII Management Interface.
 */
void
gem_mifinit(struct gem_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_h1;

	/* Configure the MIF in frame mode */
	sc->sc_mif_config = bus_space_read_4(t, mif, GEM_MIF_CONFIG);
	sc->sc_mif_config &= ~GEM_MIF_CONFIG_BB_ENA;
	bus_space_write_4(t, mif, GEM_MIF_CONFIG, sc->sc_mif_config);
}

/*
 * MII interface
 *
 * The GEM MII interface supports at least three different operating modes:
 *
 * Bitbang mode is implemented using the Bitbang Control register and the
 * MIF configuration register.
 *
 * Frame mode is implemented by loading a complete frame into the frame
 * register and polling the valid bit for completion.
 *
 * Polling mode uses the frame register but completion is indicated by
 * an interrupt.
 */
int
gem_mii_readreg(struct device *self, int phy, int reg)
{
	struct gem_softc *sc = (void *)self;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_h1;
	int n;
	u_int32_t v;

#ifdef GEM_DEBUG
	if (sc->sc_debug)
		printf("gem_mii_readreg: phy %d reg %d\n", phy, reg);
#endif

	/* Construct the frame command */
	v = (reg << GEM_MIF_REG_SHIFT) | (phy << GEM_MIF_PHY_SHIFT) |
	    GEM_MIF_FRAME_READ;

	bus_space_write_4(t, mif, GEM_MIF_FRAME, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_space_read_4(t, mif, GEM_MIF_FRAME);
		if (v & GEM_MIF_FRAME_TA0)
			return (v & GEM_MIF_FRAME_DATA);
	}

	printf("%s: mii_read timeout\n", sc->sc_dev.dv_xname);
	return (0);
}

void
gem_mii_writereg(struct device *self, int phy, int reg, int val)
{
	struct gem_softc *sc = (void *)self;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_h1;
	int n;
	u_int32_t v;

#ifdef GEM_DEBUG
	if (sc->sc_debug)
		printf("gem_mii_writereg: phy %d reg %d val %x\n",
		    phy, reg, val);
#endif

	/* Construct the frame command */
	v = GEM_MIF_FRAME_WRITE |
	    (phy << GEM_MIF_PHY_SHIFT) |
	    (reg << GEM_MIF_REG_SHIFT) |
	    (val & GEM_MIF_FRAME_DATA);

	bus_space_write_4(t, mif, GEM_MIF_FRAME, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_space_read_4(t, mif, GEM_MIF_FRAME);
		if (v & GEM_MIF_FRAME_TA0)
			return;
	}

	printf("%s: mii_write timeout\n", sc->sc_dev.dv_xname);
}

void
gem_mii_statchg(struct device *dev)
{
	struct gem_softc *sc = (void *)dev;
#ifdef GEM_DEBUG
	int instance = IFM_INST(sc->sc_mii.mii_media.ifm_cur->ifm_media);
#endif
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_h1;
	u_int32_t v;

#ifdef GEM_DEBUG
	if (sc->sc_debug)
		printf("gem_mii_statchg: status change: phy = %d\n", instance);
#endif

	/* Set tx full duplex options */
	bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, 0);
	delay(10000);
	v = GEM_MAC_TX_ENA_IPG0|GEM_MAC_TX_NGU|GEM_MAC_TX_NGU_LIMIT|
	    GEM_MAC_TX_ENABLE;
	if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0) {
		v |= GEM_MAC_TX_IGN_CARRIER|GEM_MAC_TX_IGN_COLLIS;
	}
	bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, v);

	/* XIF Configuration */
	v = bus_space_read_4(t, mac, GEM_MAC_XIF_CONFIG);
	v = GEM_MAC_XIF_LINK_LED;
	v |= GEM_MAC_XIF_TX_MII_ENA;

	sc->sc_mif_config = bus_space_read_4(t, mac, GEM_MIF_CONFIG);
	if ((sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) != 0) {
		/* External MII needs echo disable if half duplex. */
		if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0)
			/* turn on full duplex LED */
			v |= GEM_MAC_XIF_FDPLX_LED;
		else
			/* half duplex -- disable echo */
			v |= GEM_MAC_XIF_ECHO_DISABL;

		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
		case IFM_1000_T:
		case IFM_1000_SX:
			v |= GEM_MAC_XIF_GMII_MODE;
			break;
		default:
			v &= ~GEM_MAC_XIF_GMII_MODE;
		}
	} else
		/* Internal MII needs buf enable */
		v |= GEM_MAC_XIF_MII_BUF_ENA;
	bus_space_write_4(t, mac, GEM_MAC_XIF_CONFIG, v);
}

int
gem_pcs_readreg(struct device *self, int phy, int reg)
{
	struct gem_softc *sc = (void *)self;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t pcs = sc->sc_h1;

#ifdef GEM_DEBUG
	if (sc->sc_debug)
		printf("gem_pcs_readreg: phy %d reg %d\n", phy, reg);
#endif

	if (phy != GEM_PHYAD_EXTERNAL)
		return (0);

	/* Map the generic MII registers onto their PCS equivalents. */
	switch (reg) {
	case MII_BMCR:
		reg = GEM_MII_CONTROL;
		break;
	case MII_BMSR:
		reg = GEM_MII_STATUS;
		break;
	case MII_ANAR:
		reg = GEM_MII_ANAR;
		break;
	case MII_ANLPAR:
		reg = GEM_MII_ANLPAR;
		break;
	case MII_EXTSR:
		return (EXTSR_1000XFDX|EXTSR_1000XHDX);
	default:
		return (0);
	}

	return bus_space_read_4(t, pcs, reg);
}

void
gem_pcs_writereg(struct device *self, int phy, int reg, int val)
{
	struct gem_softc *sc = (void *)self;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t pcs = sc->sc_h1;

#ifdef GEM_DEBUG
	if (sc->sc_debug)
		printf("gem_pcs_writereg: phy %d reg %d val %x\n",
		    phy, reg, val);
#endif

	if (phy != GEM_PHYAD_EXTERNAL)
		return;

	/* Map the generic MII registers onto their PCS equivalents. */
	switch (reg) {
	case MII_BMCR:
		reg = GEM_MII_CONTROL;
		break;
	case MII_BMSR:
		reg = GEM_MII_STATUS;
		break;
	case MII_ANAR:
		reg = GEM_MII_ANAR;
		break;
	case MII_ANLPAR:
		reg = GEM_MII_ANLPAR;
		break;
	default:
		return;
	}

	bus_space_write_4(t, pcs, reg, val);

	/* After writing a new ANAR, re-enable the serial link and PCS. */
	if (reg == GEM_MII_ANAR) {
		bus_space_write_4(t, pcs, GEM_MII_SLINK_CONTROL,
		    GEM_MII_SLINK_LOOPBACK|GEM_MII_SLINK_EN_SYNC_D);
		bus_space_write_4(t, pcs, GEM_MII_CONFIG,
		    GEM_MII_CONFIG_ENABLE);
	}
}

int
gem_mediachange(struct ifnet *ifp)
{
	struct gem_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;

	if (mii->mii_instance) {
		struct mii_softc *miisc;
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}

	return (mii_mediachg(&sc->sc_mii));
}

void
gem_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct gem_softc *sc = ifp->if_softc;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
}

/*
 * Process an ioctl request.
 */
int
gem_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct gem_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	if ((error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data)) > 0) {
		splx(s);
		return (error);
	}

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			gem_init(ifp);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_arpcom, ifa);
#endif
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_flags & IFF_RUNNING) &&
			    ((ifp->if_flags ^ sc->sc_if_flags) &
			    (IFF_ALLMULTI | IFF_PROMISC)) != 0)
				gem_setladrf(sc);
			else {
				if ((ifp->if_flags & IFF_RUNNING) == 0)
					gem_init(ifp);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				gem_stop(ifp, 1);
		}
		sc->sc_if_flags = ifp->if_flags;

#ifdef GEM_DEBUG
		sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0;
#endif
		break;

	case SIOCSIFMTU:
		if (ifr->ifr_mtu > ETHERMTU || ifr->ifr_mtu < ETHERMIN) {
			error = EINVAL;
		} else if (ifp->if_mtu != ifr->ifr_mtu) {
			ifp->if_mtu = ifr->ifr_mtu;
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_arpcom) :
		    ether_delmulti(ifr, &sc->sc_arpcom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			if (ifp->if_flags & IFF_RUNNING)
				gem_setladrf(sc);
			error = 0;
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	default:
		error = EINVAL;
		break;
	}

	splx(s);
	return (error);
}

/*
 * Shutdown hook: stop the interface and drain its receive buffers.
 */
void
gem_shutdown(void *arg)
{
	struct gem_softc *sc = (struct gem_softc *)arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;

	gem_stop(ifp, 1);
}
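
/*
 * Note: this hook is registered via shutdownhook_establish() in
 * gem_config(); the disable argument of 1 makes gem_stop() call
 * gem_rxdrain() so the posted receive buffers are released as well.
 */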

/*
 * Set up the logical address filter.
 */
void
gem_setladrf(struct gem_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	struct arpcom *ac = &sc->sc_arpcom;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h1;
	u_int32_t crc, hash[16], v;
	int i;

	/* Get current RX configuration */
	v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);

	/*
	 * Turn off promiscuous mode, promiscuous group mode (all multicast),
	 * and hash filter.  Depending on the case, the right bit will be
	 * set again below.
	 */
	v &= ~(GEM_MAC_RX_PROMISCUOUS|GEM_MAC_RX_HASH_FILTER|
	    GEM_MAC_RX_PROMISC_GRP);

	if ((ifp->if_flags & IFF_PROMISC) != 0) {
		/* Turn on promiscuous mode */
		v |= GEM_MAC_RX_PROMISCUOUS;
		ifp->if_flags |= IFF_ALLMULTI;
		goto chipit;
	}

	/*
	 * Set up multicast address filter by passing all multicast addresses
	 * through a crc generator, and then using the high order 8 bits as an
	 * index into the 256 bit logical address filter.  The high order 4
	 * bits select the word, while the other 4 bits select the bit within
	 * the word (where bit 0 is the MSB).
	 */

	/* Clear the hash table */
	for (i = 0; i < 16; i++)
		hash[i] = 0;

	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			ifp->if_flags |= IFF_ALLMULTI;
			v |= GEM_MAC_RX_PROMISC_GRP;
			goto chipit;
		}

		crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);

		/* Just want the 8 most significant bits. */
		crc >>= 24;

		/* Set the corresponding bit in the filter. */
		hash[crc >> 4] |= 1 << (15 - (crc & 15));

		ETHER_NEXT_MULTI(step, enm);
	}

	v |= GEM_MAC_RX_HASH_FILTER;
	ifp->if_flags &= ~IFF_ALLMULTI;

	/* Now load the hash table into the chip */
	for (i = 0; i < 16; i++) {
		bus_space_write_4(t, h,
		    GEM_MAC_HASH0 + i * (GEM_MAC_HASH1-GEM_MAC_HASH0),
		    hash[i]);
	}

chipit:
	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v);
}

/*
 * Encapsulate an mbuf chain into the transmit descriptor ring.
 */
int
gem_encap(struct gem_softc *sc, struct mbuf *mhead, u_int32_t *bixp)
{
	u_int64_t flags;
	u_int32_t cur, frag, i;
	bus_dmamap_t map;

	cur = frag = *bixp;
	map = sc->sc_txd[cur].sd_map;

	if (bus_dmamap_load_mbuf(sc->sc_dmatag, map, mhead,
	    BUS_DMA_NOWAIT) != 0) {
		return (ENOBUFS);
	}

	if ((sc->sc_tx_cnt + map->dm_nsegs) > (GEM_NTXDESC - 2)) {
		bus_dmamap_unload(sc->sc_dmatag, map);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->sc_dmatag, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	for (i = 0; i < map->dm_nsegs; i++) {
		sc->sc_txdescs[frag].gd_addr =
		    GEM_DMA_WRITE(sc, map->dm_segs[i].ds_addr);
		flags = (map->dm_segs[i].ds_len & GEM_TD_BUFSIZE) |
		    (i == 0 ? GEM_TD_START_OF_PACKET : 0) |
		    ((i == (map->dm_nsegs - 1)) ? GEM_TD_END_OF_PACKET : 0);
		sc->sc_txdescs[frag].gd_flags = GEM_DMA_WRITE(sc, flags);
		bus_dmamap_sync(sc->sc_dmatag, sc->sc_cddmamap,
		    GEM_CDTXOFF(frag), sizeof(struct gem_desc),
		    BUS_DMASYNC_PREWRITE);
		cur = frag;
		if (++frag == GEM_NTXDESC)
			frag = 0;
	}

	sc->sc_tx_cnt += map->dm_nsegs;
	sc->sc_txd[*bixp].sd_map = sc->sc_txd[cur].sd_map;
	sc->sc_txd[cur].sd_map = map;
	sc->sc_txd[cur].sd_mbuf = mhead;

	bus_space_write_4(sc->sc_bustag, sc->sc_h1, GEM_TX_KICK, frag);

	*bixp = frag;

	return (0);
}
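
/*
 * Note: gem_encap() stores the mbuf and the loaded DMA map on the slot
 * holding the packet's last segment (cur), swapping that slot's idle
 * map back to the starting index, so gem_tint() can unload the map and
 * free the mbuf when the final descriptor of the packet completes.
 */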

/*
 * Transmit interrupt.
 */
int
gem_tint(struct gem_softc *sc, u_int32_t status)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct gem_sxd *sd;
	u_int32_t cons, hwcons;

	/* The TX completion index lives in the upper bits of the status. */
	hwcons = status >> 19;
	cons = sc->sc_tx_cons;
	while (cons != hwcons) {
		sd = &sc->sc_txd[cons];
		if (sd->sd_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_dmatag, sd->sd_map, 0,
			    sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmatag, sd->sd_map);
			m_freem(sd->sd_mbuf);
			sd->sd_mbuf = NULL;
		}
		sc->sc_tx_cnt--;
		ifp->if_opackets++;
		if (++cons == GEM_NTXDESC)
			cons = 0;
	}
	sc->sc_tx_cons = cons;

	gem_start(ifp);

	if (sc->sc_tx_cnt == 0)
		ifp->if_timer = 0;

	return (1);
}

void
gem_start(struct ifnet *ifp)
{
	struct gem_softc *sc = ifp->if_softc;
	struct mbuf *m;
	u_int32_t bix;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	bix = sc->sc_tx_prod;
	while (sc->sc_txd[bix].sd_mbuf == NULL) {
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;

#if NBPFILTER > 0
		/*
		 * If BPF is listening on this interface, let it see the
		 * packet before we commit it to the wire.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		/*
		 * Encapsulate this packet and start it going...
		 * or fail...
		 */
		if (gem_encap(sc, m, &bix)) {
			ifp->if_timer = 2;
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		ifp->if_timer = 5;
	}

	sc->sc_tx_prod = bix;
}