This source file includes the following definitions.
- sk_win_read_4
- sk_win_read_2
- sk_win_read_1
- sk_win_write_4
- sk_win_write_2
- sk_win_write_1
- sk_xmac_miibus_readreg
- sk_xmac_miibus_writereg
- sk_xmac_miibus_statchg
- sk_marv_miibus_readreg
- sk_marv_miibus_writereg
- sk_marv_miibus_statchg
- sk_xmac_hash
- sk_yukon_hash
- sk_setfilt
- sk_setmulti
- sk_setpromisc
- sk_init_rx_ring
- sk_init_tx_ring
- sk_newbuf
- sk_alloc_jumbo_mem
- sk_jalloc
- sk_jfree
- sk_ifmedia_upd
- sk_ifmedia_sts
- sk_ioctl
- skc_probe
- sk_reset
- sk_probe
- sk_attach
- skcprint
- skc_attach
- sk_encap
- sk_start
- sk_watchdog
- skc_shutdown
- sk_rxvalid
- sk_rxeof
- sk_rxcsum
- sk_txeof
- sk_tick
- sk_yukon_tick
- sk_intr_bcom
- sk_intr_xmac
- sk_intr_yukon
- sk_intr
- sk_init_xmac
- sk_init_yukon
- sk_init
- sk_stop
- sk_dump_txdesc
- sk_dump_bytes
- sk_dump_mbuf
89 #include "bpfilter.h"
90
91 #include <sys/param.h>
92 #include <sys/systm.h>
93 #include <sys/sockio.h>
94 #include <sys/mbuf.h>
95 #include <sys/malloc.h>
96 #include <sys/kernel.h>
97 #include <sys/socket.h>
98 #include <sys/timeout.h>
99 #include <sys/device.h>
100 #include <sys/queue.h>
101
102 #include <net/if.h>
103 #include <net/if_dl.h>
104 #include <net/if_types.h>
105
106 #ifdef INET
107 #include <netinet/in.h>
108 #include <netinet/in_systm.h>
109 #include <netinet/in_var.h>
110 #include <netinet/ip.h>
111 #include <netinet/udp.h>
112 #include <netinet/tcp.h>
113 #include <netinet/if_ether.h>
114 #endif
115
116 #include <net/if_media.h>
117 #include <net/if_vlan_var.h>
118
119 #if NBPFILTER > 0
120 #include <net/bpf.h>
121 #endif
122
123 #include <dev/mii/mii.h>
124 #include <dev/mii/miivar.h>
125 #include <dev/mii/brgphyreg.h>
126
127 #include <dev/pci/pcireg.h>
128 #include <dev/pci/pcivar.h>
129 #include <dev/pci/pcidevs.h>
130
131 #include <dev/pci/if_skreg.h>
132 #include <dev/pci/if_skvar.h>
133
134 int skc_probe(struct device *, void *, void *);
135 void skc_attach(struct device *, struct device *self, void *aux);
136 void skc_shutdown(void *);
137 int sk_probe(struct device *, void *, void *);
138 void sk_attach(struct device *, struct device *self, void *aux);
139 int skcprint(void *, const char *);
140 int sk_intr(void *);
141 void sk_intr_bcom(struct sk_if_softc *);
142 void sk_intr_xmac(struct sk_if_softc *);
143 void sk_intr_yukon(struct sk_if_softc *);
144 static __inline int sk_rxvalid(struct sk_softc *, u_int32_t, u_int32_t);
145 void sk_rxeof(struct sk_if_softc *);
146 void sk_txeof(struct sk_if_softc *);
147 int sk_encap(struct sk_if_softc *, struct mbuf *, u_int32_t *);
148 void sk_start(struct ifnet *);
149 int sk_ioctl(struct ifnet *, u_long, caddr_t);
150 void sk_init(void *);
151 void sk_init_xmac(struct sk_if_softc *);
152 void sk_init_yukon(struct sk_if_softc *);
153 void sk_stop(struct sk_if_softc *);
154 void sk_watchdog(struct ifnet *);
155 int sk_ifmedia_upd(struct ifnet *);
156 void sk_ifmedia_sts(struct ifnet *, struct ifmediareq *);
157 void sk_reset(struct sk_softc *);
158 int sk_newbuf(struct sk_if_softc *, int, struct mbuf *, bus_dmamap_t);
159 int sk_alloc_jumbo_mem(struct sk_if_softc *);
160 void *sk_jalloc(struct sk_if_softc *);
161 void sk_jfree(caddr_t, u_int, void *);
162 int sk_init_rx_ring(struct sk_if_softc *);
163 int sk_init_tx_ring(struct sk_if_softc *);
164
165 int sk_xmac_miibus_readreg(struct device *, int, int);
166 void sk_xmac_miibus_writereg(struct device *, int, int, int);
167 void sk_xmac_miibus_statchg(struct device *);
168
169 int sk_marv_miibus_readreg(struct device *, int, int);
170 void sk_marv_miibus_writereg(struct device *, int, int, int);
171 void sk_marv_miibus_statchg(struct device *);
172
173 u_int32_t sk_xmac_hash(caddr_t);
174 u_int32_t sk_yukon_hash(caddr_t);
175 void sk_setfilt(struct sk_if_softc *, caddr_t, int);
176 void sk_setmulti(struct sk_if_softc *);
177 void sk_setpromisc(struct sk_if_softc *);
178 void sk_tick(void *);
179 void sk_yukon_tick(void *);
180 void sk_rxcsum(struct ifnet *, struct mbuf *, const u_int16_t, const u_int16_t);
181
182 #ifdef SK_DEBUG
183 #define DPRINTF(x) if (skdebug) printf x
184 #define DPRINTFN(n,x) if (skdebug >= (n)) printf x
185 int skdebug = 0;
186
187 void sk_dump_txdesc(struct sk_tx_desc *, int);
188 void sk_dump_mbuf(struct mbuf *);
189 void sk_dump_bytes(const char *, int);
190 #else
191 #define DPRINTF(x)
192 #define DPRINTFN(n,x)
193 #endif
194
195
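/* PCI vendor/product IDs of the SysKonnect GEnesis and Marvell Yukon based adapters handled by this driver. */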
196 const struct pci_matchid skc_devices[] = {
197 { PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C940 },
198 { PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C940B },
199 { PCI_VENDOR_CNET, PCI_PRODUCT_CNET_GIGACARD },
200 { PCI_VENDOR_DLINK, PCI_PRODUCT_DLINK_DGE530T_A1 },
201 { PCI_VENDOR_DLINK, PCI_PRODUCT_DLINK_DGE530T_B1 },
202 { PCI_VENDOR_LINKSYS, PCI_PRODUCT_LINKSYS_EG1064 },
203 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON },
204 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_BELKIN },
205 { PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SK98XX },
206 { PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SK98XX2 },
207 };
208
209 #define SK_LINKSYS_EG1032_SUBID 0x00151737
210
211 static inline u_int32_t
212 sk_win_read_4(struct sk_softc *sc, u_int32_t reg)
213 {
214 return CSR_READ_4(sc, reg);
215 }
216
217 static inline u_int16_t
218 sk_win_read_2(struct sk_softc *sc, u_int32_t reg)
219 {
220 return CSR_READ_2(sc, reg);
221 }
222
223 static inline u_int8_t
224 sk_win_read_1(struct sk_softc *sc, u_int32_t reg)
225 {
226 return CSR_READ_1(sc, reg);
227 }
228
229 static inline void
230 sk_win_write_4(struct sk_softc *sc, u_int32_t reg, u_int32_t x)
231 {
232 CSR_WRITE_4(sc, reg, x);
233 }
234
235 static inline void
236 sk_win_write_2(struct sk_softc *sc, u_int32_t reg, u_int16_t x)
237 {
238 CSR_WRITE_2(sc, reg, x);
239 }
240
241 static inline void
242 sk_win_write_1(struct sk_softc *sc, u_int32_t reg, u_int8_t x)
243 {
244 CSR_WRITE_1(sc, reg, x);
245 }
246
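/* MII access helpers for the XMAC: the PHY register is selected through XM_PHY_ADDR and data moves through XM_PHY_DATA; external PHYs require polling XM_MMUCMD for PHYDATARDY/PHYBUSY. */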
247 int
248 sk_xmac_miibus_readreg(struct device *dev, int phy, int reg)
249 {
250 struct sk_if_softc *sc_if = (struct sk_if_softc *)dev;
251 int i;
252
253 DPRINTFN(9, ("sk_xmac_miibus_readreg\n"));
254
255 if (sc_if->sk_phytype == SK_PHYTYPE_XMAC && phy != 0)
256 return (0);
257
258 SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
259 SK_XM_READ_2(sc_if, XM_PHY_DATA);
260 if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
261 for (i = 0; i < SK_TIMEOUT; i++) {
262 DELAY(1);
263 if (SK_XM_READ_2(sc_if, XM_MMUCMD) &
264 XM_MMUCMD_PHYDATARDY)
265 break;
266 }
267
268 if (i == SK_TIMEOUT) {
269 printf("%s: phy failed to come ready\n",
270 sc_if->sk_dev.dv_xname);
271 return (0);
272 }
273 }
274 DELAY(1);
275 return (SK_XM_READ_2(sc_if, XM_PHY_DATA));
276 }
277
278 void
279 sk_xmac_miibus_writereg(struct device *dev, int phy, int reg, int val)
280 {
281 struct sk_if_softc *sc_if = (struct sk_if_softc *)dev;
282 int i;
283
284 DPRINTFN(9, ("sk_xmac_miibus_writereg\n"));
285
286 SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
287 for (i = 0; i < SK_TIMEOUT; i++) {
288 if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
289 break;
290 }
291
292 if (i == SK_TIMEOUT) {
293 printf("%s: phy failed to come ready\n",
294 sc_if->sk_dev.dv_xname);
295 return;
296 }
297
298 SK_XM_WRITE_2(sc_if, XM_PHY_DATA, val);
299 for (i = 0; i < SK_TIMEOUT; i++) {
300 DELAY(1);
301 if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
302 break;
303 }
304
305 if (i == SK_TIMEOUT)
306 printf("%s: phy write timed out\n", sc_if->sk_dev.dv_xname);
307 }
308
309 void
310 sk_xmac_miibus_statchg(struct device *dev)
311 {
312 struct sk_if_softc *sc_if = (struct sk_if_softc *)dev;
313 struct mii_data *mii = &sc_if->sk_mii;
314
315 DPRINTFN(9, ("sk_xmac_miibus_statchg\n"));
316
317
318
319
320
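/* For boards with an external PHY, mirror the negotiated duplex setting into the XMAC's MMU command register. */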
321 if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
322 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
323 SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
324 else
325 SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
326 }
327 }
328
329 int
330 sk_marv_miibus_readreg(struct device *dev, int phy, int reg)
331 {
332 struct sk_if_softc *sc_if = (struct sk_if_softc *)dev;
333 u_int16_t val;
334 int i;
335
336 if (phy != 0 ||
337 (sc_if->sk_phytype != SK_PHYTYPE_MARV_COPPER &&
338 sc_if->sk_phytype != SK_PHYTYPE_MARV_FIBER)) {
339 DPRINTFN(9, ("sk_marv_miibus_readreg (skip) phy=%d, reg=%#x\n",
340 phy, reg));
341 return (0);
342 }
343
344 SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
345 YU_SMICR_REGAD(reg) | YU_SMICR_OP_READ);
346
347 for (i = 0; i < SK_TIMEOUT; i++) {
348 DELAY(1);
349 val = SK_YU_READ_2(sc_if, YUKON_SMICR);
350 if (val & YU_SMICR_READ_VALID)
351 break;
352 }
353
354 if (i == SK_TIMEOUT) {
355 printf("%s: phy failed to come ready\n",
356 sc_if->sk_dev.dv_xname);
357 return (0);
358 }
359
360 DPRINTFN(9, ("sk_marv_miibus_readreg: i=%d, timeout=%d\n", i,
361 SK_TIMEOUT));
362
363 val = SK_YU_READ_2(sc_if, YUKON_SMIDR);
364
365 DPRINTFN(9, ("sk_marv_miibus_readreg phy=%d, reg=%#x, val=%#x\n",
366 phy, reg, val));
367
368 return (val);
369 }
370
371 void
372 sk_marv_miibus_writereg(struct device *dev, int phy, int reg, int val)
373 {
374 struct sk_if_softc *sc_if = (struct sk_if_softc *)dev;
375 int i;
376
377 DPRINTFN(9, ("sk_marv_miibus_writereg phy=%d reg=%#x val=%#x\n",
378 phy, reg, val));
379
380 SK_YU_WRITE_2(sc_if, YUKON_SMIDR, val);
381 SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
382 YU_SMICR_REGAD(reg) | YU_SMICR_OP_WRITE);
383
384 for (i = 0; i < SK_TIMEOUT; i++) {
385 DELAY(1);
386 if (!(SK_YU_READ_2(sc_if, YUKON_SMICR) & YU_SMICR_BUSY))
387 break;
388 }
389
390 if (i == SK_TIMEOUT)
391 printf("%s: phy write timed out\n", sc_if->sk_dev.dv_xname);
392 }
393
394 void
395 sk_marv_miibus_statchg(struct device *dev)
396 {
397 DPRINTFN(9, ("sk_marv_miibus_statchg: gpcr=%x\n",
398 SK_YU_READ_2(((struct sk_if_softc *)dev), YUKON_GPCR)));
399 }
400
401 u_int32_t
402 sk_xmac_hash(caddr_t addr)
403 {
404 u_int32_t crc;
405
406 crc = ether_crc32_le(addr, ETHER_ADDR_LEN);
407 return (~crc & ((1 << SK_HASH_BITS) - 1));
408 }
409
410 u_int32_t
411 sk_yukon_hash(caddr_t addr)
412 {
413 u_int32_t crc;
414
415 crc = ether_crc32_be(addr, ETHER_ADDR_LEN);
416 return (crc & ((1 << SK_HASH_BITS) - 1));
417 }
418
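/* Program a MAC address into one of the XMAC's perfect filter slots, 16 bits at a time. */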
419 void
420 sk_setfilt(struct sk_if_softc *sc_if, caddr_t addr, int slot)
421 {
422 int base = XM_RXFILT_ENTRY(slot);
423
424 SK_XM_WRITE_2(sc_if, base, letoh16(*(u_int16_t *)(&addr[0])));
425 SK_XM_WRITE_2(sc_if, base + 2, letoh16(*(u_int16_t *)(&addr[2])));
426 SK_XM_WRITE_2(sc_if, base + 4, letoh16(*(u_int16_t *)(&addr[4])));
427 }
428
429 void
430 sk_setmulti(struct sk_if_softc *sc_if)
431 {
432 struct sk_softc *sc = sc_if->sk_softc;
433 struct ifnet *ifp = &sc_if->arpcom.ac_if;
434 u_int32_t hashes[2] = { 0, 0 };
435 int h, i;
436 struct arpcom *ac = &sc_if->arpcom;
437 struct ether_multi *enm;
438 struct ether_multistep step;
439 u_int8_t dummy[] = { 0, 0, 0, 0, 0, 0 };
440
441
442 switch(sc->sk_type) {
443 case SK_GENESIS:
444 for (i = 1; i < XM_RXFILT_MAX; i++)
445 sk_setfilt(sc_if, (caddr_t)&dummy, i);
446
447 SK_XM_WRITE_4(sc_if, XM_MAR0, 0);
448 SK_XM_WRITE_4(sc_if, XM_MAR2, 0);
449 break;
450 case SK_YUKON:
451 case SK_YUKON_LITE:
452 case SK_YUKON_LP:
453 SK_YU_WRITE_2(sc_if, YUKON_MCAH1, 0);
454 SK_YU_WRITE_2(sc_if, YUKON_MCAH2, 0);
455 SK_YU_WRITE_2(sc_if, YUKON_MCAH3, 0);
456 SK_YU_WRITE_2(sc_if, YUKON_MCAH4, 0);
457 break;
458 }
459
460
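/* Program the new filters: GEnesis places the first addresses in the XMAC perfect filter slots and hashes the rest into MAR0/MAR2; Yukon uses only its 64-bit hash registers (MCAH1-4). */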
461 allmulti:
462 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
463 hashes[0] = 0xFFFFFFFF;
464 hashes[1] = 0xFFFFFFFF;
465 } else {
466 i = 1;
467
468 ETHER_FIRST_MULTI(step, ac, enm);
469 while (enm != NULL) {
470 if (bcmp(enm->enm_addrlo, enm->enm_addrhi,
471 ETHER_ADDR_LEN)) {
472 ifp->if_flags |= IFF_ALLMULTI;
473 goto allmulti;
474 }
475
476
477
478
479
480 if (SK_IS_GENESIS(sc) && i < XM_RXFILT_MAX) {
481 sk_setfilt(sc_if, enm->enm_addrlo, i);
482 i++;
483 }
484 else {
485 switch(sc->sk_type) {
486 case SK_GENESIS:
487 h = sk_xmac_hash(enm->enm_addrlo);
488 break;
489
490 case SK_YUKON:
491 case SK_YUKON_LITE:
492 case SK_YUKON_LP:
493 h = sk_yukon_hash(enm->enm_addrlo);
494 break;
495 }
496 if (h < 32)
497 hashes[0] |= (1 << h);
498 else
499 hashes[1] |= (1 << (h - 32));
500 }
501
502 ETHER_NEXT_MULTI(step, enm);
503 }
504 }
505
506 switch(sc->sk_type) {
507 case SK_GENESIS:
508 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_HASH|
509 XM_MODE_RX_USE_PERFECT);
510 SK_XM_WRITE_4(sc_if, XM_MAR0, hashes[0]);
511 SK_XM_WRITE_4(sc_if, XM_MAR2, hashes[1]);
512 break;
513 case SK_YUKON:
514 case SK_YUKON_LITE:
515 case SK_YUKON_LP:
516 SK_YU_WRITE_2(sc_if, YUKON_MCAH1, hashes[0] & 0xffff);
517 SK_YU_WRITE_2(sc_if, YUKON_MCAH2, (hashes[0] >> 16) & 0xffff);
518 SK_YU_WRITE_2(sc_if, YUKON_MCAH3, hashes[1] & 0xffff);
519 SK_YU_WRITE_2(sc_if, YUKON_MCAH4, (hashes[1] >> 16) & 0xffff);
520 break;
521 }
522 }
523
524 void
525 sk_setpromisc(struct sk_if_softc *sc_if)
526 {
527 struct sk_softc *sc = sc_if->sk_softc;
528 struct ifnet *ifp = &sc_if->arpcom.ac_if;
529
530 switch(sc->sk_type) {
531 case SK_GENESIS:
532 if (ifp->if_flags & IFF_PROMISC)
533 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
534 else
535 SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
536 break;
537 case SK_YUKON:
538 case SK_YUKON_LITE:
539 case SK_YUKON_LP:
540 if (ifp->if_flags & IFF_PROMISC) {
541 SK_YU_CLRBIT_2(sc_if, YUKON_RCR,
542 YU_RCR_UFLEN | YU_RCR_MUFLEN);
543 } else {
544 SK_YU_SETBIT_2(sc_if, YUKON_RCR,
545 YU_RCR_UFLEN | YU_RCR_MUFLEN);
546 }
547 break;
548 }
549 }
550
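/* Set up the RX descriptor ring: link the descriptors into a circle, program the checksum start offsets, and attach a jumbo buffer to each slot. */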
551 int
552 sk_init_rx_ring(struct sk_if_softc *sc_if)
553 {
554 struct sk_chain_data *cd = &sc_if->sk_cdata;
555 struct sk_ring_data *rd = sc_if->sk_rdata;
556 int i, nexti;
557
558 bzero((char *)rd->sk_rx_ring,
559 sizeof(struct sk_rx_desc) * SK_RX_RING_CNT);
560
561 for (i = 0; i < SK_RX_RING_CNT; i++) {
562 cd->sk_rx_chain[i].sk_desc = &rd->sk_rx_ring[i];
563 if (i == (SK_RX_RING_CNT - 1))
564 nexti = 0;
565 else
566 nexti = i + 1;
567 cd->sk_rx_chain[i].sk_next = &cd->sk_rx_chain[nexti];
568 rd->sk_rx_ring[i].sk_next = htole32(SK_RX_RING_ADDR(sc_if, nexti));
569 rd->sk_rx_ring[i].sk_csum1_start = htole16(ETHER_HDR_LEN);
570 rd->sk_rx_ring[i].sk_csum2_start = htole16(ETHER_HDR_LEN +
571 sizeof(struct ip));
572 }
573
574 for (i = 0; i < SK_RX_RING_CNT; i++) {
575 if (sk_newbuf(sc_if, i, NULL,
576 sc_if->sk_cdata.sk_rx_jumbo_map) == ENOBUFS) {
577 printf("%s: failed alloc of %dth mbuf\n",
578 sc_if->sk_dev.dv_xname, i);
579 return (ENOBUFS);
580 }
581 }
582
583 sc_if->sk_cdata.sk_rx_prod = 0;
584 sc_if->sk_cdata.sk_rx_cons = 0;
585
586 return (0);
587 }
588
589 int
590 sk_init_tx_ring(struct sk_if_softc *sc_if)
591 {
592 struct sk_softc *sc = sc_if->sk_softc;
593 struct sk_chain_data *cd = &sc_if->sk_cdata;
594 struct sk_ring_data *rd = sc_if->sk_rdata;
595 bus_dmamap_t dmamap;
596 struct sk_txmap_entry *entry;
597 int i, nexti;
598
599 bzero((char *)sc_if->sk_rdata->sk_tx_ring,
600 sizeof(struct sk_tx_desc) * SK_TX_RING_CNT);
601
602 SIMPLEQ_INIT(&sc_if->sk_txmap_head);
603 for (i = 0; i < SK_TX_RING_CNT; i++) {
604 cd->sk_tx_chain[i].sk_desc = &rd->sk_tx_ring[i];
605 if (i == (SK_TX_RING_CNT - 1))
606 nexti = 0;
607 else
608 nexti = i + 1;
609 cd->sk_tx_chain[i].sk_next = &cd->sk_tx_chain[nexti];
610 rd->sk_tx_ring[i].sk_next = htole32(SK_TX_RING_ADDR(sc_if, nexti));
611
612 if (bus_dmamap_create(sc->sc_dmatag, SK_JLEN, SK_NTXSEG,
613 SK_JLEN, 0, BUS_DMA_NOWAIT, &dmamap))
614 return (ENOBUFS);
615
616 entry = malloc(sizeof(*entry), M_DEVBUF, M_NOWAIT);
617 if (!entry) {
618 bus_dmamap_destroy(sc->sc_dmatag, dmamap);
619 return (ENOBUFS);
620 }
621 entry->dmamap = dmamap;
622 SIMPLEQ_INSERT_HEAD(&sc_if->sk_txmap_head, entry, link);
623 }
624
625 sc_if->sk_cdata.sk_tx_prod = 0;
626 sc_if->sk_cdata.sk_tx_cons = 0;
627 sc_if->sk_cdata.sk_tx_cnt = 0;
628
629 SK_CDTXSYNC(sc_if, 0, SK_TX_RING_CNT,
630 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
631
632 return (0);
633 }
634
635 int
636 sk_newbuf(struct sk_if_softc *sc_if, int i, struct mbuf *m,
637 bus_dmamap_t dmamap)
638 {
639 struct mbuf *m_new = NULL;
640 struct sk_chain *c;
641 struct sk_rx_desc *r;
642
643 if (m == NULL) {
644 caddr_t buf = NULL;
645
646 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
647 if (m_new == NULL)
648 return (ENOBUFS);
649
650
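/* Take a buffer from the private jumbo pool and attach it to the mbuf as external storage; sk_jfree() returns it to the pool when the mbuf is released. */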
651 buf = sk_jalloc(sc_if);
652 if (buf == NULL) {
653 m_freem(m_new);
654 DPRINTFN(1, ("%s jumbo allocation failed -- packet "
655 "dropped!\n", sc_if->arpcom.ac_if.if_xname));
656 return (ENOBUFS);
657 }
658
659
660 m_new->m_len = m_new->m_pkthdr.len = SK_JLEN;
661 MEXTADD(m_new, buf, SK_JLEN, 0, sk_jfree, sc_if);
662 } else {
663
664
665
666
667
668 m_new = m;
669 m_new->m_len = m_new->m_pkthdr.len = SK_JLEN;
670 m_new->m_data = m_new->m_ext.ext_buf;
671 }
672 m_adj(m_new, ETHER_ALIGN);
673
674 c = &sc_if->sk_cdata.sk_rx_chain[i];
675 r = c->sk_desc;
676 c->sk_mbuf = m_new;
677 r->sk_data_lo = htole32(dmamap->dm_segs[0].ds_addr +
678 (((vaddr_t)m_new->m_data
679 - (vaddr_t)sc_if->sk_cdata.sk_jumbo_buf)));
680 r->sk_ctl = htole32(SK_JLEN | SK_RXSTAT);
681
682 SK_CDRXSYNC(sc_if, i, BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
683
684 return (0);
685 }
686
687
688
689
690
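/* Allocate one contiguous DMA region of SK_JMEM bytes, map and load it, then carve it into SK_JSLOTS buffers of SK_JLEN bytes tracked on free/in-use lists. */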
691 int
692 sk_alloc_jumbo_mem(struct sk_if_softc *sc_if)
693 {
694 struct sk_softc *sc = sc_if->sk_softc;
695 caddr_t ptr, kva;
696 bus_dma_segment_t seg;
697 int i, rseg, state, error;
698 struct sk_jpool_entry *entry;
699
700 state = error = 0;
701
702
703 if (bus_dmamem_alloc(sc->sc_dmatag, SK_JMEM, PAGE_SIZE, 0,
704 &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
705 printf(": can't alloc rx buffers");
706 return (ENOBUFS);
707 }
708
709 state = 1;
710 if (bus_dmamem_map(sc->sc_dmatag, &seg, rseg, SK_JMEM, &kva,
711 BUS_DMA_NOWAIT)) {
712 printf(": can't map dma buffers (%d bytes)", SK_JMEM);
713 error = ENOBUFS;
714 goto out;
715 }
716
717 state = 2;
718 if (bus_dmamap_create(sc->sc_dmatag, SK_JMEM, 1, SK_JMEM, 0,
719 BUS_DMA_NOWAIT, &sc_if->sk_cdata.sk_rx_jumbo_map)) {
720 printf(": can't create dma map");
721 error = ENOBUFS;
722 goto out;
723 }
724
725 state = 3;
726 if (bus_dmamap_load(sc->sc_dmatag, sc_if->sk_cdata.sk_rx_jumbo_map,
727 kva, SK_JMEM, NULL, BUS_DMA_NOWAIT)) {
728 printf(": can't load dma map");
729 error = ENOBUFS;
730 goto out;
731 }
732
733 state = 4;
734 sc_if->sk_cdata.sk_jumbo_buf = (caddr_t)kva;
735 DPRINTFN(1,("sk_jumbo_buf = 0x%08X\n", sc_if->sk_cdata.sk_jumbo_buf));
736
737 LIST_INIT(&sc_if->sk_jfree_listhead);
738 LIST_INIT(&sc_if->sk_jinuse_listhead);
739
740
741
742
743
744 ptr = sc_if->sk_cdata.sk_jumbo_buf;
745 for (i = 0; i < SK_JSLOTS; i++) {
746 sc_if->sk_cdata.sk_jslots[i] = ptr;
747 ptr += SK_JLEN;
748 entry = malloc(sizeof(struct sk_jpool_entry),
749 M_DEVBUF, M_NOWAIT);
750 if (entry == NULL) {
751 sc_if->sk_cdata.sk_jumbo_buf = NULL;
752 printf(": no memory for jumbo buffer queue!");
753 error = ENOBUFS;
754 goto out;
755 }
756 entry->slot = i;
757 LIST_INSERT_HEAD(&sc_if->sk_jfree_listhead,
758 entry, jpool_entries);
759 }
760 out:
761 if (error != 0) {
762 switch (state) {
763 case 4:
764 bus_dmamap_unload(sc->sc_dmatag,
765 sc_if->sk_cdata.sk_rx_jumbo_map);
766 case 3:
767 bus_dmamap_destroy(sc->sc_dmatag,
768 sc_if->sk_cdata.sk_rx_jumbo_map);
769 case 2:
770 bus_dmamem_unmap(sc->sc_dmatag, kva, SK_JMEM);
771 case 1:
772 bus_dmamem_free(sc->sc_dmatag, &seg, rseg);
773 break;
774 default:
775 break;
776 }
777 }
778
779 return (error);
780 }
781
782
783
784
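/* Allocate a jumbo receive buffer: pop the first entry off the free list and move it to the in-use list. */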
785 void *
786 sk_jalloc(struct sk_if_softc *sc_if)
787 {
788 struct sk_jpool_entry *entry;
789
790 entry = LIST_FIRST(&sc_if->sk_jfree_listhead);
791
792 if (entry == NULL)
793 return (NULL);
794
795 LIST_REMOVE(entry, jpool_entries);
796 LIST_INSERT_HEAD(&sc_if->sk_jinuse_listhead, entry, jpool_entries);
797 return (sc_if->sk_cdata.sk_jslots[entry->slot]);
798 }
799
800
801
802
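/* Release a jumbo receive buffer (external-storage free routine): derive the slot index from the buffer address and return an entry to the free list. */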
803 void
804 sk_jfree(caddr_t buf, u_int size, void *arg)
805 {
806 struct sk_jpool_entry *entry;
807 struct sk_if_softc *sc;
808 int i;
809
810
811 sc = (struct sk_if_softc *)arg;
812
813 if (sc == NULL)
814 panic("sk_jfree: can't find softc pointer!");
815
816
817 i = ((vaddr_t)buf
818 - (vaddr_t)sc->sk_cdata.sk_jumbo_buf) / SK_JLEN;
819
820 if ((i < 0) || (i >= SK_JSLOTS))
821 panic("sk_jfree: asked to free buffer that we don't manage!");
822
823 entry = LIST_FIRST(&sc->sk_jinuse_listhead);
824 if (entry == NULL)
825 panic("sk_jfree: buffer not in use!");
826 entry->slot = i;
827 LIST_REMOVE(entry, jpool_entries);
828 LIST_INSERT_HEAD(&sc->sk_jfree_listhead, entry, jpool_entries);
829 }
830
831
832
833
834 int
835 sk_ifmedia_upd(struct ifnet *ifp)
836 {
837 struct sk_if_softc *sc_if = ifp->if_softc;
838
839 mii_mediachg(&sc_if->sk_mii);
840 return (0);
841 }
842
843
844
845
846 void
847 sk_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
848 {
849 struct sk_if_softc *sc_if = ifp->if_softc;
850
851 mii_pollstat(&sc_if->sk_mii);
852 ifmr->ifm_active = sc_if->sk_mii.mii_media_active;
853 ifmr->ifm_status = sc_if->sk_mii.mii_media_status;
854 }
855
856 int
857 sk_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
858 {
859 struct sk_if_softc *sc_if = ifp->if_softc;
860 struct ifreq *ifr = (struct ifreq *) data;
861 struct ifaddr *ifa = (struct ifaddr *) data;
862 struct mii_data *mii;
863 int s, error = 0;
864
865 s = splnet();
866
867 if ((error = ether_ioctl(ifp, &sc_if->arpcom, command, data)) > 0) {
868 splx(s);
869 return (error);
870 }
871
872 switch(command) {
873 case SIOCSIFADDR:
874 ifp->if_flags |= IFF_UP;
875 if (!(ifp->if_flags & IFF_RUNNING))
876 sk_init(sc_if);
877 #ifdef INET
878 if (ifa->ifa_addr->sa_family == AF_INET)
879 arp_ifinit(&sc_if->arpcom, ifa);
880 #endif
881 break;
882 case SIOCSIFMTU:
883 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ifp->if_hardmtu)
884 error = EINVAL;
885 else if (ifp->if_mtu != ifr->ifr_mtu)
886 ifp->if_mtu = ifr->ifr_mtu;
887 break;
888 case SIOCSIFFLAGS:
889 if (ifp->if_flags & IFF_UP) {
890 if (ifp->if_flags & IFF_RUNNING &&
891 (ifp->if_flags ^ sc_if->sk_if_flags)
892 & IFF_PROMISC) {
893 sk_setpromisc(sc_if);
894 sk_setmulti(sc_if);
895 } else {
896 if (!(ifp->if_flags & IFF_RUNNING))
897 sk_init(sc_if);
898 }
899 } else {
900 if (ifp->if_flags & IFF_RUNNING)
901 sk_stop(sc_if);
902 }
903 sc_if->sk_if_flags = ifp->if_flags;
904 break;
905 case SIOCADDMULTI:
906 case SIOCDELMULTI:
907 error = (command == SIOCADDMULTI) ?
908 ether_addmulti(ifr, &sc_if->arpcom) :
909 ether_delmulti(ifr, &sc_if->arpcom);
910
911 if (error == ENETRESET) {
912
913
914
915
916 if (ifp->if_flags & IFF_RUNNING)
917 sk_setmulti(sc_if);
918 error = 0;
919 }
920 break;
921 case SIOCGIFMEDIA:
922 case SIOCSIFMEDIA:
923 mii = &sc_if->sk_mii;
924 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
925 break;
926 default:
927 error = ENOTTY;
928 break;
929 }
930
931 splx(s);
932
933 return (error);
934 }
935
936
937
938
939
940 int
941 skc_probe(struct device *parent, void *match, void *aux)
942 {
943 struct pci_attach_args *pa = aux;
944 pci_chipset_tag_t pc = pa->pa_pc;
945 pcireg_t subid;
946
947 subid = pci_conf_read(pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
948
949 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_LINKSYS &&
950 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_LINKSYS_EG1032 &&
951 subid == SK_LINKSYS_EG1032_SUBID)
952 return (1);
953
954 return (pci_matchbyid((struct pci_attach_args *)aux, skc_devices,
955 sizeof(skc_devices)/sizeof(skc_devices[0])));
956 }
957
958
959
960
961 void
962 sk_reset(struct sk_softc *sc)
963 {
964 u_int32_t imtimer_ticks;
965
966 DPRINTFN(2, ("sk_reset\n"));
967
968 CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_RESET);
969 CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_RESET);
970 if (SK_IS_YUKON(sc))
971 CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_SET);
972
973 DELAY(1000);
974 CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_UNRESET);
975 DELAY(2);
976 CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_UNRESET);
977 if (SK_IS_YUKON(sc))
978 CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_CLEAR);
979
980 DPRINTFN(2, ("sk_reset: sk_csr=%x\n", CSR_READ_2(sc, SK_CSR)));
981 DPRINTFN(2, ("sk_reset: sk_link_ctrl=%x\n",
982 CSR_READ_2(sc, SK_LINK_CTRL)));
983
984 if (SK_IS_GENESIS(sc)) {
985
986 sk_win_write_2(sc, SK_PKTARB_CTL, SK_PKTARBCTL_UNRESET);
987 sk_win_write_2(sc, SK_RXPA1_TINIT, SK_PKTARB_TIMEOUT);
988 sk_win_write_2(sc, SK_TXPA1_TINIT, SK_PKTARB_TIMEOUT);
989 sk_win_write_2(sc, SK_RXPA2_TINIT, SK_PKTARB_TIMEOUT);
990 sk_win_write_2(sc, SK_TXPA2_TINIT, SK_PKTARB_TIMEOUT);
991 }
992
993
994 sk_win_write_4(sc, SK_RAMCTL, SK_RAMCTL_UNRESET);
995
996
997
998
999
1000
1001
1002
1003
1004
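/* Configure interrupt moderation: the moderation timer runs at a chip-dependent tick rate (GEnesis vs. Yukon) and is armed here for roughly 100 microseconds, coalescing TX/RX EOF interrupts. */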
1005 switch (sc->sk_type) {
1006 case SK_GENESIS:
1007 imtimer_ticks = SK_IMTIMER_TICKS_GENESIS;
1008 break;
1009 default:
1010 imtimer_ticks = SK_IMTIMER_TICKS_YUKON;
1011 }
1012 sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(100));
1013 sk_win_write_4(sc, SK_IMMR, SK_ISR_TX1_S_EOF|SK_ISR_TX2_S_EOF|
1014 SK_ISR_RX1_EOF|SK_ISR_RX2_EOF);
1015 sk_win_write_1(sc, SK_IMTIMERCTL, SK_IMCTL_START);
1016 }
1017
1018 int
1019 sk_probe(struct device *parent, void *match, void *aux)
1020 {
1021 struct skc_attach_args *sa = aux;
1022
1023 if (sa->skc_port != SK_PORT_A && sa->skc_port != SK_PORT_B)
1024 return (0);
1025
1026 switch (sa->skc_type) {
1027 case SK_GENESIS:
1028 case SK_YUKON:
1029 case SK_YUKON_LITE:
1030 case SK_YUKON_LP:
1031 return (1);
1032 }
1033
1034 return (0);
1035 }
1036
1037
1038
1039
1040
1041 void
1042 sk_attach(struct device *parent, struct device *self, void *aux)
1043 {
1044 struct sk_if_softc *sc_if = (struct sk_if_softc *) self;
1045 struct sk_softc *sc = (struct sk_softc *)parent;
1046 struct skc_attach_args *sa = aux;
1047 struct ifnet *ifp;
1048 caddr_t kva;
1049 bus_dma_segment_t seg;
1050 int i, rseg;
1051
1052 sc_if->sk_port = sa->skc_port;
1053 sc_if->sk_softc = sc;
1054 sc->sk_if[sa->skc_port] = sc_if;
1055
1056 if (sa->skc_port == SK_PORT_A)
1057 sc_if->sk_tx_bmu = SK_BMU_TXS_CSR0;
1058 if (sa->skc_port == SK_PORT_B)
1059 sc_if->sk_tx_bmu = SK_BMU_TXS_CSR1;
1060
1061 DPRINTFN(2, ("begin sk_attach: port=%d\n", sc_if->sk_port));
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072 for (i = 0; i < ETHER_ADDR_LEN; i++)
1073 sc_if->arpcom.ac_enaddr[i] =
1074 sk_win_read_1(sc, SK_MAC0_0 + (sa->skc_port * 8) + i);
1075
1076 printf(": address %s\n",
1077 ether_sprintf(sc_if->arpcom.ac_enaddr));
1078
1079
1080
1081
1082
1083
1084
1085
1086
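/* Split the on-board packet RAM between RX and TX queues, halving it between the two MACs on dual-port boards; the start/end values are expressed in 8-byte units. */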
1087 if (sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC) {
1088 u_int32_t chunk, val;
1089
1090 chunk = sc->sk_ramsize / 2;
1091 val = sc->sk_rboff / sizeof(u_int64_t);
1092 sc_if->sk_rx_ramstart = val;
1093 val += (chunk / sizeof(u_int64_t));
1094 sc_if->sk_rx_ramend = val - 1;
1095 sc_if->sk_tx_ramstart = val;
1096 val += (chunk / sizeof(u_int64_t));
1097 sc_if->sk_tx_ramend = val - 1;
1098 } else {
1099 u_int32_t chunk, val;
1100
1101 chunk = sc->sk_ramsize / 4;
1102 val = (sc->sk_rboff + (chunk * 2 * sc_if->sk_port)) /
1103 sizeof(u_int64_t);
1104 sc_if->sk_rx_ramstart = val;
1105 val += (chunk / sizeof(u_int64_t));
1106 sc_if->sk_rx_ramend = val - 1;
1107 sc_if->sk_tx_ramstart = val;
1108 val += (chunk / sizeof(u_int64_t));
1109 sc_if->sk_tx_ramend = val - 1;
1110 }
1111
1112 DPRINTFN(2, ("sk_attach: rx_ramstart=%#x rx_ramend=%#x\n"
1113 " tx_ramstart=%#x tx_ramend=%#x\n",
1114 sc_if->sk_rx_ramstart, sc_if->sk_rx_ramend,
1115 sc_if->sk_tx_ramstart, sc_if->sk_tx_ramend));
1116
1117
1118 sc_if->sk_phytype = sk_win_read_1(sc, SK_EPROM1) & 0xF;
1119
1120
1121 if (SK_IS_GENESIS(sc)) {
1122 switch (sc_if->sk_phytype) {
1123 case SK_PHYTYPE_XMAC:
1124 sc_if->sk_phyaddr = SK_PHYADDR_XMAC;
1125 break;
1126 case SK_PHYTYPE_BCOM:
1127 sc_if->sk_phyaddr = SK_PHYADDR_BCOM;
1128 break;
1129 default:
1130 printf("%s: unsupported PHY type: %d\n",
1131 sc->sk_dev.dv_xname, sc_if->sk_phytype);
1132 return;
1133 }
1134 }
1135
1136 if (SK_IS_YUKON(sc)) {
1137 if ((sc_if->sk_phytype < SK_PHYTYPE_MARV_COPPER &&
1138 sc->sk_pmd != 'L' && sc->sk_pmd != 'S')) {
1139
1140 sc_if->sk_phytype = SK_PHYTYPE_MARV_COPPER;
1141
1142 sc->sk_coppertype = 1;
1143 }
1144
1145 sc_if->sk_phyaddr = SK_PHYADDR_MARV;
1146
1147 if (!(sc->sk_coppertype))
1148 sc_if->sk_phytype = SK_PHYTYPE_MARV_FIBER;
1149 }
1150
1151
1152 if (bus_dmamem_alloc(sc->sc_dmatag, sizeof(struct sk_ring_data),
1153 PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
1154 printf(": can't alloc rx buffers\n");
1155 goto fail;
1156 }
1157 if (bus_dmamem_map(sc->sc_dmatag, &seg, rseg,
1158 sizeof(struct sk_ring_data), &kva, BUS_DMA_NOWAIT)) {
1159 printf(": can't map dma buffers (%lu bytes)\n",
1160 (ulong)sizeof(struct sk_ring_data));
1161 goto fail_1;
1162 }
1163 if (bus_dmamap_create(sc->sc_dmatag, sizeof(struct sk_ring_data), 1,
1164 sizeof(struct sk_ring_data), 0, BUS_DMA_NOWAIT,
1165 &sc_if->sk_ring_map)) {
1166 printf(": can't create dma map\n");
1167 goto fail_2;
1168 }
1169 if (bus_dmamap_load(sc->sc_dmatag, sc_if->sk_ring_map, kva,
1170 sizeof(struct sk_ring_data), NULL, BUS_DMA_NOWAIT)) {
1171 printf(": can't load dma map\n");
1172 goto fail_3;
1173 }
1174 sc_if->sk_rdata = (struct sk_ring_data *)kva;
1175 bzero(sc_if->sk_rdata, sizeof(struct sk_ring_data));
1176
1177
1178 if (sk_alloc_jumbo_mem(sc_if)) {
1179 printf(": jumbo buffer allocation failed\n");
1180 goto fail_3;
1181 }
1182
1183 ifp = &sc_if->arpcom.ac_if;
1184 ifp->if_softc = sc_if;
1185 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1186 ifp->if_ioctl = sk_ioctl;
1187 ifp->if_start = sk_start;
1188 ifp->if_watchdog = sk_watchdog;
1189 ifp->if_baudrate = 1000000000;
1190 ifp->if_hardmtu = SK_JUMBO_MTU;
1191 IFQ_SET_MAXLEN(&ifp->if_snd, SK_TX_RING_CNT - 1);
1192 IFQ_SET_READY(&ifp->if_snd);
1193 bcopy(sc_if->sk_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
1194
1195 ifp->if_capabilities = IFCAP_VLAN_MTU;
1196
1197
1198
1199
1200 switch (sc->sk_type) {
1201 case SK_GENESIS:
1202 sk_init_xmac(sc_if);
1203 break;
1204 case SK_YUKON:
1205 case SK_YUKON_LITE:
1206 case SK_YUKON_LP:
1207 sk_init_yukon(sc_if);
1208 break;
1209 default:
1210 printf(": unknown device type %d\n", sc->sk_type);
1211
1212 goto fail_3;
1213 }
1214
1215 DPRINTFN(2, ("sk_attach: 1\n"));
1216
1217 sc_if->sk_mii.mii_ifp = ifp;
1218 if (SK_IS_GENESIS(sc)) {
1219 sc_if->sk_mii.mii_readreg = sk_xmac_miibus_readreg;
1220 sc_if->sk_mii.mii_writereg = sk_xmac_miibus_writereg;
1221 sc_if->sk_mii.mii_statchg = sk_xmac_miibus_statchg;
1222 } else {
1223 sc_if->sk_mii.mii_readreg = sk_marv_miibus_readreg;
1224 sc_if->sk_mii.mii_writereg = sk_marv_miibus_writereg;
1225 sc_if->sk_mii.mii_statchg = sk_marv_miibus_statchg;
1226 }
1227
1228 ifmedia_init(&sc_if->sk_mii.mii_media, 0,
1229 sk_ifmedia_upd, sk_ifmedia_sts);
1230 if (SK_IS_GENESIS(sc)) {
1231 mii_attach(self, &sc_if->sk_mii, 0xffffffff, MII_PHY_ANY,
1232 MII_OFFSET_ANY, 0);
1233 } else {
1234 mii_attach(self, &sc_if->sk_mii, 0xffffffff, MII_PHY_ANY,
1235 MII_OFFSET_ANY, MIIF_DOPAUSE);
1236 }
1237 if (LIST_FIRST(&sc_if->sk_mii.mii_phys) == NULL) {
1238 printf("%s: no PHY found!\n", sc_if->sk_dev.dv_xname);
1239 ifmedia_add(&sc_if->sk_mii.mii_media, IFM_ETHER|IFM_MANUAL,
1240 0, NULL);
1241 ifmedia_set(&sc_if->sk_mii.mii_media, IFM_ETHER|IFM_MANUAL);
1242 } else
1243 ifmedia_set(&sc_if->sk_mii.mii_media, IFM_ETHER|IFM_AUTO);
1244
1245 if (SK_IS_GENESIS(sc)) {
1246 timeout_set(&sc_if->sk_tick_ch, sk_tick, sc_if);
1247 timeout_add(&sc_if->sk_tick_ch, hz);
1248 } else
1249 timeout_set(&sc_if->sk_tick_ch, sk_yukon_tick, sc_if);
1250
1251
1252
1253
1254 if_attach(ifp);
1255 ether_ifattach(ifp);
1256
1257 shutdownhook_establish(skc_shutdown, sc);
1258
1259 DPRINTFN(2, ("sk_attach: end\n"));
1260 return;
1261
1262 fail_3:
1263 bus_dmamap_destroy(sc->sc_dmatag, sc_if->sk_ring_map);
1264 fail_2:
1265 bus_dmamem_unmap(sc->sc_dmatag, kva, sizeof(struct sk_ring_data));
1266 fail_1:
1267 bus_dmamem_free(sc->sc_dmatag, &seg, rseg);
1268 fail:
1269 sc->sk_if[sa->skc_port] = NULL;
1270 }
1271
1272 int
1273 skcprint(void *aux, const char *pnp)
1274 {
1275 struct skc_attach_args *sa = aux;
1276
1277 if (pnp)
1278 printf("sk port %c at %s",
1279 (sa->skc_port == SK_PORT_A) ? 'A' : 'B', pnp);
1280 else
1281 printf(" port %c", (sa->skc_port == SK_PORT_A) ? 'A' : 'B');
1282 return (UNCONF);
1283 }
1284
1285
1286
1287
1288
1289 void
1290 skc_attach(struct device *parent, struct device *self, void *aux)
1291 {
1292 struct sk_softc *sc = (struct sk_softc *)self;
1293 struct pci_attach_args *pa = aux;
1294 struct skc_attach_args skca;
1295 pci_chipset_tag_t pc = pa->pa_pc;
1296 pcireg_t command, memtype;
1297 pci_intr_handle_t ih;
1298 const char *intrstr = NULL;
1299 bus_size_t size;
1300 u_int8_t skrs;
1301 char *revstr = NULL;
1302
1303 DPRINTFN(2, ("begin skc_attach\n"));
1304
1305
1306
1307
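/* Power management: if the device reports capability ID 0x01 and is in a low-power state, save the BARs and interrupt line, force the chip back to D0, and restore the saved registers. */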
1308 command = pci_conf_read(pc, pa->pa_tag, SK_PCI_CAPID) & 0x000000FF;
1309
1310 if (command == 0x01) {
1311 command = pci_conf_read(pc, pa->pa_tag, SK_PCI_PWRMGMTCTRL);
1312 if (command & SK_PSTATE_MASK) {
1313 u_int32_t iobase, membase, irq;
1314
1315
1316 iobase = pci_conf_read(pc, pa->pa_tag, SK_PCI_LOIO);
1317 membase = pci_conf_read(pc, pa->pa_tag, SK_PCI_LOMEM);
1318 irq = pci_conf_read(pc, pa->pa_tag, SK_PCI_INTLINE);
1319
1320
1321 printf("%s chip is in D%d power mode "
1322 "-- setting to D0\n", sc->sk_dev.dv_xname,
1323 command & SK_PSTATE_MASK);
1324 command &= 0xFFFFFFFC;
1325 pci_conf_write(pc, pa->pa_tag,
1326 SK_PCI_PWRMGMTCTRL, command);
1327
1328
1329 pci_conf_write(pc, pa->pa_tag, SK_PCI_LOIO, iobase);
1330 pci_conf_write(pc, pa->pa_tag, SK_PCI_LOMEM, membase);
1331 pci_conf_write(pc, pa->pa_tag, SK_PCI_INTLINE, irq);
1332 }
1333 }
1334
1335
1336
1337
1338
1339 memtype = pci_mapreg_type(pc, pa->pa_tag, SK_PCI_LOMEM);
1340 switch (memtype) {
1341 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1342 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1343 if (pci_mapreg_map(pa, SK_PCI_LOMEM,
1344 memtype, 0, &sc->sk_btag, &sc->sk_bhandle,
1345 NULL, &size, 0) == 0)
1346 break;
1347 default:
1348 printf(": can't map mem space\n");
1349 return;
1350 }
1351
1352 sc->sc_dmatag = pa->pa_dmat;
1353
1354 sc->sk_type = sk_win_read_1(sc, SK_CHIPVER);
1355 sc->sk_rev = (sk_win_read_1(sc, SK_CONFIG) >> 4);
1356
1357
1358 if (! SK_IS_GENESIS(sc) && ! SK_IS_YUKON(sc)) {
1359 printf(": unknown chip type: %d\n", sc->sk_type);
1360 goto fail_1;
1361 }
1362 DPRINTFN(2, ("skc_attach: allocate interrupt\n"));
1363
1364
1365 if (pci_intr_map(pa, &ih)) {
1366 printf(": couldn't map interrupt\n");
1367 goto fail_1;
1368 }
1369
1370 intrstr = pci_intr_string(pc, ih);
1371 sc->sk_intrhand = pci_intr_establish(pc, ih, IPL_NET, sk_intr, sc,
1372 self->dv_xname);
1373 if (sc->sk_intrhand == NULL) {
1374 printf(": couldn't establish interrupt");
1375 if (intrstr != NULL)
1376 printf(" at %s", intrstr);
1377 printf("\n");
1378 goto fail_1;
1379 }
1380
1381
1382 sk_reset(sc);
1383
1384 skrs = sk_win_read_1(sc, SK_EPROM0);
1385 if (SK_IS_GENESIS(sc)) {
1386
1387 switch(skrs) {
1388 case SK_RAMSIZE_512K_64:
1389 sc->sk_ramsize = 0x80000;
1390 sc->sk_rboff = SK_RBOFF_0;
1391 break;
1392 case SK_RAMSIZE_1024K_64:
1393 sc->sk_ramsize = 0x100000;
1394 sc->sk_rboff = SK_RBOFF_80000;
1395 break;
1396 case SK_RAMSIZE_1024K_128:
1397 sc->sk_ramsize = 0x100000;
1398 sc->sk_rboff = SK_RBOFF_0;
1399 break;
1400 case SK_RAMSIZE_2048K_128:
1401 sc->sk_ramsize = 0x200000;
1402 sc->sk_rboff = SK_RBOFF_0;
1403 break;
1404 default:
1405 printf(": unknown ram size: %d\n", skrs);
1406 goto fail_2;
1407 break;
1408 }
1409 } else {
1410 if (skrs == 0x00)
1411 sc->sk_ramsize = 0x20000;
1412 else
1413 sc->sk_ramsize = skrs * (1<<12);
1414 sc->sk_rboff = SK_RBOFF_0;
1415 }
1416
1417 DPRINTFN(2, ("skc_attach: ramsize=%d (%dk), rboff=%d\n",
1418 sc->sk_ramsize, sc->sk_ramsize / 1024,
1419 sc->sk_rboff));
1420
1421
1422 sc->sk_pmd = sk_win_read_1(sc, SK_PMDTYPE);
1423
1424 if (sc->sk_pmd == 'T' || sc->sk_pmd == '1')
1425 sc->sk_coppertype = 1;
1426 else
1427 sc->sk_coppertype = 0;
1428
1429 switch (sc->sk_type) {
1430 case SK_GENESIS:
1431 sc->sk_name = "GEnesis";
1432 break;
1433 case SK_YUKON:
1434 sc->sk_name = "Yukon";
1435 break;
1436 case SK_YUKON_LITE:
1437 sc->sk_name = "Yukon Lite";
1438 break;
1439 case SK_YUKON_LP:
1440 sc->sk_name = "Yukon LP";
1441 break;
1442 default:
1443 sc->sk_name = "Yukon (Unknown)";
1444 }
1445
1446
1447 if (sc->sk_type == SK_YUKON || sc->sk_type == SK_YUKON_LP) {
1448 u_int32_t flashaddr;
1449 u_int8_t testbyte;
1450
1451 flashaddr = sk_win_read_4(sc, SK_EP_ADDR);
1452
1453
1454 sk_win_write_1(sc, SK_EP_ADDR+3, 0xff);
1455 testbyte = sk_win_read_1(sc, SK_EP_ADDR+3);
1456
1457 if (testbyte != 0) {
1458
1459 sc->sk_type = SK_YUKON_LITE;
1460 sc->sk_rev = SK_YUKON_LITE_REV_A0;
1461
1462 sk_win_write_4(sc, SK_EP_ADDR, flashaddr);
1463 }
1464 }
1465
1466 if (sc->sk_type == SK_YUKON_LITE) {
1467 switch (sc->sk_rev) {
1468 case SK_YUKON_LITE_REV_A0:
1469 revstr = "A0";
1470 break;
1471 case SK_YUKON_LITE_REV_A1:
1472 revstr = "A1";
1473 break;
1474 case SK_YUKON_LITE_REV_A3:
1475 revstr = "A3";
1476 break;
1477 default:
1478 ;
1479 }
1480 }
1481
1482
1483 printf(", %s", sc->sk_name);
1484 if (revstr != NULL)
1485 printf(" rev. %s", revstr);
1486 printf(" (0x%x): %s\n", sc->sk_rev, intrstr);
1487
1488 sc->sk_macs = 1;
1489
1490 if (!(sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC))
1491 sc->sk_macs++;
1492
1493 skca.skc_port = SK_PORT_A;
1494 skca.skc_type = sc->sk_type;
1495 skca.skc_rev = sc->sk_rev;
1496 (void)config_found(&sc->sk_dev, &skca, skcprint);
1497
1498 if (sc->sk_macs > 1) {
1499 skca.skc_port = SK_PORT_B;
1500 skca.skc_type = sc->sk_type;
1501 skca.skc_rev = sc->sk_rev;
1502 (void)config_found(&sc->sk_dev, &skca, skcprint);
1503 }
1504
1505
1506 CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON);
1507
1508 return;
1509
1510 fail_2:
1511 pci_intr_disestablish(pc, sc->sk_intrhand);
1512 fail_1:
1513 bus_space_unmap(sc->sk_btag, sc->sk_bhandle, size);
1514 }
1515
1516 int
1517 sk_encap(struct sk_if_softc *sc_if, struct mbuf *m_head, u_int32_t *txidx)
1518 {
1519 struct sk_softc *sc = sc_if->sk_softc;
1520 struct sk_tx_desc *f = NULL;
1521 u_int32_t frag, cur, sk_ctl;
1522 int i;
1523 struct sk_txmap_entry *entry;
1524 bus_dmamap_t txmap;
1525
1526 DPRINTFN(2, ("sk_encap\n"));
1527
1528 entry = SIMPLEQ_FIRST(&sc_if->sk_txmap_head);
1529 if (entry == NULL) {
1530 DPRINTFN(2, ("sk_encap: no txmap available\n"));
1531 return (ENOBUFS);
1532 }
1533 txmap = entry->dmamap;
1534
1535 cur = frag = *txidx;
1536
1537 #ifdef SK_DEBUG
1538 if (skdebug >= 2)
1539 sk_dump_mbuf(m_head);
1540 #endif
1541
1542
1543
1544
1545
1546
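/* Map the mbuf chain for DMA and bail out if the fragments will not fit in the descriptors still free (two are kept in reserve). */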
1547 if (bus_dmamap_load_mbuf(sc->sc_dmatag, txmap, m_head,
1548 BUS_DMA_NOWAIT)) {
1549 DPRINTFN(2, ("sk_encap: dmamap failed\n"));
1550 return (ENOBUFS);
1551 }
1552
1553 if (txmap->dm_nsegs > (SK_TX_RING_CNT - sc_if->sk_cdata.sk_tx_cnt - 2)) {
1554 DPRINTFN(2, ("sk_encap: too few descriptors free\n"));
1555 bus_dmamap_unload(sc->sc_dmatag, txmap);
1556 return (ENOBUFS);
1557 }
1558
1559 DPRINTFN(2, ("sk_encap: dm_nsegs=%d\n", txmap->dm_nsegs));
1560
1561
1562 bus_dmamap_sync(sc->sc_dmatag, txmap, 0, txmap->dm_mapsize,
1563 BUS_DMASYNC_PREWRITE);
1564
1565 for (i = 0; i < txmap->dm_nsegs; i++) {
1566 f = &sc_if->sk_rdata->sk_tx_ring[frag];
1567 f->sk_data_lo = htole32(txmap->dm_segs[i].ds_addr);
1568 sk_ctl = txmap->dm_segs[i].ds_len | SK_OPCODE_DEFAULT;
1569 if (i == 0)
1570 sk_ctl |= SK_TXCTL_FIRSTFRAG;
1571 else
1572 sk_ctl |= SK_TXCTL_OWN;
1573 f->sk_ctl = htole32(sk_ctl);
1574 cur = frag;
1575 SK_INC(frag, SK_TX_RING_CNT);
1576 }
1577
1578 sc_if->sk_cdata.sk_tx_chain[cur].sk_mbuf = m_head;
1579 SIMPLEQ_REMOVE_HEAD(&sc_if->sk_txmap_head, link);
1580
1581 sc_if->sk_cdata.sk_tx_map[cur] = entry;
1582 sc_if->sk_rdata->sk_tx_ring[cur].sk_ctl |=
1583 htole32(SK_TXCTL_LASTFRAG|SK_TXCTL_EOF_INTR);
1584
1585
1586 SK_CDTXSYNC(sc_if, *txidx, txmap->dm_nsegs,
1587 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1588
1589 sc_if->sk_rdata->sk_tx_ring[*txidx].sk_ctl |=
1590 htole32(SK_TXCTL_OWN);
1591
1592
1593 SK_CDTXSYNC(sc_if, *txidx, 1, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1594
1595 sc_if->sk_cdata.sk_tx_cnt += txmap->dm_nsegs;
1596
1597 #ifdef SK_DEBUG
1598 if (skdebug >= 2) {
1599 struct sk_tx_desc *desc;
1600 u_int32_t idx;
1601 for (idx = *txidx; idx != frag; SK_INC(idx, SK_TX_RING_CNT)) {
1602 desc = &sc_if->sk_rdata->sk_tx_ring[idx];
1603 sk_dump_txdesc(desc, idx);
1604 }
1605 }
1606 #endif
1607
1608 *txidx = frag;
1609
1610 DPRINTFN(2, ("sk_encap: completed successfully\n"));
1611
1612 return (0);
1613 }
1614
1615 void
1616 sk_start(struct ifnet *ifp)
1617 {
1618 struct sk_if_softc *sc_if = ifp->if_softc;
1619 struct sk_softc *sc = sc_if->sk_softc;
1620 struct mbuf *m_head = NULL;
1621 u_int32_t idx = sc_if->sk_cdata.sk_tx_prod;
1622 int pkts = 0;
1623
1624 DPRINTFN(2, ("sk_start\n"));
1625
1626 while (sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf == NULL) {
1627 IFQ_POLL(&ifp->if_snd, m_head);
1628 if (m_head == NULL)
1629 break;
1630
1631
1632
1633
1634
1635
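/* Pack the frame into the transmit ring; if there is no room, set IFF_OACTIVE and retry once sk_txeof() has freed descriptors. */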
1636 if (sk_encap(sc_if, m_head, &idx)) {
1637 ifp->if_flags |= IFF_OACTIVE;
1638 break;
1639 }
1640
1641
1642 IFQ_DEQUEUE(&ifp->if_snd, m_head);
1643 pkts++;
1644
1645
1646
1647
1648
1649 #if NBPFILTER > 0
1650 if (ifp->if_bpf)
1651 bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
1652 #endif
1653 }
1654 if (pkts == 0)
1655 return;
1656
1657
1658 if (idx != sc_if->sk_cdata.sk_tx_prod) {
1659 sc_if->sk_cdata.sk_tx_prod = idx;
1660 CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);
1661
1662
1663 ifp->if_timer = 5;
1664 }
1665 }
1666
1667
1668 void
1669 sk_watchdog(struct ifnet *ifp)
1670 {
1671 struct sk_if_softc *sc_if = ifp->if_softc;
1672
1673
1674
1675
1676
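/* Reclaim finished transmissions first; only treat this as a real timeout (and reinitialize) if descriptors are still outstanding. */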
1677 sk_txeof(sc_if);
1678 if (sc_if->sk_cdata.sk_tx_cnt != 0) {
1679 printf("%s: watchdog timeout\n", sc_if->sk_dev.dv_xname);
1680
1681 ifp->if_oerrors++;
1682
1683 sk_init(sc_if);
1684 }
1685 }
1686
1687 void
1688 skc_shutdown(void *v)
1689 {
1690 struct sk_softc *sc = v;
1691
1692 DPRINTFN(2, ("skc_shutdown\n"));
1693
1694
1695 CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_OFF);
1696
1697
1698
1699
1700
1701 sk_reset(sc);
1702 }
1703
1704 static __inline int
1705 sk_rxvalid(struct sk_softc *sc, u_int32_t stat, u_int32_t len)
1706 {
1707 if (sc->sk_type == SK_GENESIS) {
1708 if ((stat & XM_RXSTAT_ERRFRAME) == XM_RXSTAT_ERRFRAME ||
1709 XM_RXSTAT_BYTES(stat) != len)
1710 return (0);
1711 } else {
1712 if ((stat & (YU_RXSTAT_CRCERR | YU_RXSTAT_LONGERR |
1713 YU_RXSTAT_MIIERR | YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC |
1714 YU_RXSTAT_JABBER)) != 0 ||
1715 (stat & YU_RXSTAT_RXOK) != YU_RXSTAT_RXOK ||
1716 YU_RXSTAT_BYTES(stat) != len)
1717 return (0);
1718 }
1719
1720 return (1);
1721 }
1722
1723 void
1724 sk_rxeof(struct sk_if_softc *sc_if)
1725 {
1726 struct sk_softc *sc = sc_if->sk_softc;
1727 struct ifnet *ifp = &sc_if->arpcom.ac_if;
1728 struct mbuf *m;
1729 struct sk_chain *cur_rx;
1730 struct sk_rx_desc *cur_desc;
1731 int i, cur, total_len = 0;
1732 u_int32_t rxstat, sk_ctl;
1733 bus_dmamap_t dmamap;
1734 u_int16_t csum1, csum2;
1735
1736 DPRINTFN(2, ("sk_rxeof\n"));
1737
1738 i = sc_if->sk_cdata.sk_rx_prod;
1739
1740 for (;;) {
1741 cur = i;
1742
1743
1744 SK_CDRXSYNC(sc_if, cur,
1745 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1746
1747 sk_ctl = letoh32(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl);
1748 if ((sk_ctl & SK_RXCTL_OWN) != 0) {
1749
1750 SK_CDRXSYNC(sc_if, cur, BUS_DMASYNC_PREREAD);
1751 sc_if->sk_cdata.sk_rx_prod = i;
1752 break;
1753 }
1754
1755 cur_rx = &sc_if->sk_cdata.sk_rx_chain[cur];
1756 cur_desc = &sc_if->sk_rdata->sk_rx_ring[cur];
1757 dmamap = sc_if->sk_cdata.sk_rx_jumbo_map;
1758
1759 bus_dmamap_sync(sc_if->sk_softc->sc_dmatag, dmamap, 0,
1760 dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1761
1762 rxstat = letoh32(cur_desc->sk_xmac_rxstat);
1763 m = cur_rx->sk_mbuf;
1764 cur_rx->sk_mbuf = NULL;
1765 total_len = SK_RXBYTES(letoh32(cur_desc->sk_ctl));
1766
1767 csum1 = letoh16(sc_if->sk_rdata->sk_rx_ring[i].sk_csum1);
1768 csum2 = letoh16(sc_if->sk_rdata->sk_rx_ring[i].sk_csum2);
1769
1770 SK_INC(i, SK_RX_RING_CNT);
1771
1772 if ((sk_ctl & (SK_RXCTL_STATUS_VALID | SK_RXCTL_FIRSTFRAG |
1773 SK_RXCTL_LASTFRAG)) != (SK_RXCTL_STATUS_VALID |
1774 SK_RXCTL_FIRSTFRAG | SK_RXCTL_LASTFRAG) ||
1775 total_len < SK_MIN_FRAMELEN ||
1776 total_len > SK_JUMBO_FRAMELEN ||
1777 sk_rxvalid(sc, rxstat, total_len) == 0) {
1778 ifp->if_ierrors++;
1779 sk_newbuf(sc_if, cur, m, dmamap);
1780 continue;
1781 }
1782
1783
1784
1785
1786
1787
1788
1789
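/* Try to hand the ring slot a fresh jumbo buffer. If that fails, copy the frame into ordinary mbufs with m_devget() and recycle the old buffer so the ring never runs dry. */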
1790 if (sk_newbuf(sc_if, cur, NULL, dmamap) == ENOBUFS) {
1791 struct mbuf *m0;
1792 m0 = m_devget(mtod(m, char *) - ETHER_ALIGN,
1793 total_len + ETHER_ALIGN, 0, ifp, NULL);
1794 sk_newbuf(sc_if, cur, m, dmamap);
1795 if (m0 == NULL) {
1796 ifp->if_ierrors++;
1797 continue;
1798 }
1799 m_adj(m0, ETHER_ALIGN);
1800 m = m0;
1801 } else {
1802 m->m_pkthdr.rcvif = ifp;
1803 m->m_pkthdr.len = m->m_len = total_len;
1804 }
1805
1806 ifp->if_ipackets++;
1807
1808 sk_rxcsum(ifp, m, csum1, csum2);
1809
1810 #if NBPFILTER > 0
1811 if (ifp->if_bpf)
1812 bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
1813 #endif
1814
1815
1816 ether_input_mbuf(ifp, m);
1817 }
1818 }
1819
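/* The hardware returns two raw 16-bit ones-complement sums per frame: csum1 starting at the offset in sk_csum1_start (the IP header) and csum2 at sk_csum2_start (the IP payload); see sk_init_rx_ring(). Folding csum1 with ~csum2 isolates the IP header sum, and csum2 plus a software pseudo-header validates the TCP/UDP checksum. */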
1820 void
1821 sk_rxcsum(struct ifnet *ifp, struct mbuf *m, const u_int16_t csum1, const u_int16_t csum2)
1822 {
1823 struct ether_header *eh;
1824 struct ip *ip;
1825 u_int8_t *pp;
1826 int hlen, len, plen;
1827 u_int16_t iph_csum, ipo_csum, ipd_csum, csum;
1828
1829 pp = mtod(m, u_int8_t *);
1830 plen = m->m_pkthdr.len;
1831 if (plen < sizeof(*eh))
1832 return;
1833 eh = (struct ether_header *)pp;
1834 iph_csum = in_cksum_addword(csum1, (~csum2 & 0xffff));
1835
1836 if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
1837 u_int16_t *xp = (u_int16_t *)pp;
1838
1840 if (xp[1] != htons(ETHERTYPE_IP))
1841 return;
1842 iph_csum = in_cksum_addword(iph_csum, (~xp[0] & 0xffff));
1843 iph_csum = in_cksum_addword(iph_csum, (~xp[1] & 0xffff));
1844 xp = (u_int16_t *)(pp + sizeof(struct ip));
1845 iph_csum = in_cksum_addword(iph_csum, xp[0]);
1846 iph_csum = in_cksum_addword(iph_csum, xp[1]);
1847 pp += EVL_ENCAPLEN;
1848 } else if (eh->ether_type != htons(ETHERTYPE_IP))
1849 return;
1850
1851 pp += sizeof(*eh);
1852 plen -= sizeof(*eh);
1853
1854 ip = (struct ip *)pp;
1855
1856 if (ip->ip_v != IPVERSION)
1857 return;
1858
1859 hlen = ip->ip_hl << 2;
1860 if (hlen < sizeof(struct ip))
1861 return;
1862 if (hlen > ntohs(ip->ip_len))
1863 return;
1864
1865
1866 if (plen != ntohs(ip->ip_len))
1867 return;
1868
1869 len = hlen - sizeof(struct ip);
1870 if (len > 0) {
1871 u_int16_t *p;
1872
1873 p = (u_int16_t *)(ip + 1);
1874 ipo_csum = 0;
1875 for (ipo_csum = 0; len > 0; len -= sizeof(*p), p++)
1876 ipo_csum = in_cksum_addword(ipo_csum, *p);
1877 iph_csum = in_cksum_addword(iph_csum, ipo_csum);
1878 ipd_csum = in_cksum_addword(csum2, (~ipo_csum & 0xffff));
1879 } else
1880 ipd_csum = csum2;
1881
1882 if (iph_csum != 0xffff)
1883 return;
1884 m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
1885
1886 if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
1887 return;
1888
1889 pp += hlen;
1890
1891
1892 if (ip->ip_p == IPPROTO_UDP) {
1893 struct udphdr *uh = (struct udphdr *)pp;
1894
1895 if (uh->uh_sum == 0)
1896 return;
1897 } else if (ip->ip_p != IPPROTO_TCP)
1898 return;
1899
1900 csum = in_cksum_phdr(ip->ip_src.s_addr, ip->ip_dst.s_addr,
1901 htonl(ntohs(ip->ip_len) - hlen + ip->ip_p) + ipd_csum);
1902 if (csum == 0xffff) {
1903 m->m_pkthdr.csum_flags |= (ip->ip_p == IPPROTO_TCP) ?
1904 M_TCP_CSUM_IN_OK : M_UDP_CSUM_IN_OK;
1905 }
1906 }
1907
1908 void
1909 sk_txeof(struct sk_if_softc *sc_if)
1910 {
1911 struct sk_softc *sc = sc_if->sk_softc;
1912 struct sk_tx_desc *cur_tx;
1913 struct ifnet *ifp = &sc_if->arpcom.ac_if;
1914 u_int32_t idx, sk_ctl;
1915 struct sk_txmap_entry *entry;
1916
1917 DPRINTFN(2, ("sk_txeof\n"));
1918
1919
1920
1921
1922
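/* Walk the TX ring from the consumer index, stopping at the first descriptor the chip still owns; free mbufs, unload DMA maps, and return map entries for completed frames. */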
1923 idx = sc_if->sk_cdata.sk_tx_cons;
1924 while (idx != sc_if->sk_cdata.sk_tx_prod) {
1925 SK_CDTXSYNC(sc_if, idx, 1,
1926 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1927
1928 cur_tx = &sc_if->sk_rdata->sk_tx_ring[idx];
1929 sk_ctl = letoh32(cur_tx->sk_ctl);
1930 #ifdef SK_DEBUG
1931 if (skdebug >= 2)
1932 sk_dump_txdesc(cur_tx, idx);
1933 #endif
1934 if (sk_ctl & SK_TXCTL_OWN) {
1935 SK_CDTXSYNC(sc_if, idx, 1, BUS_DMASYNC_PREREAD);
1936 break;
1937 }
1938 if (sk_ctl & SK_TXCTL_LASTFRAG)
1939 ifp->if_opackets++;
1940 if (sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf != NULL) {
1941 entry = sc_if->sk_cdata.sk_tx_map[idx];
1942
1943 m_freem(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf);
1944 sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf = NULL;
1945
1946 bus_dmamap_sync(sc->sc_dmatag, entry->dmamap, 0,
1947 entry->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1948
1949 bus_dmamap_unload(sc->sc_dmatag, entry->dmamap);
1950 SIMPLEQ_INSERT_TAIL(&sc_if->sk_txmap_head, entry,
1951 link);
1952 sc_if->sk_cdata.sk_tx_map[idx] = NULL;
1953 }
1954 sc_if->sk_cdata.sk_tx_cnt--;
1955 SK_INC(idx, SK_TX_RING_CNT);
1956 }
1957 ifp->if_timer = sc_if->sk_cdata.sk_tx_cnt > 0 ? 5 : 0;
1958
1959 if (sc_if->sk_cdata.sk_tx_cnt < SK_TX_RING_CNT - 2)
1960 ifp->if_flags &= ~IFF_OACTIVE;
1961
1962 sc_if->sk_cdata.sk_tx_cons = idx;
1963 }
1964
1965 void
1966 sk_tick(void *xsc_if)
1967 {
1968 struct sk_if_softc *sc_if = xsc_if;
1969 struct mii_data *mii = &sc_if->sk_mii;
1970 struct ifnet *ifp = &sc_if->arpcom.ac_if;
1971 int i;
1972
1973 DPRINTFN(2, ("sk_tick\n"));
1974
1975 if (!(ifp->if_flags & IFF_UP))
1976 return;
1977
1978 if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
1979 sk_intr_bcom(sc_if);
1980 return;
1981 }
1982
1983
1984
1985
1986
1987
1988
1989
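/* Sample the XMAC GP0 (link sync) pin up to three times; if it is still asserted, keep polling from the tick timeout, otherwise unmask the GP0 interrupt, clear the latched status and stop ticking. */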
1990 for (i = 0; i < 3; i++) {
1991 if (SK_XM_READ_2(sc_if, XM_GPIO) & XM_GPIO_GP0_SET)
1992 break;
1993 }
1994
1995 if (i != 3) {
1996 timeout_add(&sc_if->sk_tick_ch, hz);
1997 return;
1998 }
1999
2000
2001 SK_XM_CLRBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
2002 SK_XM_READ_2(sc_if, XM_ISR);
2003 mii_tick(mii);
2004 timeout_del(&sc_if->sk_tick_ch);
2005 }
2006
2007 void
2008 sk_yukon_tick(void *xsc_if)
2009 {
2010 struct sk_if_softc *sc_if = xsc_if;
2011 struct mii_data *mii = &sc_if->sk_mii;
2012
2013 mii_tick(mii);
2014 timeout_add(&sc_if->sk_tick_ch, hz);
2015 }
2016
2017 void
2018 sk_intr_bcom(struct sk_if_softc *sc_if)
2019 {
2020 struct mii_data *mii = &sc_if->sk_mii;
2021 struct ifnet *ifp = &sc_if->arpcom.ac_if;
2022 int status;
2023
2024 DPRINTFN(2, ("sk_intr_bcom\n"));
2025
2026 SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
2027
2028
2029
2030
2031
2032 status = sk_xmac_miibus_readreg((struct device *)sc_if,
2033 SK_PHYADDR_BCOM, BRGPHY_MII_ISR);
2034
2035 if (!(ifp->if_flags & IFF_RUNNING)) {
2036 sk_init_xmac(sc_if);
2037 return;
2038 }
2039
2040 if (status & (BRGPHY_ISR_LNK_CHG|BRGPHY_ISR_AN_PR)) {
2041 int lstat;
2042 lstat = sk_xmac_miibus_readreg((struct device *)sc_if,
2043 SK_PHYADDR_BCOM, BRGPHY_MII_AUXSTS);
2044
2045 if (!(lstat & BRGPHY_AUXSTS_LINK) && sc_if->sk_link) {
2046 mii_mediachg(mii);
2047
2048 SK_IF_WRITE_1(sc_if, 0,
2049 SK_LINKLED1_CTL, SK_LINKLED_OFF);
2050 sc_if->sk_link = 0;
2051 } else if (status & BRGPHY_ISR_LNK_CHG) {
2052 sk_xmac_miibus_writereg((struct device *)sc_if,
2053 SK_PHYADDR_BCOM, BRGPHY_MII_IMR, 0xFF00);
2054 mii_tick(mii);
2055 sc_if->sk_link = 1;
2056
2057 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
2058 SK_LINKLED_ON|SK_LINKLED_LINKSYNC_OFF|
2059 SK_LINKLED_BLINK_OFF);
2060 } else {
2061 mii_tick(mii);
2062 timeout_add(&sc_if->sk_tick_ch, hz);
2063 }
2064 }
2065
2066 SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
2067 }
2068
2069 void
2070 sk_intr_xmac(struct sk_if_softc *sc_if)
2071 {
2072 u_int16_t status = SK_XM_READ_2(sc_if, XM_ISR);
2073
2074 DPRINTFN(2, ("sk_intr_xmac\n"));
2075
2076 if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) {
2077 if (status & XM_ISR_GP0_SET) {
2078 SK_XM_SETBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
2079 timeout_add(&sc_if->sk_tick_ch, hz);
2080 }
2081
2082 if (status & XM_ISR_AUTONEG_DONE) {
2083 timeout_add(&sc_if->sk_tick_ch, hz);
2084 }
2085 }
2086
2087 if (status & XM_IMR_TX_UNDERRUN)
2088 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_TXFIFO);
2089
2090 if (status & XM_IMR_RX_OVERRUN)
2091 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_RXFIFO);
2092 }
2093
2094 void
2095 sk_intr_yukon(struct sk_if_softc *sc_if)
2096 {
2097 u_int8_t status;
2098
2099 status = SK_IF_READ_1(sc_if, 0, SK_GMAC_ISR);
2100
2101 if ((status & SK_GMAC_INT_RX_OVER) != 0) {
2102 SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST,
2103 SK_RFCTL_RX_FIFO_OVER);
2104 }
2105
2106 if ((status & SK_GMAC_INT_TX_UNDER) != 0) {
2107 SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST,
2108 SK_TFCTL_TX_FIFO_UNDER);
2109 }
2110
2111 DPRINTFN(2, ("sk_intr_yukon status=%#x\n", status));
2112 }
2113
2114 int
2115 sk_intr(void *xsc)
2116 {
2117 struct sk_softc *sc = xsc;
2118 struct sk_if_softc *sc_if0 = sc->sk_if[SK_PORT_A];
2119 struct sk_if_softc *sc_if1 = sc->sk_if[SK_PORT_B];
2120 struct ifnet *ifp0 = NULL, *ifp1 = NULL;
2121 u_int32_t status;
2122 int claimed = 0;
2123
2124 status = CSR_READ_4(sc, SK_ISSR);
2125 if (status == 0 || status == 0xffffffff)
2126 return (0);
2127
2128 if (sc_if0 != NULL)
2129 ifp0 = &sc_if0->arpcom.ac_if;
2130 if (sc_if1 != NULL)
2131 ifp1 = &sc_if1->arpcom.ac_if;
2132
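/* Service every asserted source (RX EOF, TX EOF, per-MAC and external PHY interrupts), re-reading SK_ISSR until no bits covered by the interrupt mask remain. */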
2133 for (; (status &= sc->sk_intrmask) != 0;) {
2134 claimed = 1;
2135
2136
2137 if (sc_if0 && (status & SK_ISR_RX1_EOF)) {
2138 sk_rxeof(sc_if0);
2139 CSR_WRITE_4(sc, SK_BMU_RX_CSR0,
2140 SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
2141 }
2142 if (sc_if1 && (status & SK_ISR_RX2_EOF)) {
2143 sk_rxeof(sc_if1);
2144 CSR_WRITE_4(sc, SK_BMU_RX_CSR1,
2145 SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
2146 }
2147
2148
2149 if (sc_if0 && (status & SK_ISR_TX1_S_EOF)) {
2150 sk_txeof(sc_if0);
2151 CSR_WRITE_4(sc, SK_BMU_TXS_CSR0,
2152 SK_TXBMU_CLR_IRQ_EOF);
2153 }
2154 if (sc_if1 && (status & SK_ISR_TX2_S_EOF)) {
2155 sk_txeof(sc_if1);
2156 CSR_WRITE_4(sc, SK_BMU_TXS_CSR1,
2157 SK_TXBMU_CLR_IRQ_EOF);
2158 }
2159
2160
2161 if (sc_if0 && (status & SK_ISR_MAC1) &&
2162 (ifp0->if_flags & IFF_RUNNING)) {
2163 if (SK_IS_GENESIS(sc))
2164 sk_intr_xmac(sc_if0);
2165 else
2166 sk_intr_yukon(sc_if0);
2167 }
2168
2169 if (sc_if1 && (status & SK_ISR_MAC2) &&
2170 (ifp1->if_flags & IFF_RUNNING)) {
2171 if (SK_IS_GENESIS(sc))
2172 sk_intr_xmac(sc_if1);
2173 else
2174 sk_intr_yukon(sc_if1);
2175
2176 }
2177
2178 if (status & SK_ISR_EXTERNAL_REG) {
2179 if (sc_if0 != NULL &&
2180 sc_if0->sk_phytype == SK_PHYTYPE_BCOM)
2181 sk_intr_bcom(sc_if0);
2182
2183 if (sc_if1 != NULL &&
2184 sc_if1->sk_phytype == SK_PHYTYPE_BCOM)
2185 sk_intr_bcom(sc_if1);
2186 }
2187 status = CSR_READ_4(sc, SK_ISSR);
2188 }
2189
2190 CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
2191
2192 if (ifp0 != NULL && !IFQ_IS_EMPTY(&ifp0->if_snd))
2193 sk_start(ifp0);
2194 if (ifp1 != NULL && !IFQ_IS_EMPTY(&ifp1->if_snd))
2195 sk_start(ifp1);
2196
2197 return (claimed);
2198 }
2199
2200 void
2201 sk_init_xmac(struct sk_if_softc *sc_if)
2202 {
2203 struct sk_softc *sc = sc_if->sk_softc;
2204 struct ifnet *ifp = &sc_if->arpcom.ac_if;
2205 struct sk_bcom_hack bhack[] = {
2206 { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 },
2207 { 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 },
2208 { 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
2209 { 0, 0 } };
2210
2211 DPRINTFN(2, ("sk_init_xmac\n"));
2212
2213
2214 SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_UNRESET);
2215 DELAY(1000);
2216
2217
2218 SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
2219
2220
2221 sc_if->sk_xmac_rev = XM_XMAC_REV(SK_XM_READ_4(sc_if, XM_DEVID));
2222
2223
2224
2225
2226
2227
2228 if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
2229 int i = 0;
2230 u_int32_t val;
2231
2232
2233 val = sk_win_read_4(sc, SK_GPIO);
2234 if (sc_if->sk_port == SK_PORT_A)
2235 val |= SK_GPIO_DIR0|SK_GPIO_DAT0;
2236 else
2237 val |= SK_GPIO_DIR2|SK_GPIO_DAT2;
2238 sk_win_write_4(sc, SK_GPIO, val);
2239
2240
2241 SK_XM_SETBIT_2(sc_if, XM_HWCFG, XM_HWCFG_GMIIMODE);
2242
2243 sk_xmac_miibus_writereg((struct device *)sc_if,
2244 SK_PHYADDR_BCOM, BRGPHY_MII_BMCR, BRGPHY_BMCR_RESET);
2245 DELAY(10000);
2246 sk_xmac_miibus_writereg((struct device *)sc_if,
2247 SK_PHYADDR_BCOM, BRGPHY_MII_IMR, 0xFFF0);
2248
2249
2250
2251
2252
2253
2254
2255 if (sk_xmac_miibus_readreg((struct device *)sc_if,
2256 SK_PHYADDR_BCOM, 0x03) == 0x6041) {
2257 while(bhack[i].reg) {
2258 sk_xmac_miibus_writereg((struct device *)sc_if,
2259 SK_PHYADDR_BCOM, bhack[i].reg,
2260 bhack[i].val);
2261 i++;
2262 }
2263 }
2264 }
2265
2266
2267 SK_XM_WRITE_2(sc_if, XM_PAR0,
2268 letoh16(*(u_int16_t *)(&sc_if->arpcom.ac_enaddr[0])));
2269 SK_XM_WRITE_2(sc_if, XM_PAR1,
2270 letoh16(*(u_int16_t *)(&sc_if->arpcom.ac_enaddr[2])));
2271 SK_XM_WRITE_2(sc_if, XM_PAR2,
2272 letoh16(*(u_int16_t *)(&sc_if->arpcom.ac_enaddr[4])));
2273 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_STATION);
2274
2275 if (ifp->if_flags & IFF_BROADCAST)
2276 SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
2277 else
2278 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
2279
2280 /* We don't need the FCS appended to the packet. */
2281 SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_STRIPFCS);
2282
2283 /* We want short frames padded to 60 bytes. */
2284 SK_XM_SETBIT_2(sc_if, XM_TXCMD, XM_TXCMD_AUTOPAD);
2285
2286
2287 /*
2288  * Enable the reception of all error frames. This is a
2289  * necessary evil due to the design of the XMAC: when bad
2290  * frame filtering is enabled, its receive FIFO operates
2291  * in store-and-forward mode, and since the FIFO is
2292  * smaller than a maximum-size jumbo frame, such frames
2293  * would simply be dropped. Disable the hardware
2294  * filtering here and perform the necessary sanity
2295  * checks later in software instead (see sk_rxvalid()).
2296  */
2297
2298
2299
2300 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_BADFRAMES|
2301 XM_MODE_RX_GIANTS|XM_MODE_RX_RUNTS|XM_MODE_RX_CRCERRS|
2302 XM_MODE_RX_INRANGELEN);
2303
2304 SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);
2305
2306 /*
2307  * Bump up the transmit request threshold to help hold off
2308  * transmit FIFO underruns when both ports are busy at once.
2309  */
2310 SK_XM_WRITE_2(sc_if, XM_TX_REQTHRESH, SK_XM_TX_FIFOTHRESH);
2311
2312 /* Set promiscuous mode. */
2313 sk_setpromisc(sc_if);
2314
2315 /* Set the multicast filter. */
2316 sk_setmulti(sc_if);
2317
2318 /* Clear any pending XMAC interrupts and set the interrupt mask. */
2319 SK_XM_READ_2(sc_if, XM_ISR);
2320 if (sc_if->sk_phytype == SK_PHYTYPE_XMAC)
2321 SK_XM_WRITE_2(sc_if, XM_IMR, XM_INTRS);
2322 else
2323 SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);
2324
2325 /* Configure the MAC arbiter. */
2326 switch (sc_if->sk_xmac_rev) {
2327 case XM_XMAC_REV_B2:
2328 sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_B2);
2329 sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_B2);
2330 sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_B2);
2331 sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_B2);
2332 sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_B2);
2333 sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_B2);
2334 sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_B2);
2335 sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_B2);
2336 sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
2337 break;
2338 case XM_XMAC_REV_C1:
2339 sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_C1);
2340 sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_C1);
2341 sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_C1);
2342 sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_C1);
2343 sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_C1);
2344 sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_C1);
2345 sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_C1);
2346 sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_C1);
2347 sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
2348 break;
2349 default:
2350 break;
2351 }
2352 sk_win_write_2(sc, SK_MACARB_CTL,
2353 SK_MACARBCTL_UNRESET|SK_MACARBCTL_FASTOE_OFF);
2354
2355 sc_if->sk_link = 1;
2356 }
2357
2358 void sk_init_yukon(struct sk_if_softc *sc_if)
2359 {
2360 u_int32_t phy, v;
2361 u_int16_t reg;
2362 struct sk_softc *sc;
2363 int i;
2364
2365 sc = sc_if->sk_softc;
2366
2367 DPRINTFN(2, ("sk_init_yukon: start: sk_csr=%#x\n",
2368 CSR_READ_4(sc_if->sk_softc, SK_CSR)));
2369
2370 if (sc->sk_type == SK_YUKON_LITE &&
2371 sc->sk_rev >= SK_YUKON_LITE_REV_A3) {
2372 /*
2373  * Workaround for COMA mode: assert the PHY reset
2374  * line, otherwise the chip does not come out of
2375  * powerdown (coma) correctly.
2376  */
2377 v = sk_win_read_4(sc, SK_GPIO);
2378 v |= SK_GPIO_DIR9 | SK_GPIO_DAT9;
2379 sk_win_write_4(sc, SK_GPIO, v);
2380 }
2381
2382 DPRINTFN(6, ("sk_init_yukon: 1\n"));
2383
2384 /* Reset the GPHY and GMAC. */
2385 SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, SK_GPHY_RESET_SET);
2386 SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET);
2387 DELAY(1000);
2388
2389 DPRINTFN(6, ("sk_init_yukon: 2\n"));
2390
2391 if (sc->sk_type == SK_YUKON_LITE &&
2392 sc->sk_rev >= SK_YUKON_LITE_REV_A3) {
2393 /*
2394  * Workaround for COMA mode: release the PHY reset again.
2395  */
2396 v = sk_win_read_4(sc, SK_GPIO);
2397 v |= SK_GPIO_DIR9;
2398 v &= ~SK_GPIO_DAT9;
2399 sk_win_write_4(sc, SK_GPIO, v);
2400 }
2401
2402 phy = SK_GPHY_INT_POL_HI | SK_GPHY_DIS_FC | SK_GPHY_DIS_SLEEP |
2403 SK_GPHY_ENA_XC | SK_GPHY_ANEG_ALL | SK_GPHY_ENA_PAUSE;
2404
2405 if (sc->sk_coppertype)
2406 phy |= SK_GPHY_COPPER;
2407 else
2408 phy |= SK_GPHY_FIBER;
2409
2410 DPRINTFN(3, ("sk_init_yukon: phy=%#x\n", phy));
2411
2412 SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_SET);
2413 DELAY(1000);
2414 SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_CLEAR);
2415 SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_LOOP_OFF |
2416 SK_GMAC_PAUSE_ON | SK_GMAC_RESET_CLEAR);
2417
2418 DPRINTFN(3, ("sk_init_yukon: gmac_ctrl=%#x\n",
2419 SK_IF_READ_4(sc_if, 0, SK_GMAC_CTRL)));
2420
2421 DPRINTFN(6, ("sk_init_yukon: 3\n"));
2422
2423
2424 DPRINTFN(6, ("sk_init_yukon: 4\n"));
2425 SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR);
2426
2427 DPRINTFN(6, ("sk_init_yukon: 4a\n"));
2428 reg = SK_YU_READ_2(sc_if, YUKON_PAR);
2429 DPRINTFN(6, ("sk_init_yukon: YUKON_PAR=%#x\n", reg));
2430
2431 /* Set MIB counter clear mode. */
2432 reg |= YU_PAR_MIB_CLR;
2433 DPRINTFN(6, ("sk_init_yukon: YUKON_PAR=%#x\n", reg));
2434 DPRINTFN(6, ("sk_init_yukon: 4b\n"));
2435 SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);
2436
2437 /* Clear MIB counter clear mode again. */
2438 DPRINTFN(6, ("sk_init_yukon: 5\n"));
2439 reg &= ~YU_PAR_MIB_CLR;
2440 SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);
2441
2442 /* Configure the receive control register. */
2443 DPRINTFN(6, ("sk_init_yukon: 7\n"));
2444 SK_YU_WRITE_2(sc_if, YUKON_RCR, YU_RCR_CRCR);
2445
2446 /* Configure the transmit parameter register. */
2447 DPRINTFN(6, ("sk_init_yukon: 8\n"));
2448 SK_YU_WRITE_2(sc_if, YUKON_TPR, YU_TPR_JAM_LEN(0x3) |
2449 YU_TPR_JAM_IPG(0xb) | YU_TPR_JAM2DATA_IPG(0x1a) );
2450
2451 /* Configure the serial mode register. */
2452 DPRINTFN(6, ("sk_init_yukon: 9\n"));
2453 SK_YU_WRITE_2(sc_if, YUKON_SMR, YU_SMR_DATA_BLIND(0x1c) |
2454 YU_SMR_MFL_VLAN | YU_SMR_MFL_JUMBO |
2455 YU_SMR_IPG_DATA(0x1e));
2456
2457 DPRINTFN(6, ("sk_init_yukon: 10\n"));
2458 /* Program the station address. */
2459 for (i = 0; i < 3; i++) {
2460
2461 SK_YU_WRITE_2(sc_if, YUKON_SAL1 + i * 4,
2462 sc_if->arpcom.ac_enaddr[i * 2] |
2463 sc_if->arpcom.ac_enaddr[i * 2 + 1] << 8);
2464 }
2465 /* Load source address 2 from the controller's MAC address registers. */
2466 for (i = 0; i < 3; i++) {
2467 reg = sk_win_read_2(sc_if->sk_softc,
2468 SK_MAC1_0 + i * 2 + sc_if->sk_port * 8);
2469 SK_YU_WRITE_2(sc_if, YUKON_SAL2 + i * 4, reg);
2470 }
2471
2472 /* Set promiscuous mode. */
2473 sk_setpromisc(sc_if);
2474
2475 /* Set the multicast filter. */
2476 DPRINTFN(6, ("sk_init_yukon: 11\n"));
2477 sk_setmulti(sc_if);
2478
2479 /* Clear the GMAC transmit/receive interrupt mask registers. */
2480 DPRINTFN(6, ("sk_init_yukon: 12\n"));
2481 SK_YU_WRITE_2(sc_if, YUKON_TIMR, 0);
2482 SK_YU_WRITE_2(sc_if, YUKON_RIMR, 0);
2483 SK_YU_WRITE_2(sc_if, YUKON_TRIMR, 0);
2484
2485 /* Configure the RX MAC FIFO flush mask. */
2486 v = YU_RXSTAT_FOFL | YU_RXSTAT_CRCERR | YU_RXSTAT_MIIERR |
2487 YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC | YU_RXSTAT_RUNT |
2488 YU_RXSTAT_JABBER;
2489 SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_MASK, v);
2490
2491 /* Enable the RX MAC FIFO flush, except on Yukon-Lite rev. A0. */
2492 if (sc->sk_type == SK_YUKON_LITE && sc->sk_rev == SK_YUKON_LITE_REV_A0)
2493 v = SK_TFCTL_OPERATION_ON;
2494 else
2495 v = SK_TFCTL_OPERATION_ON | SK_RFCTL_FIFO_FLUSH_ON;
2496
2497 SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_CLEAR);
2498 SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_CTRL_TEST, v);
2499
2500 /* Set the RX MAC FIFO flush threshold. */
2501 SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_THRESHOLD,
2502 SK_RFCTL_FIFO_THRESHOLD + 1);
2503
2504 /* Take the TX MAC FIFO out of reset and turn it on. */
2505 SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_CLEAR);
2506 SK_IF_WRITE_2(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_OPERATION_ON);
2507
2508 DPRINTFN(6, ("sk_init_yukon: end\n"));
2509 }
2510
2511 /*
2512  * Note that to properly initialize any part of the GEnesis chip,
2513  * it first has to be taken out of reset mode.
2514  */
2515 void
2516 sk_init(void *xsc_if)
2517 {
2518 struct sk_if_softc *sc_if = xsc_if;
2519 struct sk_softc *sc = sc_if->sk_softc;
2520 struct ifnet *ifp = &sc_if->arpcom.ac_if;
2521 struct mii_data *mii = &sc_if->sk_mii;
2522 int s;
2523
2524 DPRINTFN(2, ("sk_init\n"));
2525
2526 s = splnet();
2527
2528 /* Cancel pending I/O and free all RX/TX buffers. */
2529 sk_stop(sc_if);
2530
2531 if (SK_IS_GENESIS(sc)) {
2532 /* Configure the LINK_SYNC LED. */
2533 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_ON);
2534 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
2535 SK_LINKLED_LINKSYNC_ON);
2536
2537 /* Configure the RX LED. */
2538 SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL,
2539 SK_RXLEDCTL_COUNTER_START);
2540
2541 /* Configure the TX LED. */
2542 SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL,
2543 SK_TXLEDCTL_COUNTER_START);
2544 }
2545
2546
2547
2548
2549
2550
2551
2552
2553
2554
2555
2556
2557 /*
2558  * Configure the descriptor poll timer. The hardware can
2559  * occasionally miss a Start Transmit command, so the poll
2560  * timer periodically reissues one to kick off transfer of
2561  * any descriptors that are ready.
2562  */
2563 SK_IF_WRITE_4(sc_if, 0, SK_DPT_INIT, SK_DPT_TIMER_MAX);
2564
2565
2566
2567 /* Configure the MAC for this port. */
2568 switch (sc->sk_type) {
2569 case SK_GENESIS:
2570 sk_init_xmac(sc_if);
2571 break;
2572 case SK_YUKON:
2573 case SK_YUKON_LITE:
2574 case SK_YUKON_LP:
2575 sk_init_yukon(sc_if);
2576 break;
2577 }
2578 mii_mediachg(mii);
2579
2580 if (SK_IS_GENESIS(sc)) {
2581 /* Configure the MAC FIFOs. */
2582 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_UNRESET);
2583 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_END, SK_FIFO_END);
2584 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_ON);
2585
2586 SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_UNRESET);
2587 SK_IF_WRITE_4(sc_if, 0, SK_TXF1_END, SK_FIFO_END);
2588 SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_ON);
2589 }
2590
2591 /* Configure the transmit arbiter. */
2592 SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL,
2593 SK_TXARCTL_ON|SK_TXARCTL_FSYNC_ON);
2594
2595 /* Configure the RAM buffers. */
2596 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET);
2597 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart);
2598 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart);
2599 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart);
2600 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend);
2601 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON);
2602
2603 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_UNRESET);
2604 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_STORENFWD_ON);
2605 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_START, sc_if->sk_tx_ramstart);
2606 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_WR_PTR, sc_if->sk_tx_ramstart);
2607 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_RD_PTR, sc_if->sk_tx_ramstart);
2608 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_END, sc_if->sk_tx_ramend);
2609 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_ON);
2610
2611 /* Configure the BMUs. */
2612 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_ONLINE);
2613 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO,
2614 SK_RX_RING_ADDR(sc_if, 0));
2615 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI, 0);
2616
2617 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_ONLINE);
2618 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_LO,
2619 SK_TX_RING_ADDR(sc_if, 0));
2620 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_HI, 0);
2621
2622 /* Initialize the RX and TX descriptor rings. */
2623 if (sk_init_rx_ring(sc_if) == ENOBUFS) {
2624 printf("%s: initialization failed: no "
2625 "memory for rx buffers\n", sc_if->sk_dev.dv_xname);
2626 sk_stop(sc_if);
2627 splx(s);
2628 return;
2629 }
2630
2631 if (sk_init_tx_ring(sc_if) == ENOBUFS) {
2632 printf("%s: initialization failed: no "
2633 "memory for tx buffers\n", sc_if->sk_dev.dv_xname);
2634 sk_stop(sc_if);
2635 splx(s);
2636 return;
2637 }
2638
2639 /* Configure interrupt handling. */
2640 CSR_READ_4(sc, SK_ISSR);
2641 if (sc_if->sk_port == SK_PORT_A)
2642 sc->sk_intrmask |= SK_INTRS1;
2643 else
2644 sc->sk_intrmask |= SK_INTRS2;
2645
2646 sc->sk_intrmask |= SK_ISR_EXTERNAL_REG;
2647
2648 CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
2649
2650 /* Start the receive BMU. */
2651 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_START);
2652
2653 if (SK_IS_GENESIS(sc)) {
2654 /* Honour pause frames and enable the XMAC transmitter and receiver. */
2655 SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_IGNPAUSE);
2656 SK_XM_SETBIT_2(sc_if, XM_MMUCMD,
2657 XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
2658 }
2659
2660 if (SK_IS_YUKON(sc)) {
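/* Enable the GMAC transmitter and receiver. */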
2661 u_int16_t reg = SK_YU_READ_2(sc_if, YUKON_GPCR);
2662 reg |= YU_GPCR_TXEN | YU_GPCR_RXEN;
2663 SK_YU_WRITE_2(sc_if, YUKON_GPCR, reg);
2664 }
2665
2666 /* Activate the descriptor poll timer. */
2667 SK_IF_WRITE_4(sc_if, 0, SK_DPT_TIMER_CTRL, SK_DPT_TCTL_START);
2668 /* Start transfer of TX descriptors. */
2669 CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);
2670
2671 ifp->if_flags |= IFF_RUNNING;
2672 ifp->if_flags &= ~IFF_OACTIVE;
2673
2674 if (SK_IS_YUKON(sc))
2675 timeout_add(&sc_if->sk_tick_ch, hz);
2676
2677 splx(s);
2678 }
2679
2680 void
2681 sk_stop(struct sk_if_softc *sc_if)
2682 {
2683 struct sk_softc *sc = sc_if->sk_softc;
2684 struct ifnet *ifp = &sc_if->arpcom.ac_if;
2685 struct sk_txmap_entry *dma;
2686 int i;
2687 u_int32_t val;
2688
2689 DPRINTFN(2, ("sk_stop\n"));
2690
2691 timeout_del(&sc_if->sk_tick_ch);
2692
2693 ifp->if_flags &= ~(IFF_RUNNING|IFF_OACTIVE);
2694
2695 /* Stop the descriptor poll timer and the transmit BMU. */
2696 SK_IF_WRITE_4(sc_if, 0, SK_DPT_TIMER_CTRL, SK_DPT_TCTL_STOP);
2697
2698 CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_STOP);
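/* Wait for the transmit BMU to acknowledge the stop request. */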
2699 for (i = 0; i < SK_TIMEOUT; i++) {
2700 val = CSR_READ_4(sc, sc_if->sk_tx_bmu);
2701 if (!(val & SK_TXBMU_TX_STOP))
2702 break;
2703 DELAY(1);
2704 }
2705 if (i == SK_TIMEOUT)
2706 printf("%s: cannot stop transfer of Tx descriptors\n",
2707 sc_if->sk_dev.dv_xname);
2708
2709 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_STOP);
2710 for (i = 0; i < SK_TIMEOUT; i++) {
2711 val = SK_IF_READ_4(sc_if, 0, SK_RXQ1_BMU_CSR);
2712 if (!(val & SK_RXBMU_RX_STOP))
2713 break;
2714 DELAY(1);
2715 }
2716 if (i == SK_TIMEOUT)
2717 printf("%s: cannot stop transfer of Rx descriptors\n",
2718 sc_if->sk_dev.dv_xname);
2719
2720 if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
2721 u_int32_t val;
2722
2723 /* Put the PHY back into reset. */
2724 val = sk_win_read_4(sc, SK_GPIO);
2725 if (sc_if->sk_port == SK_PORT_A) {
2726 val |= SK_GPIO_DIR0;
2727 val &= ~SK_GPIO_DAT0;
2728 } else {
2729 val |= SK_GPIO_DIR2;
2730 val &= ~SK_GPIO_DAT2;
2731 }
2732 sk_win_write_4(sc, SK_GPIO, val);
2733 }
2734
2735 /* Turn off the various components of this interface. */
2736 SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
2737 switch (sc->sk_type) {
2738 case SK_GENESIS:
2739 SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL,
2740 SK_TXMACCTL_XMAC_RESET);
2741 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_RESET);
2742 break;
2743 case SK_YUKON:
2744 case SK_YUKON_LITE:
2745 case SK_YUKON_LP:
2746 SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_SET);
2747 SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_SET);
2748 break;
2749 }
2750 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE);
2751 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
2752 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_OFFLINE);
2753 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
2754 SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF);
2755 SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
2756 SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
2757 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF);
2758 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF);
2759
2760 /* Disable this port's interrupts. */
2761 if (sc_if->sk_port == SK_PORT_A)
2762 sc->sk_intrmask &= ~SK_INTRS1;
2763 else
2764 sc->sk_intrmask &= ~SK_INTRS2;
2765 CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
2766
2767 SK_XM_READ_2(sc_if, XM_ISR);
2768 SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);
2769
2770 /* Free any mbufs still held in the RX and TX rings. */
2771 for (i = 0; i < SK_RX_RING_CNT; i++) {
2772 if (sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf != NULL) {
2773 m_freem(sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf);
2774 sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf = NULL;
2775 }
2776 }
2777
2778 for (i = 0; i < SK_TX_RING_CNT; i++) {
2779 if (sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf != NULL) {
2780 m_freem(sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf);
2781 sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf = NULL;
2782 SIMPLEQ_INSERT_HEAD(&sc_if->sk_txmap_head,
2783 sc_if->sk_cdata.sk_tx_map[i], link);
2784 sc_if->sk_cdata.sk_tx_map[i] = NULL;
2785 }
2786 }
2787 /* Destroy the remaining spare DMA maps. */
2788 while ((dma = SIMPLEQ_FIRST(&sc_if->sk_txmap_head))) {
2789 SIMPLEQ_REMOVE_HEAD(&sc_if->sk_txmap_head, link);
2790 bus_dmamap_destroy(sc->sc_dmatag, dma->dmamap);
2791 free(dma, M_DEVBUF);
2792 }
2793 }
2794
2795 struct cfattach skc_ca = {
2796 sizeof(struct sk_softc), skc_probe, skc_attach,
2797 };
2798
2799 struct cfdriver skc_cd = {
2800 0, "skc", DV_DULL
2801 };
2802
2803 struct cfattach sk_ca = {
2804 sizeof(struct sk_if_softc), sk_probe, sk_attach,
2805 };
2806
2807 struct cfdriver sk_cd = {
2808 0, "sk", DV_IFNET
2809 };
2810
2811 #ifdef SK_DEBUG
2812 void
2813 sk_dump_txdesc(struct sk_tx_desc *desc, int idx)
2814 {
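/* Print a descriptor field only when it is nonzero. */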
2815 #define DESC_PRINT(X) \
2816 if (X) \
2817 printf("txdesc[%d]." #X "=%#x\n", \
2818 idx, X);
2819
2820 DESC_PRINT(letoh32(desc->sk_ctl));
2821 DESC_PRINT(letoh32(desc->sk_next));
2822 DESC_PRINT(letoh32(desc->sk_data_lo));
2823 DESC_PRINT(letoh32(desc->sk_data_hi));
2824 DESC_PRINT(letoh32(desc->sk_xmac_txstat));
2825 DESC_PRINT(letoh16(desc->sk_rsvd0));
2826 DESC_PRINT(letoh16(desc->sk_csum_startval));
2827 DESC_PRINT(letoh16(desc->sk_csum_startpos));
2828 DESC_PRINT(letoh16(desc->sk_csum_writepos));
2829 DESC_PRINT(letoh16(desc->sk_rsvd1));
2830 #undef DESC_PRINT
2831 }
2832
2833 void
2834 sk_dump_bytes(const char *data, int len)
2835 {
2836 int c, i, j;
2837 /* Dump 16 bytes per line: offset, hex bytes, then printable ASCII. */
2838 for (i = 0; i < len; i += 16) {
2839 printf("%08x ", i);
2840 c = len - i;
2841 if (c > 16) c = 16;
2842
2843 for (j = 0; j < c; j++) {
2844 printf("%02x ", data[i + j] & 0xff);
2845 if ((j & 0xf) == 7 && j > 0)
2846 printf(" ");
2847 }
2848
2849 for (; j < 16; j++)
2850 printf(" ");
2851 printf(" ");
2852
2853 for (j = 0; j < c; j++) {
2854 int ch = data[i + j] & 0xff;
2855 printf("%c", ' ' <= ch && ch <= '~' ? ch : ' ');
2856 }
2857
2858 printf("\n");
2859
2860 if (c < 16)
2861 break;
2862 }
2863 }
2864
2865 void
2866 sk_dump_mbuf(struct mbuf *m)
2867 {
2868 int count = m->m_pkthdr.len;
2869
2870 printf("m=%#lx, m->m_pkthdr.len=%#d\n", m, m->m_pkthdr.len);
2871
2872 while (count > 0 && m) {
2873 printf("m=%#lx, m->m_data=%#lx, m->m_len=%d\n",
2874 m, m->m_data, m->m_len);
2875 sk_dump_bytes(mtod(m, char *), m->m_len);
2876
2877 count -= m->m_len;
2878 m = m->m_next;
2879 }
2880 }
2881 #endif