This source file includes the following definitions:
- ti_eeprom_putbyte
- ti_eeprom_getbyte
- ti_read_eeprom
- ti_mem_read
- ti_mem_write
- ti_mem_set
- ti_loadfw
- ti_cmd
- ti_cmd_ext
- ti_handle_events
- ti_alloc_jumbo_mem
- ti_jalloc
- ti_jfree
- ti_newbuf_std
- ti_newbuf_mini
- ti_newbuf_jumbo
- ti_init_rx_ring_std
- ti_free_rx_ring_std
- ti_init_rx_ring_jumbo
- ti_free_rx_ring_jumbo
- ti_init_rx_ring_mini
- ti_free_rx_ring_mini
- ti_free_tx_ring
- ti_init_tx_ring
- ti_add_mcast
- ti_del_mcast
- ti_setmulti
- ti_64bitslot_war
- ti_chipinit
- ti_gibinit
- ti_probe
- ti_attach
- ti_rxeof
- ti_txeof_tigon1
- ti_txeof_tigon2
- ti_intr
- ti_stats_update
- ti_encap_tigon1
- ti_encap_tigon2
- ti_start
- ti_init
- ti_init2
- ti_ifmedia_upd
- ti_ifmedia_sts
- ti_ioctl
- ti_watchdog
- ti_stop
- ti_shutdown
83 #include "bpfilter.h"
84 #include "vlan.h"
85
86 #include <sys/param.h>
87 #include <sys/systm.h>
88 #include <sys/sockio.h>
89 #include <sys/mbuf.h>
90 #include <sys/malloc.h>
91 #include <sys/kernel.h>
92 #include <sys/socket.h>
93 #include <sys/device.h>
94 #include <sys/queue.h>
95
96 #include <net/if.h>
97 #include <net/if_dl.h>
98 #include <net/if_types.h>
99
100 #ifdef INET
101 #include <netinet/in.h>
102 #include <netinet/in_systm.h>
103 #include <netinet/in_var.h>
104 #include <netinet/ip.h>
105 #include <netinet/if_ether.h>
106 #endif
107
108 #include <net/if_media.h>
109
110 #if NBPFILTER > 0
111 #include <net/bpf.h>
112 #endif
113
114 #if NVLAN > 0
115 #include <net/if_types.h>
116 #include <net/if_vlan_var.h>
117 #endif
118
119 #include <dev/pci/pcireg.h>
120 #include <dev/pci/pcivar.h>
121 #include <dev/pci/pcidevs.h>
122
123 #include <dev/pci/if_tireg.h>
124 #include <dev/pci/if_tivar.h>
125
126 int ti_probe(struct device *, void *, void *);
127 void ti_attach(struct device *, struct device *, void *);
128
129 struct cfattach ti_ca = {
130 sizeof(struct ti_softc), ti_probe, ti_attach
131 };
132
133 struct cfdriver ti_cd = {
134 0, "ti", DV_IFNET
135 };
136
137 void ti_txeof_tigon1(struct ti_softc *);
138 void ti_txeof_tigon2(struct ti_softc *);
139 void ti_rxeof(struct ti_softc *);
140
141 void ti_stats_update(struct ti_softc *);
142 int ti_encap_tigon1(struct ti_softc *, struct mbuf *, u_int32_t *);
143 int ti_encap_tigon2(struct ti_softc *, struct mbuf *, u_int32_t *);
144
145 int ti_intr(void *);
146 void ti_start(struct ifnet *);
147 int ti_ioctl(struct ifnet *, u_long, caddr_t);
148 void ti_init(void *);
149 void ti_init2(struct ti_softc *);
150 void ti_stop(struct ti_softc *);
151 void ti_watchdog(struct ifnet *);
152 void ti_shutdown(void *);
153 int ti_ifmedia_upd(struct ifnet *);
154 void ti_ifmedia_sts(struct ifnet *, struct ifmediareq *);
155
156 u_int32_t ti_eeprom_putbyte(struct ti_softc *, int);
157 u_int8_t ti_eeprom_getbyte(struct ti_softc *, int, u_int8_t *);
158 int ti_read_eeprom(struct ti_softc *, caddr_t, int, int);
159
160 void ti_add_mcast(struct ti_softc *, struct ether_addr *);
161 void ti_del_mcast(struct ti_softc *, struct ether_addr *);
162 void ti_setmulti(struct ti_softc *);
163
164 void ti_mem_read(struct ti_softc *, u_int32_t, u_int32_t, void *);
165 void ti_mem_write(struct ti_softc *, u_int32_t, u_int32_t, const void*);
166 void ti_mem_set(struct ti_softc *, u_int32_t, u_int32_t);
167 void ti_loadfw(struct ti_softc *);
168 void ti_cmd(struct ti_softc *, struct ti_cmd_desc *);
169 void ti_cmd_ext(struct ti_softc *, struct ti_cmd_desc *,
170 caddr_t, int);
171 void ti_handle_events(struct ti_softc *);
172 int ti_alloc_jumbo_mem(struct ti_softc *);
173 void *ti_jalloc(struct ti_softc *);
174 void ti_jfree(caddr_t, u_int, void *);
175 int ti_newbuf_std(struct ti_softc *, int, struct mbuf *, bus_dmamap_t);
176 int ti_newbuf_mini(struct ti_softc *, int, struct mbuf *, bus_dmamap_t);
177 int ti_newbuf_jumbo(struct ti_softc *, int, struct mbuf *);
178 int ti_init_rx_ring_std(struct ti_softc *);
179 void ti_free_rx_ring_std(struct ti_softc *);
180 int ti_init_rx_ring_jumbo(struct ti_softc *);
181 void ti_free_rx_ring_jumbo(struct ti_softc *);
182 int ti_init_rx_ring_mini(struct ti_softc *);
183 void ti_free_rx_ring_mini(struct ti_softc *);
184 void ti_free_tx_ring(struct ti_softc *);
185 int ti_init_tx_ring(struct ti_softc *);
186
187 int ti_64bitslot_war(struct ti_softc *);
188 int ti_chipinit(struct ti_softc *);
189 int ti_gibinit(struct ti_softc *);
190
191 const struct pci_matchid ti_devices[] = {
192 { PCI_VENDOR_NETGEAR, PCI_PRODUCT_NETGEAR_GA620 },
193 { PCI_VENDOR_NETGEAR, PCI_PRODUCT_NETGEAR_GA620T },
194 { PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_ACENIC },
195 { PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_ACENICT },
196 { PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C985 },
197 { PCI_VENDOR_SGI, PCI_PRODUCT_SGI_TIGON },
198 { PCI_VENDOR_DEC, PCI_PRODUCT_DEC_PN9000SX }
199 };
200
201
202
203
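/*
 * Send a single byte to the EEPROM. The byte is clocked out MSB first
 * on the EE_DOUT line; transmit enable is then cleared and the ACK bit
 * is sampled on EE_DIN.
 */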
204 u_int32_t
205 ti_eeprom_putbyte(struct ti_softc *sc, int byte)
206 {
207 int i, ack = 0;
208
209
210
211
212 TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_TXEN);
213
214
215
216
217 for (i = 0x80; i; i >>= 1) {
218 if (byte & i)
219 TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_DOUT);
220 else
221 TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_DOUT);
222 DELAY(1);
223 TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);
224 DELAY(1);
225 TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);
226 }
227
228
229
230
231 TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_TXEN);
232
233
234
235
236 TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);
237 ack = CSR_READ_4(sc, TI_MISC_LOCAL_CTL) & TI_MLC_EE_DIN;
238 TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);
239
240 return (ack);
241 }
242
243
244
245
246
247
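/*
 * Read one byte from the EEPROM at `addr': issue the write opcode plus
 * the 16-bit address, restart, issue the read opcode, then clock in
 * eight data bits.
 */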
248 u_int8_t
249 ti_eeprom_getbyte(struct ti_softc *sc, int addr, u_int8_t *dest)
250 {
251 int i;
252 u_int8_t byte = 0;
253
254 EEPROM_START;
255
256
257
258
259 if (ti_eeprom_putbyte(sc, EEPROM_CTL_WRITE)) {
260 printf("%s: failed to send write command, status: %x\n",
261 sc->sc_dv.dv_xname, CSR_READ_4(sc, TI_MISC_LOCAL_CTL));
262 return (1);
263 }
264
265
266
267
268 if (ti_eeprom_putbyte(sc, (addr >> 8) & 0xFF)) {
269 printf("%s: failed to send address, status: %x\n",
270 sc->sc_dv.dv_xname, CSR_READ_4(sc, TI_MISC_LOCAL_CTL));
271 return (1);
272 }
273
274
275
276 if (ti_eeprom_putbyte(sc, addr & 0xFF)) {
277 printf("%s: failed to send address, status: %x\n",
278 sc->sc_dv.dv_xname, CSR_READ_4(sc, TI_MISC_LOCAL_CTL));
279 return (1);
280 }
281
282 EEPROM_STOP;
283 EEPROM_START;
284
285
286
287 if (ti_eeprom_putbyte(sc, EEPROM_CTL_READ)) {
288 printf("%s: failed to send read command, status: %x\n",
289 sc->sc_dv.dv_xname, CSR_READ_4(sc, TI_MISC_LOCAL_CTL));
290 return (1);
291 }
292
293
294
295
296 TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_TXEN);
297 for (i = 0x80; i; i >>= 1) {
298 TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);
299 DELAY(1);
300 if (CSR_READ_4(sc, TI_MISC_LOCAL_CTL) & TI_MLC_EE_DIN)
301 byte |= i;
302 TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);
303 DELAY(1);
304 }
305
306 EEPROM_STOP;
307
308
309
310
311
312 *dest = byte;
313
314 return (0);
315 }
316
317
318
319
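/*
 * Read a sequence of `cnt' bytes from the EEPROM, starting at offset
 * `off', into the buffer pointed to by `dest'.
 */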
320 int
321 ti_read_eeprom(struct ti_softc *sc, caddr_t dest, int off, int cnt)
322 {
323 int err = 0, i;
324 u_int8_t byte = 0;
325
326 for (i = 0; i < cnt; i++) {
327 err = ti_eeprom_getbyte(sc, off + i, &byte);
328 if (err)
329 break;
330 *(dest + i) = byte;
331 }
332
333 return (err ? 1 : 0);
334 }
335
336
337
338
339
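/*
 * NIC memory access routines. The Tigon exposes its local SRAM to the
 * host through a sliding window: TI_WINBASE selects the segment and
 * TI_WINDOW is the aperture, so transfers are done at most TI_WINLEN
 * bytes at a time.
 */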
340 void
341 ti_mem_read(struct ti_softc *sc, u_int32_t addr, u_int32_t len, void *buf)
342 {
343 int segptr, segsize, cnt;
344 caddr_t ptr;
345
346 segptr = addr;
347 cnt = len;
348 ptr = buf;
349
350 while(cnt) {
351 if (cnt < TI_WINLEN)
352 segsize = cnt;
353 else
354 segsize = TI_WINLEN - (segptr % TI_WINLEN);
355 CSR_WRITE_4(sc, TI_WINBASE, (segptr & ~(TI_WINLEN - 1)));
356 bus_space_read_region_4(sc->ti_btag, sc->ti_bhandle,
357 TI_WINDOW + (segptr & (TI_WINLEN - 1)), (u_int32_t *)ptr,
358 segsize / 4);
359 ptr += segsize;
360 segptr += segsize;
361 cnt -= segsize;
362 }
363 }
364
365
366
367
368
369 void
370 ti_mem_write(struct ti_softc *sc, u_int32_t addr, u_int32_t len,
371 const void *buf)
372 {
373 int segptr, segsize, cnt;
374 const char *ptr;
375
376 segptr = addr;
377 cnt = len;
378 ptr = buf;
379
380 while(cnt) {
381 if (cnt < TI_WINLEN)
382 segsize = cnt;
383 else
384 segsize = TI_WINLEN - (segptr % TI_WINLEN);
385 CSR_WRITE_4(sc, TI_WINBASE, (segptr & ~(TI_WINLEN - 1)));
386 bus_space_write_region_4(sc->ti_btag, sc->ti_bhandle,
387 TI_WINDOW + (segptr & (TI_WINLEN - 1)), (const u_int32_t *)ptr,
388 segsize / 4);
389 ptr += segsize;
390 segptr += segsize;
391 cnt -= segsize;
392 }
393 }
394
395
396
397
398
399 void
400 ti_mem_set(struct ti_softc *sc, u_int32_t addr, u_int32_t len)
401 {
402 int segptr, segsize, cnt;
403
404 segptr = addr;
405 cnt = len;
406
407 while(cnt) {
408 if (cnt < TI_WINLEN)
409 segsize = cnt;
410 else
411 segsize = TI_WINLEN - (segptr % TI_WINLEN);
412 CSR_WRITE_4(sc, TI_WINBASE, (segptr & ~(TI_WINLEN - 1)));
413 bus_space_set_region_4(sc->ti_btag, sc->ti_bhandle,
414 TI_WINDOW + (segptr & (TI_WINLEN - 1)), 0, segsize / 4);
415 segptr += segsize;
416 cnt -= segsize;
417 }
418 }
419
420
421
422
423
424
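/*
 * Load the firmware image into the NIC's on-board SRAM using
 * loadfirmware(9), after checking that its version matches what the
 * driver expects, then point the on-board CPU at the entry point.
 */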
425 void
426 ti_loadfw(struct ti_softc *sc)
427 {
428 struct tigon_firmware *tf;
429 u_char *buf = NULL;
430 size_t buflen;
431 char *name;
432 int error;
433
434 switch(sc->ti_hwrev) {
435 case TI_HWREV_TIGON:
436 name = "tigon1";
437 break;
438 case TI_HWREV_TIGON_II:
439 name = "tigon2";
440 break;
441 default:
442 printf("%s: can't load firmware: unknown hardware rev\n",
443 sc->sc_dv.dv_xname);
444 return;
445 }
446
447 error = loadfirmware(name, &buf, &buflen);
448 if (error)
449 return;
450 tf = (struct tigon_firmware *)buf;
451 if (tf->FwReleaseMajor != TI_FIRMWARE_MAJOR ||
452 tf->FwReleaseMinor != TI_FIRMWARE_MINOR ||
453 tf->FwReleaseFix != TI_FIRMWARE_FIX) {
454 printf("%s: firmware revision mismatch; want "
455 "%d.%d.%d, got %d.%d.%d\n", sc->sc_dv.dv_xname,
456 TI_FIRMWARE_MAJOR, TI_FIRMWARE_MINOR,
457 TI_FIRMWARE_FIX, tf->FwReleaseMajor,
458 tf->FwReleaseMinor, tf->FwReleaseFix);
459 free(buf, M_DEVBUF);
460 return;
461 }
462 ti_mem_write(sc, tf->FwTextAddr, tf->FwTextLen,
463 (caddr_t)&tf->data[tf->FwTextOffset]);
464 ti_mem_write(sc, tf->FwRodataAddr, tf->FwRodataLen,
465 (caddr_t)&tf->data[tf->FwRodataOffset]);
466 ti_mem_write(sc, tf->FwDataAddr, tf->FwDataLen,
467 (caddr_t)&tf->data[tf->FwDataOffset]);
468 ti_mem_set(sc, tf->FwBssAddr, tf->FwBssLen);
469 ti_mem_set(sc, tf->FwSbssAddr, tf->FwSbssLen);
470 CSR_WRITE_4(sc, TI_CPU_PROGRAM_COUNTER, tf->FwStartAddr);
471 free(buf, M_DEVBUF);
472 }
473
474
475
476
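/*
 * Send a command to the firmware by writing a command descriptor into
 * the command ring in NIC shared memory and advancing the producer
 * index.
 */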
477 void
478 ti_cmd(struct ti_softc *sc, struct ti_cmd_desc *cmd)
479 {
480 u_int32_t index;
481
482 index = sc->ti_cmd_saved_prodidx;
483 CSR_WRITE_4(sc, TI_GCR_CMDRING + (index * 4), *(u_int32_t *)(cmd));
484 TI_INC(index, TI_CMD_RING_CNT);
485 CSR_WRITE_4(sc, TI_MB_CMDPROD_IDX, index);
486 sc->ti_cmd_saved_prodidx = index;
487 }
488
489
490
491
492
493 void
494 ti_cmd_ext(struct ti_softc *sc, struct ti_cmd_desc *cmd, caddr_t arg,
495 int len)
496 {
497 u_int32_t index;
498 int i;
499
500 index = sc->ti_cmd_saved_prodidx;
501 CSR_WRITE_4(sc, TI_GCR_CMDRING + (index * 4), *(u_int32_t *)(cmd));
502 TI_INC(index, TI_CMD_RING_CNT);
503 for (i = 0; i < len; i++) {
504 CSR_WRITE_4(sc, TI_GCR_CMDRING + (index * 4),
505 *(u_int32_t *)(&arg[i * 4]));
506 TI_INC(index, TI_CMD_RING_CNT);
507 }
508 CSR_WRITE_4(sc, TI_MB_CMDPROD_IDX, index);
509 sc->ti_cmd_saved_prodidx = index;
510 }
511
512
513
514
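/*
 * Process events posted by the firmware on the event ring: link state
 * changes, error reports, firmware startup completion and statistics
 * updates.
 */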
515 void
516 ti_handle_events(struct ti_softc *sc)
517 {
518 struct ti_event_desc *e;
519
520 if (sc->ti_rdata->ti_event_ring == NULL)
521 return;
522
523 while (sc->ti_ev_saved_considx != sc->ti_ev_prodidx.ti_idx) {
524 e = &sc->ti_rdata->ti_event_ring[sc->ti_ev_saved_considx];
525 switch (TI_EVENT_EVENT(e)) {
526 case TI_EV_LINKSTAT_CHANGED:
527 sc->ti_linkstat = TI_EVENT_CODE(e);
528 break;
529 case TI_EV_ERROR:
530 if (TI_EVENT_CODE(e) == TI_EV_CODE_ERR_INVAL_CMD)
531 printf("%s: invalid command\n",
532 sc->sc_dv.dv_xname);
533 else if (TI_EVENT_CODE(e) == TI_EV_CODE_ERR_UNIMP_CMD)
534 printf("%s: unknown command\n",
535 sc->sc_dv.dv_xname);
536 else if (TI_EVENT_CODE(e) == TI_EV_CODE_ERR_BADCFG)
537 printf("%s: bad config data\n",
538 sc->sc_dv.dv_xname);
539 break;
540 case TI_EV_FIRMWARE_UP:
541 ti_init2(sc);
542 break;
543 case TI_EV_STATS_UPDATED:
544 ti_stats_update(sc);
545 break;
546 case TI_EV_RESET_JUMBO_RING:
547 case TI_EV_MCAST_UPDATED:
548
549 break;
550 default:
551 printf("%s: unknown event: %d\n", sc->sc_dv.dv_xname,
552 TI_EVENT_EVENT(e));
553 break;
554 }
555
556 TI_INC(sc->ti_ev_saved_considx, TI_EVENT_RING_CNT);
557 CSR_WRITE_4(sc, TI_GCR_EVENTCONS_IDX, sc->ti_ev_saved_considx);
558 }
559 }
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
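/*
 * Allocate a single contiguous DMA region for jumbo receive buffers
 * and carve it into TI_JSLOTS fixed-size slots, each tracked on the
 * jumbo free list.
 */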
584 int
585 ti_alloc_jumbo_mem(struct ti_softc *sc)
586 {
587 caddr_t ptr, kva;
588 bus_dma_segment_t seg;
589 int i, rseg, state, error;
590 struct ti_jpool_entry *entry;
591
592 state = error = 0;
593
594
595 if (bus_dmamem_alloc(sc->sc_dmatag, TI_JMEM, PAGE_SIZE, 0,
596 &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
597 printf("%s: can't alloc rx buffers\n", sc->sc_dv.dv_xname);
598 return (ENOBUFS);
599 }
600
601 state = 1;
602 if (bus_dmamem_map(sc->sc_dmatag, &seg, rseg, TI_JMEM, &kva,
603 BUS_DMA_NOWAIT)) {
604 printf("%s: can't map dma buffers (%d bytes)\n",
605 sc->sc_dv.dv_xname, TI_JMEM);
606 error = ENOBUFS;
607 goto out;
608 }
609
610 state = 2;
611 if (bus_dmamap_create(sc->sc_dmatag, TI_JMEM, 1, TI_JMEM, 0,
612 BUS_DMA_NOWAIT, &sc->ti_cdata.ti_rx_jumbo_map)) {
613 printf("%s: can't create dma map\n", sc->sc_dv.dv_xname);
614 error = ENOBUFS;
615 goto out;
616 }
617
618 state = 3;
619 if (bus_dmamap_load(sc->sc_dmatag, sc->ti_cdata.ti_rx_jumbo_map, kva,
620 TI_JMEM, NULL, BUS_DMA_NOWAIT)) {
621 printf("%s: can't load dma map\n", sc->sc_dv.dv_xname);
622 error = ENOBUFS;
623 goto out;
624 }
625
626 state = 4;
627 sc->ti_cdata.ti_jumbo_buf = (caddr_t)kva;
628
629 SLIST_INIT(&sc->ti_jfree_listhead);
630 SLIST_INIT(&sc->ti_jinuse_listhead);
631
632
633
634
635
636 ptr = sc->ti_cdata.ti_jumbo_buf;
637 for (i = 0; i < TI_JSLOTS; i++) {
638 sc->ti_cdata.ti_jslots[i].ti_buf = ptr;
639 sc->ti_cdata.ti_jslots[i].ti_inuse = 0;
640 ptr += TI_JLEN;
641 entry = malloc(sizeof(struct ti_jpool_entry),
642 M_DEVBUF, M_NOWAIT);
643 if (entry == NULL) {
644 sc->ti_cdata.ti_jumbo_buf = NULL;
645 printf("%s: no memory for jumbo buffer queue\n",
646 sc->sc_dv.dv_xname);
647 error = ENOBUFS;
648 goto out;
649 }
650 entry->slot = i;
651 SLIST_INSERT_HEAD(&sc->ti_jfree_listhead, entry, jpool_entries);
652 }
653 out:
654 if (error != 0) {
655 switch (state) {
656 case 4:
657 bus_dmamap_unload(sc->sc_dmatag,
658 sc->ti_cdata.ti_rx_jumbo_map);
659 case 3:
660 bus_dmamap_destroy(sc->sc_dmatag,
661 sc->ti_cdata.ti_rx_jumbo_map);
662 case 2:
663 bus_dmamem_unmap(sc->sc_dmatag, kva, TI_JMEM);
664 case 1:
665 bus_dmamem_free(sc->sc_dmatag, &seg, rseg);
666 break;
667 default:
668 break;
669 }
670 }
671
672 return (error);
673 }
674
675
676
677
678 void *
679 ti_jalloc(struct ti_softc *sc)
680 {
681 struct ti_jpool_entry *entry;
682
683 entry = SLIST_FIRST(&sc->ti_jfree_listhead);
684
685 if (entry == NULL)
686 return (NULL);
687
688 SLIST_REMOVE_HEAD(&sc->ti_jfree_listhead, jpool_entries);
689 SLIST_INSERT_HEAD(&sc->ti_jinuse_listhead, entry, jpool_entries);
690 sc->ti_cdata.ti_jslots[entry->slot].ti_inuse = 1;
691 return (sc->ti_cdata.ti_jslots[entry->slot].ti_buf);
692 }
693
694
695
696
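/*
 * Release a jumbo buffer. This is the external free routine handed to
 * MEXTADD(), so it runs when the mbuf holding the buffer is freed; the
 * slot is moved back to the free list.
 */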
697 void
698 ti_jfree(caddr_t buf, u_int size, void *arg)
699 {
700 struct ti_softc *sc;
701 int i;
702 struct ti_jpool_entry *entry;
703
704
705 sc = (struct ti_softc *)arg;
706
707 if (sc == NULL)
708 panic("ti_jfree: can't find softc pointer!");
709
710
711 i = ((vaddr_t)buf - (vaddr_t)sc->ti_cdata.ti_jumbo_buf) / TI_JLEN;
712
713 if ((i < 0) || (i >= TI_JSLOTS))
714 panic("ti_jfree: asked to free buffer that we don't manage!");
715 else if (sc->ti_cdata.ti_jslots[i].ti_inuse == 0)
716 panic("ti_jfree: buffer already free!");
717
718 sc->ti_cdata.ti_jslots[i].ti_inuse--;
719 if(sc->ti_cdata.ti_jslots[i].ti_inuse == 0) {
720 entry = SLIST_FIRST(&sc->ti_jinuse_listhead);
721 if (entry == NULL)
722 panic("ti_jfree: buffer not in use!");
723 entry->slot = i;
724 SLIST_REMOVE_HEAD(&sc->ti_jinuse_listhead, jpool_entries);
725 SLIST_INSERT_HEAD(&sc->ti_jfree_listhead,
726 entry, jpool_entries);
727 }
728 }
729
730
731
732
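/*
 * Initialize a standard receive ring descriptor: attach a fresh mbuf
 * cluster (or recycle the one passed in), load it into a DMA map and
 * fill in the ring entry.
 */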
733 int
734 ti_newbuf_std(struct ti_softc *sc, int i, struct mbuf *m,
735 bus_dmamap_t dmamap)
736 {
737 struct mbuf *m_new = NULL;
738 struct ti_rx_desc *r;
739
740 if (dmamap == NULL) {
741
742
743 if (bus_dmamap_create(sc->sc_dmatag, MCLBYTES, 1, MCLBYTES,
744 0, BUS_DMA_NOWAIT, &dmamap)) {
745 printf("%s: can't create recv map\n",
746 sc->sc_dv.dv_xname);
747 return (ENOMEM);
748 }
749 } else if (m == NULL)
750 bus_dmamap_unload(sc->sc_dmatag, dmamap);
751
752 sc->ti_cdata.ti_rx_std_map[i] = dmamap;
753
754 if (m == NULL) {
755 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
756 if (m_new == NULL)
757 return (ENOBUFS);
758
759 MCLGET(m_new, M_DONTWAIT);
760 if (!(m_new->m_flags & M_EXT)) {
761 m_freem(m_new);
762 return (ENOBUFS);
763 }
764 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
765
766 m_adj(m_new, ETHER_ALIGN);
767
768 if (bus_dmamap_load_mbuf(sc->sc_dmatag, dmamap, m_new,
769 BUS_DMA_NOWAIT))
770 return (ENOBUFS);
771
772 } else {
773
774
775
776
777
778 m_new = m;
779 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
780 m_new->m_data = m_new->m_ext.ext_buf;
781 m_adj(m_new, ETHER_ALIGN);
782 }
783
784 sc->ti_cdata.ti_rx_std_chain[i] = m_new;
785 r = &sc->ti_rdata->ti_rx_std_ring[i];
786 TI_HOSTADDR(r->ti_addr) = dmamap->dm_segs[0].ds_addr;
787 r->ti_type = TI_BDTYPE_RECV_BD;
788 r->ti_flags = TI_BDFLAG_IP_CKSUM;
789 r->ti_len = dmamap->dm_segs[0].ds_len;
790 r->ti_idx = i;
791
792 if ((dmamap->dm_segs[0].ds_addr & ~(MCLBYTES - 1)) !=
793 ((dmamap->dm_segs[0].ds_addr + dmamap->dm_segs[0].ds_len - 1) &
794 ~(MCLBYTES - 1)))
795 panic("%s: overwritten!!!", sc->sc_dv.dv_xname);
796
797 return (0);
798 }
799
800
801
802
803
804 int
805 ti_newbuf_mini(struct ti_softc *sc, int i, struct mbuf *m,
806 bus_dmamap_t dmamap)
807 {
808 struct mbuf *m_new = NULL;
809 struct ti_rx_desc *r;
810
811 if (dmamap == NULL) {
812
813
814 if (bus_dmamap_create(sc->sc_dmatag, MHLEN, 1, MHLEN,
815 0, BUS_DMA_NOWAIT, &dmamap)) {
816 printf("%s: can't create recv map\n",
817 sc->sc_dv.dv_xname);
818 return (ENOMEM);
819 }
820 } else if (m == NULL)
821 bus_dmamap_unload(sc->sc_dmatag, dmamap);
822
823 sc->ti_cdata.ti_rx_mini_map[i] = dmamap;
824
825 if (m == NULL) {
826 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
827 if (m_new == NULL)
828 return (ENOBUFS);
829 m_new->m_len = m_new->m_pkthdr.len = MHLEN;
830 m_adj(m_new, ETHER_ALIGN);
831
832 if (bus_dmamap_load_mbuf(sc->sc_dmatag, dmamap, m_new,
833 BUS_DMA_NOWAIT))
834 return (ENOBUFS);
835
836 } else {
837
838
839
840
841
842 m_new = m;
843 m_new->m_data = m_new->m_pktdat;
844 m_new->m_len = m_new->m_pkthdr.len = MHLEN;
845 }
846
847 r = &sc->ti_rdata->ti_rx_mini_ring[i];
848 sc->ti_cdata.ti_rx_mini_chain[i] = m_new;
849 TI_HOSTADDR(r->ti_addr) = dmamap->dm_segs[0].ds_addr;
850 r->ti_type = TI_BDTYPE_RECV_BD;
851 r->ti_flags = TI_BDFLAG_MINI_RING | TI_BDFLAG_IP_CKSUM;
852 r->ti_len = dmamap->dm_segs[0].ds_len;
853 r->ti_idx = i;
854
855 return (0);
856 }
857
858
859
860
861
862 int
863 ti_newbuf_jumbo(struct ti_softc *sc, int i, struct mbuf *m)
864 {
865 struct mbuf *m_new = NULL;
866 struct ti_rx_desc *r;
867
868 if (m == NULL) {
869 caddr_t buf = NULL;
870
871
872 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
873 if (m_new == NULL)
874 return (ENOBUFS);
875
876
877 buf = ti_jalloc(sc);
878 if (buf == NULL) {
879 m_freem(m_new);
880 return (ENOBUFS);
881 }
882
883
884 m_new->m_len = m_new->m_pkthdr.len = TI_JUMBO_FRAMELEN;
885 MEXTADD(m_new, buf, TI_JUMBO_FRAMELEN, 0, ti_jfree, sc);
886 } else {
887
888
889
890
891
892 m_new = m;
893 m_new->m_data = m_new->m_ext.ext_buf;
894 m_new->m_ext.ext_size = TI_JUMBO_FRAMELEN;
895 }
896
897 m_adj(m_new, ETHER_ALIGN);
898
899 r = &sc->ti_rdata->ti_rx_jumbo_ring[i];
900 sc->ti_cdata.ti_rx_jumbo_chain[i] = m_new;
901 TI_HOSTADDR(r->ti_addr) = TI_JUMBO_DMA_ADDR(sc, m_new);
902 r->ti_type = TI_BDTYPE_RECV_JUMBO_BD;
903 r->ti_flags = TI_BDFLAG_JUMBO_RING | TI_BDFLAG_IP_CKSUM;
904 r->ti_len = m_new->m_len;
905 r->ti_idx = i;
906
907 return (0);
908 }
909
910
911
912
913
914
915
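/*
 * Stock the standard receive ring with mbuf clusters and hand the
 * producer index to the NIC. The jumbo and mini rings below are
 * initialized the same way.
 */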
916 int
917 ti_init_rx_ring_std(struct ti_softc *sc)
918 {
919 int i;
920 struct ti_cmd_desc cmd;
921
922 for (i = 0; i < TI_SSLOTS; i++) {
923 if (ti_newbuf_std(sc, i, NULL, 0) == ENOBUFS)
924 return (ENOBUFS);
925 }
926
927 TI_UPDATE_STDPROD(sc, i - 1);
928 sc->ti_std = i - 1;
929
930 return (0);
931 }
932
933 void
934 ti_free_rx_ring_std(struct ti_softc *sc)
935 {
936 int i;
937
938 for (i = 0; i < TI_STD_RX_RING_CNT; i++) {
939 if (sc->ti_cdata.ti_rx_std_chain[i] != NULL) {
940 m_freem(sc->ti_cdata.ti_rx_std_chain[i]);
941 sc->ti_cdata.ti_rx_std_chain[i] = NULL;
942 bus_dmamap_destroy(sc->sc_dmatag,
943 sc->ti_cdata.ti_rx_std_map[i]);
944 sc->ti_cdata.ti_rx_std_map[i] = 0;
945 }
946 bzero((char *)&sc->ti_rdata->ti_rx_std_ring[i],
947 sizeof(struct ti_rx_desc));
948 }
949 }
950
951 int
952 ti_init_rx_ring_jumbo(struct ti_softc *sc)
953 {
954 int i;
955 struct ti_cmd_desc cmd;
956
957 for (i = 0; i < TI_JUMBO_RX_RING_CNT; i++) {
958 if (ti_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
959 return (ENOBUFS);
960 }
961
962 TI_UPDATE_JUMBOPROD(sc, i - 1);
963 sc->ti_jumbo = i - 1;
964
965 return (0);
966 }
967
968 void
969 ti_free_rx_ring_jumbo(struct ti_softc *sc)
970 {
971 int i;
972
973 for (i = 0; i < TI_JUMBO_RX_RING_CNT; i++) {
974 if (sc->ti_cdata.ti_rx_jumbo_chain[i] != NULL) {
975 m_freem(sc->ti_cdata.ti_rx_jumbo_chain[i]);
976 sc->ti_cdata.ti_rx_jumbo_chain[i] = NULL;
977 }
978 bzero((char *)&sc->ti_rdata->ti_rx_jumbo_ring[i],
979 sizeof(struct ti_rx_desc));
980 }
981 }
982
983 int
984 ti_init_rx_ring_mini(struct ti_softc *sc)
985 {
986 int i;
987
988 for (i = 0; i < TI_MSLOTS; i++) {
989 if (ti_newbuf_mini(sc, i, NULL, 0) == ENOBUFS)
990 return (ENOBUFS);
991 }
992
993 TI_UPDATE_MINIPROD(sc, i - 1);
994 sc->ti_mini = i - 1;
995
996 return (0);
997 }
998
999 void
1000 ti_free_rx_ring_mini(struct ti_softc *sc)
1001 {
1002 int i;
1003
1004 for (i = 0; i < TI_MINI_RX_RING_CNT; i++) {
1005 if (sc->ti_cdata.ti_rx_mini_chain[i] != NULL) {
1006 m_freem(sc->ti_cdata.ti_rx_mini_chain[i]);
1007 sc->ti_cdata.ti_rx_mini_chain[i] = NULL;
1008 bus_dmamap_destroy(sc->sc_dmatag,
1009 sc->ti_cdata.ti_rx_mini_map[i]);
1010 sc->ti_cdata.ti_rx_mini_map[i] = 0;
1011 }
1012 bzero((char *)&sc->ti_rdata->ti_rx_mini_ring[i],
1013 sizeof(struct ti_rx_desc));
1014 }
1015 }
1016
1017 void
1018 ti_free_tx_ring(struct ti_softc *sc)
1019 {
1020 int i;
1021 struct ti_txmap_entry *entry;
1022
1023 if (sc->ti_rdata->ti_tx_ring == NULL)
1024 return;
1025
1026 for (i = 0; i < TI_TX_RING_CNT; i++) {
1027 if (sc->ti_cdata.ti_tx_chain[i] != NULL) {
1028 m_freem(sc->ti_cdata.ti_tx_chain[i]);
1029 sc->ti_cdata.ti_tx_chain[i] = NULL;
1030 SLIST_INSERT_HEAD(&sc->ti_tx_map_listhead,
1031 sc->ti_cdata.ti_tx_map[i], link);
1032 sc->ti_cdata.ti_tx_map[i] = 0;
1033 }
1034 bzero((char *)&sc->ti_rdata->ti_tx_ring[i],
1035 sizeof(struct ti_tx_desc));
1036 }
1037
1038 while ((entry = SLIST_FIRST(&sc->ti_tx_map_listhead))) {
1039 SLIST_REMOVE_HEAD(&sc->ti_tx_map_listhead, link);
1040 bus_dmamap_destroy(sc->sc_dmatag, entry->dmamap);
1041 free(entry, M_DEVBUF);
1042 }
1043 }
1044
1045 int
1046 ti_init_tx_ring(struct ti_softc *sc)
1047 {
1048 int i;
1049 bus_dmamap_t dmamap;
1050 struct ti_txmap_entry *entry;
1051
1052 sc->ti_txcnt = 0;
1053 sc->ti_tx_saved_considx = 0;
1054 sc->ti_tx_saved_prodidx = 0;
1055 CSR_WRITE_4(sc, TI_MB_SENDPROD_IDX, 0);
1056
1057 SLIST_INIT(&sc->ti_tx_map_listhead);
1058 for (i = 0; i < TI_TX_RING_CNT; i++) {
1059 if (bus_dmamap_create(sc->sc_dmatag, TI_JUMBO_FRAMELEN,
1060 TI_NTXSEG, MCLBYTES, 0, BUS_DMA_NOWAIT, &dmamap))
1061 return (ENOBUFS);
1062
1063 entry = malloc(sizeof(*entry), M_DEVBUF, M_NOWAIT);
1064 if (!entry) {
1065 bus_dmamap_destroy(sc->sc_dmatag, dmamap);
1066 return (ENOBUFS);
1067 }
1068 entry->dmamap = dmamap;
1069 SLIST_INSERT_HEAD(&sc->ti_tx_map_listhead, entry, link);
1070 }
1071
1072 return (0);
1073 }
1074
1075
1076
1077
1078
1079
1080 void
1081 ti_add_mcast(struct ti_softc *sc, struct ether_addr *addr)
1082 {
1083 struct ti_cmd_desc cmd;
1084 u_int16_t *m;
1085 u_int32_t ext[2] = {0, 0};
1086
1087 m = (u_int16_t *)&addr->ether_addr_octet[0];
1088
1089 switch(sc->ti_hwrev) {
1090 case TI_HWREV_TIGON:
1091 CSR_WRITE_4(sc, TI_GCR_MAR0, htons(m[0]));
1092 CSR_WRITE_4(sc, TI_GCR_MAR1, (htons(m[1]) << 16) | htons(m[2]));
1093 TI_DO_CMD(TI_CMD_ADD_MCAST_ADDR, 0, 0);
1094 break;
1095 case TI_HWREV_TIGON_II:
1096 ext[0] = htons(m[0]);
1097 ext[1] = (htons(m[1]) << 16) | htons(m[2]);
1098 TI_DO_CMD_EXT(TI_CMD_EXT_ADD_MCAST, 0, 0, (caddr_t)&ext, 2);
1099 break;
1100 default:
1101 printf("%s: unknown hwrev\n", sc->sc_dv.dv_xname);
1102 break;
1103 }
1104 }
1105
1106 void
1107 ti_del_mcast(struct ti_softc *sc, struct ether_addr *addr)
1108 {
1109 struct ti_cmd_desc cmd;
1110 u_int16_t *m;
1111 u_int32_t ext[2] = {0, 0};
1112
1113 m = (u_int16_t *)&addr->ether_addr_octet[0];
1114
1115 switch(sc->ti_hwrev) {
1116 case TI_HWREV_TIGON:
1117 CSR_WRITE_4(sc, TI_GCR_MAR0, htons(m[0]));
1118 CSR_WRITE_4(sc, TI_GCR_MAR1, (htons(m[1]) << 16) | htons(m[2]));
1119 TI_DO_CMD(TI_CMD_DEL_MCAST_ADDR, 0, 0);
1120 break;
1121 case TI_HWREV_TIGON_II:
1122 ext[0] = htons(m[0]);
1123 ext[1] = (htons(m[1]) << 16) | htons(m[2]);
1124 TI_DO_CMD_EXT(TI_CMD_EXT_DEL_MCAST, 0, 0, (caddr_t)&ext, 2);
1125 break;
1126 default:
1127 printf("%s: unknown hwrev\n", sc->sc_dv.dv_xname);
1128 break;
1129 }
1130 }
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
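/*
 * Program the multicast filter. Interrupts are masked while the list
 * is rewritten; if a multicast address range is encountered the
 * interface falls back to IFF_ALLMULTI.
 */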
1146 void
1147 ti_setmulti(struct ti_softc *sc)
1148 {
1149 struct ifnet *ifp;
1150 struct arpcom *ac = &sc->arpcom;
1151 struct ether_multi *enm;
1152 struct ether_multistep step;
1153 struct ti_cmd_desc cmd;
1154 struct ti_mc_entry *mc;
1155 u_int32_t intrs;
1156
1157 ifp = &sc->arpcom.ac_if;
1158
1159 allmulti:
1160 if (ifp->if_flags & IFF_ALLMULTI) {
1161 TI_DO_CMD(TI_CMD_SET_ALLMULTI, TI_CMD_CODE_ALLMULTI_ENB, 0);
1162 return;
1163 } else {
1164 TI_DO_CMD(TI_CMD_SET_ALLMULTI, TI_CMD_CODE_ALLMULTI_DIS, 0);
1165 }
1166
1167
1168 intrs = CSR_READ_4(sc, TI_MB_HOSTINTR);
1169 CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1);
1170
1171
1172 while (SLIST_FIRST(&sc->ti_mc_listhead) != NULL) {
1173 mc = SLIST_FIRST(&sc->ti_mc_listhead);
1174 ti_del_mcast(sc, &mc->mc_addr);
1175 SLIST_REMOVE_HEAD(&sc->ti_mc_listhead, mc_entries);
1176 free(mc, M_DEVBUF);
1177 }
1178
1179
1180 ETHER_FIRST_MULTI(step, ac, enm);
1181 while (enm != NULL) {
1182 if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
1183
1184 CSR_WRITE_4(sc, TI_MB_HOSTINTR, intrs);
1185
1186 ifp->if_flags |= IFF_ALLMULTI;
1187 goto allmulti;
1188 }
1189 mc = malloc(sizeof(struct ti_mc_entry), M_DEVBUF, M_NOWAIT);
1190 if (mc == NULL)
1191 panic("ti_setmulti");
1192 bcopy(enm->enm_addrlo, (char *)&mc->mc_addr, ETHER_ADDR_LEN);
1193 SLIST_INSERT_HEAD(&sc->ti_mc_listhead, mc, mc_entries);
1194 ti_add_mcast(sc, &mc->mc_addr);
1195 ETHER_NEXT_MULTI(step, enm);
1196 }
1197
1198
1199 CSR_WRITE_4(sc, TI_MB_HOSTINTR, intrs);
1200 }
1201
1202
1203
1204
1205
1206
1207
1208 int
1209 ti_64bitslot_war(struct ti_softc *sc)
1210 {
1211 if (!(CSR_READ_4(sc, TI_PCI_STATE) & TI_PCISTATE_32BIT_BUS)) {
1212 CSR_WRITE_4(sc, 0x600, 0);
1213 CSR_WRITE_4(sc, 0x604, 0);
1214 CSR_WRITE_4(sc, 0x600, 0x5555AAAA);
1215 if (CSR_READ_4(sc, 0x604) == 0x5555AAAA) {
1216 if (sc->ti_hwrev == TI_HWREV_TIGON)
1217 return (EINVAL);
1218 else {
1219 TI_SETBIT(sc, TI_PCI_STATE,
1220 TI_PCISTATE_32BIT_BUS);
1221 return (0);
1222 }
1223 }
1224 }
1225
1226 return (0);
1227 }
1228
1229
1230
1231
1232
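/*
 * Do endian, PCI and DMA initialization: check the on-board self-test
 * results, identify the chip revision, and set up cache line size and
 * DMA state machine parameters.
 */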
1233 int
1234 ti_chipinit(struct ti_softc *sc)
1235 {
1236 u_int32_t cacheline;
1237 u_int32_t pci_writemax = 0;
1238 u_int32_t chip_rev;
1239
1240
1241 sc->ti_linkstat = TI_EV_CODE_LINK_DOWN;
1242
1243
1244 CSR_WRITE_4(sc, TI_MISC_HOST_CTL,
1245 TI_MHC_LITTLEENDIAN_INIT | (TI_MHC_LITTLEENDIAN_INIT << 24));
1246
1247
1248 if (CSR_READ_4(sc, TI_CPU_STATE) & TI_CPUSTATE_ROMFAIL) {
1249 printf("%s: board self-diagnostics failed!\n",
1250 sc->sc_dv.dv_xname);
1251 return (ENODEV);
1252 }
1253
1254
1255 TI_SETBIT(sc, TI_CPU_STATE, TI_CPUSTATE_HALT);
1256
1257
1258 chip_rev = CSR_READ_4(sc, TI_MISC_HOST_CTL) & TI_MHC_CHIP_REV_MASK;
1259 switch(chip_rev) {
1260 case TI_REV_TIGON_I:
1261 sc->ti_hwrev = TI_HWREV_TIGON;
1262 break;
1263 case TI_REV_TIGON_II:
1264 sc->ti_hwrev = TI_HWREV_TIGON_II;
1265 break;
1266 default:
1267 printf("\n");
1268 printf("%s: unsupported chip revision: %x\n",
1269 sc->sc_dv.dv_xname, chip_rev);
1270 return (ENODEV);
1271 }
1272
1273
1274 if (sc->ti_hwrev == TI_HWREV_TIGON_II) {
1275 TI_SETBIT(sc, TI_CPU_CTL_B, TI_CPUSTATE_HALT);
1276 TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_SRAM_BANK_512K);
1277 TI_SETBIT(sc, TI_MISC_CONF, TI_MCR_SRAM_SYNCHRONOUS);
1278 }
1279
1280
1281 CSR_WRITE_4(sc, TI_PCI_STATE, TI_PCI_READ_CMD|TI_PCI_WRITE_CMD);
1282 if (sc->ti_hwrev == TI_HWREV_TIGON_II)
1283 TI_SETBIT(sc, TI_PCI_STATE, TI_PCISTATE_USE_MEM_RD_MULT);
1284
1285
1286 TI_CLRBIT(sc, TI_PCI_STATE, (TI_PCISTATE_WRITE_MAXDMA|
1287 TI_PCISTATE_READ_MAXDMA));
1288
1289
1290 cacheline = CSR_READ_4(sc, TI_PCI_BIST) & 0xFF;
1291
1292
1293
1294
1295
1296
1297
1298 if (CSR_READ_4(sc, TI_PCI_CMDSTAT) & PCI_COMMAND_INVALIDATE_ENABLE) {
1299 switch(cacheline) {
1300 case 1:
1301 case 4:
1302 case 8:
1303 case 16:
1304 case 32:
1305 case 64:
1306 break;
1307 default:
1308
1309 CSR_WRITE_4(sc, TI_PCI_CMDSTAT, CSR_READ_4(sc,
1310 TI_PCI_CMDSTAT) & ~PCI_COMMAND_INVALIDATE_ENABLE);
1311 break;
1312 }
1313 }
1314
1315 #ifdef __brokenalpha__
1316
1317
1318
1319
1320
1321
1322
1323 TI_SETBIT(sc, TI_PCI_STATE, pci_writemax|TI_PCI_READMAX_1024);
1324 #else
1325 TI_SETBIT(sc, TI_PCI_STATE, pci_writemax);
1326 #endif
1327
1328
1329 TI_SETBIT(sc, TI_PCI_STATE, TI_PCISTATE_MINDMA);
1330
1331
1332 CSR_WRITE_4(sc, TI_GCR_OPMODE, TI_DMA_SWAP_OPTIONS |
1333 TI_OPMODE_WARN_ENB | TI_OPMODE_FATAL_ENB |
1334 TI_OPMODE_DONT_FRAG_JUMBO);
1335
1336
1337 CSR_WRITE_4(sc, TI_GCR_DMA_WRITECFG, TI_DMA_STATE_THRESH_8W);
1338 CSR_WRITE_4(sc, TI_GCR_DMA_READCFG, TI_DMA_STATE_THRESH_8W);
1339
1340 if (ti_64bitslot_war(sc)) {
1341 printf("%s: bios thinks we're in a 64 bit slot, "
1342 "but we aren't\n", sc->sc_dv.dv_xname);
1343 return (EINVAL);
1344 }
1345
1346 return (0);
1347 }
1348
1349
1350
1351
1352
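/*
 * Initialize the general information block: load the firmware, set up
 * the event, command, receive, return and transmit ring control
 * blocks, then release the on-board CPU.
 */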
1353 int
1354 ti_gibinit(struct ti_softc *sc)
1355 {
1356 struct ti_rcb *rcb;
1357 int i;
1358 struct ifnet *ifp;
1359
1360 ifp = &sc->arpcom.ac_if;
1361
1362
1363 CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1);
1364
1365
1366
1367
1368
1369
1370
1371 CSR_WRITE_4(sc, TI_GCR_GENINFO_HI, 0);
1372 CSR_WRITE_4(sc, TI_GCR_GENINFO_LO,
1373 TI_RING_DMA_ADDR(sc, ti_info) & 0xffffffff);
1374
1375
1376 ti_loadfw(sc);
1377
1378
1379
1380
1381 rcb = &sc->ti_rdata->ti_info.ti_ev_rcb;
1382
1383 TI_HOSTADDR(rcb->ti_hostaddr) = TI_RING_DMA_ADDR(sc, ti_event_ring);
1384 rcb->ti_flags = 0;
1385 TI_HOSTADDR(sc->ti_rdata->ti_info.ti_ev_prodidx_ptr) =
1386 TI_RING_DMA_ADDR(sc, ti_ev_prodidx_r);
1387 sc->ti_ev_prodidx.ti_idx = 0;
1388 CSR_WRITE_4(sc, TI_GCR_EVENTCONS_IDX, 0);
1389 sc->ti_ev_saved_considx = 0;
1390
1391
1392 rcb = &sc->ti_rdata->ti_info.ti_cmd_rcb;
1393
1394 TI_HOSTADDR(rcb->ti_hostaddr) = TI_GCR_NIC_ADDR(TI_GCR_CMDRING);
1395 rcb->ti_flags = 0;
1396 rcb->ti_max_len = 0;
1397 for (i = 0; i < TI_CMD_RING_CNT; i++) {
1398 CSR_WRITE_4(sc, TI_GCR_CMDRING + (i * 4), 0);
1399 }
1400 CSR_WRITE_4(sc, TI_GCR_CMDCONS_IDX, 0);
1401 CSR_WRITE_4(sc, TI_MB_CMDPROD_IDX, 0);
1402 sc->ti_cmd_saved_prodidx = 0;
1403
1404
1405
1406
1407
1408
1409 TI_HOSTADDR(sc->ti_rdata->ti_info.ti_refresh_stats_ptr) =
1410 TI_RING_DMA_ADDR(sc, ti_info.ti_stats);
1411
1412
1413 rcb = &sc->ti_rdata->ti_info.ti_std_rx_rcb;
1414 TI_HOSTADDR(rcb->ti_hostaddr) =
1415 TI_RING_DMA_ADDR(sc, ti_rx_std_ring);
1416 rcb->ti_max_len = ETHER_MAX_LEN;
1417 rcb->ti_flags = 0;
1418 rcb->ti_flags |= TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM;
1419
1420
1421 rcb = &sc->ti_rdata->ti_info.ti_jumbo_rx_rcb;
1422 TI_HOSTADDR(rcb->ti_hostaddr) = TI_RING_DMA_ADDR(sc, ti_rx_jumbo_ring);
1423 rcb->ti_max_len = TI_JUMBO_FRAMELEN;
1424 rcb->ti_flags = 0;
1425 rcb->ti_flags |= TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM;
1426
1427
1428
1429
1430
1431
1432 rcb = &sc->ti_rdata->ti_info.ti_mini_rx_rcb;
1433 TI_HOSTADDR(rcb->ti_hostaddr) = TI_RING_DMA_ADDR(sc, ti_rx_mini_ring);
1434 rcb->ti_max_len = MHLEN - ETHER_ALIGN;
1435 if (sc->ti_hwrev == TI_HWREV_TIGON)
1436 rcb->ti_flags = TI_RCB_FLAG_RING_DISABLED;
1437 else
1438 rcb->ti_flags = 0;
1439 rcb->ti_flags |= TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM;
1440
1441
1442
1443
1444 rcb = &sc->ti_rdata->ti_info.ti_return_rcb;
1445 TI_HOSTADDR(rcb->ti_hostaddr) = TI_RING_DMA_ADDR(sc,ti_rx_return_ring);
1446 rcb->ti_flags = 0;
1447 rcb->ti_max_len = TI_RETURN_RING_CNT;
1448 TI_HOSTADDR(sc->ti_rdata->ti_info.ti_return_prodidx_ptr) =
1449 TI_RING_DMA_ADDR(sc, ti_return_prodidx_r);
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460 CSR_WRITE_4(sc, TI_WINBASE, TI_TX_RING_BASE);
1461 bzero((char *)sc->ti_rdata->ti_tx_ring,
1462 TI_TX_RING_CNT * sizeof(struct ti_tx_desc));
1463 rcb = &sc->ti_rdata->ti_info.ti_tx_rcb;
1464 if (sc->ti_hwrev == TI_HWREV_TIGON)
1465 rcb->ti_flags = 0;
1466 else
1467 rcb->ti_flags = TI_RCB_FLAG_HOST_RING;
1468 rcb->ti_flags |= TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM;
1469 #if NVLAN > 0
1470 if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
1471 rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST;
1472 #endif
1473 rcb->ti_max_len = TI_TX_RING_CNT;
1474 if (sc->ti_hwrev == TI_HWREV_TIGON)
1475 TI_HOSTADDR(rcb->ti_hostaddr) = TI_TX_RING_BASE;
1476 else
1477 TI_HOSTADDR(rcb->ti_hostaddr) =
1478 TI_RING_DMA_ADDR(sc, ti_tx_ring);
1479 TI_HOSTADDR(sc->ti_rdata->ti_info.ti_tx_considx_ptr) =
1480 TI_RING_DMA_ADDR(sc, ti_tx_considx_r);
1481
1482 TI_RING_DMASYNC(sc, ti_info, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1483
1484
1485 CSR_WRITE_4(sc, TI_GCR_RX_COAL_TICKS, (sc->ti_rx_coal_ticks / 10));
1486 CSR_WRITE_4(sc, TI_GCR_TX_COAL_TICKS, sc->ti_tx_coal_ticks);
1487 CSR_WRITE_4(sc, TI_GCR_STAT_TICKS, sc->ti_stat_ticks);
1488 CSR_WRITE_4(sc, TI_GCR_RX_MAX_COAL_BD, sc->ti_rx_max_coal_bds);
1489 CSR_WRITE_4(sc, TI_GCR_TX_MAX_COAL_BD, sc->ti_tx_max_coal_bds);
1490 CSR_WRITE_4(sc, TI_GCR_TX_BUFFER_RATIO, sc->ti_tx_buf_ratio);
1491
1492
1493 CSR_WRITE_4(sc, TI_GCR_MASK_INTRS, 0);
1494 CSR_WRITE_4(sc, TI_MB_HOSTINTR, 0);
1495
1496
1497 TI_CLRBIT(sc, TI_CPU_STATE, (TI_CPUSTATE_HALT|TI_CPUSTATE_STEP));
1498
1499 return (0);
1500 }
1501
1502
1503
1504
1505
1506 int
1507 ti_probe(struct device *parent, void *match, void *aux)
1508 {
1509 return (pci_matchbyid((struct pci_attach_args *)aux, ti_devices,
1510 sizeof(ti_devices)/sizeof(ti_devices[0])));
1511 }
1512
1513 void
1514 ti_attach(struct device *parent, struct device *self, void *aux)
1515 {
1516 struct ti_softc *sc = (struct ti_softc *)self;
1517 struct pci_attach_args *pa = aux;
1518 pci_chipset_tag_t pc = pa->pa_pc;
1519 pci_intr_handle_t ih;
1520 const char *intrstr = NULL;
1521 bus_size_t size;
1522 bus_dma_segment_t seg;
1523 int rseg;
1524 struct ifnet *ifp;
1525 caddr_t kva;
1526
1527
1528
1529
1530
1531 if (pci_mapreg_map(pa, TI_PCI_LOMEM,
1532 PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT, 0,
1533 &sc->ti_btag, &sc->ti_bhandle, NULL, &size, 0)) {
1534 printf(": can't map mem space\n");
1535 return;
1536 }
1537
1538 if (pci_intr_map(pa, &ih)) {
1539 printf(": couldn't map interrupt\n");
1540 goto fail_1;
1541 }
1542 intrstr = pci_intr_string(pc, ih);
1543 sc->ti_intrhand = pci_intr_establish(pc, ih, IPL_NET, ti_intr, sc,
1544 self->dv_xname);
1545 if (sc->ti_intrhand == NULL) {
1546 printf(": couldn't establish interrupt");
1547 if (intrstr != NULL)
1548 printf(" at %s", intrstr);
1549 printf("\n");
1550 goto fail_1;
1551 }
1552
1553 if (ti_chipinit(sc)) {
1554 printf("%s: chip initialization failed\n", sc->sc_dv.dv_xname);
1555 goto fail_2;
1556 }
1557
1558
1559 ti_mem_set(sc, 0x2000, 0x100000 - 0x2000);
1560
1561
1562 if (ti_chipinit(sc)) {
1563 printf("%s: chip initialization failed\n", sc->sc_dv.dv_xname);
1564 goto fail_2;
1565 }
1566
1567
1568
1569
1570
1571
1572
1573
1574 if (ti_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
1575 TI_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
1576 printf("%s: failed to read station address\n",
1577 sc->sc_dv.dv_xname);
1579 goto fail_2;
1580 }
1581
1582
1583
1584
1585 printf(": %s, address %s\n", intrstr,
1586 ether_sprintf(sc->arpcom.ac_enaddr));
1587
1588
1589 sc->sc_dmatag = pa->pa_dmat;
1590 if (bus_dmamem_alloc(sc->sc_dmatag, sizeof(struct ti_ring_data),
1591 PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
1592 printf("%s: can't alloc rx buffers\n", sc->sc_dv.dv_xname);
1593 goto fail_2;
1594 }
1595 if (bus_dmamem_map(sc->sc_dmatag, &seg, rseg,
1596 sizeof(struct ti_ring_data), &kva, BUS_DMA_NOWAIT)) {
1597 printf("%s: can't map dma buffers (%d bytes)\n",
1598 sc->sc_dv.dv_xname, sizeof(struct ti_ring_data));
1599 goto fail_3;
1600 }
1601 if (bus_dmamap_create(sc->sc_dmatag, sizeof(struct ti_ring_data), 1,
1602 sizeof(struct ti_ring_data), 0, BUS_DMA_NOWAIT,
1603 &sc->ti_ring_map)) {
1604 printf("%s: can't create dma map\n", sc->sc_dv.dv_xname);
1605 goto fail_4;
1606 }
1607 if (bus_dmamap_load(sc->sc_dmatag, sc->ti_ring_map, kva,
1608 sizeof(struct ti_ring_data), NULL, BUS_DMA_NOWAIT)) {
1609 goto fail_5;
1610 }
1611 sc->ti_rdata = (struct ti_ring_data *)kva;
1612 bzero(sc->ti_rdata, sizeof(struct ti_ring_data));
1613
1614
1615 if (ti_alloc_jumbo_mem(sc)) {
1616 printf("%s: jumbo buffer allocation failed\n",
1617 sc->sc_dv.dv_xname);
1618 goto fail_5;
1619 }
1620
1621
1622
1623
1624
1625
1626
1627
1628 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_ALTEON &&
1629 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_ALTEON_ACENICT)
1630 sc->ti_copper = 1;
1631
1632 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_NETGEAR &&
1633 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_NETGEAR_GA620T)
1634 sc->ti_copper = 1;
1635
1636
1637 sc->ti_stat_ticks = 2 * TI_TICKS_PER_SEC;
1638 sc->ti_rx_coal_ticks = TI_TICKS_PER_SEC / 5000;
1639 sc->ti_tx_coal_ticks = TI_TICKS_PER_SEC / 500;
1640 sc->ti_rx_max_coal_bds = 64;
1641 sc->ti_tx_max_coal_bds = 128;
1642 sc->ti_tx_buf_ratio = 21;
1643
1644
1645 ifp = &sc->arpcom.ac_if;
1646 ifp->if_softc = sc;
1647 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1648 ifp->if_ioctl = ti_ioctl;
1649 ifp->if_start = ti_start;
1650 ifp->if_watchdog = ti_watchdog;
1651 ifp->if_hardmtu = TI_JUMBO_FRAMELEN - ETHER_HDR_LEN;
1652 IFQ_SET_MAXLEN(&ifp->if_snd, TI_TX_RING_CNT - 1);
1653 IFQ_SET_READY(&ifp->if_snd);
1654 bcopy(sc->sc_dv.dv_xname, ifp->if_xname, IFNAMSIZ);
1655
1656 ifp->if_capabilities = IFCAP_VLAN_MTU;
1657
1658 #if NVLAN > 0
1659 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
1660 #endif
1661
1662
1663 ifmedia_init(&sc->ifmedia, IFM_IMASK, ti_ifmedia_upd, ti_ifmedia_sts);
1664 if (sc->ti_copper) {
1665
1666
1667
1668
1669
1670
1671
1672
1673 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
1674 ifmedia_add(&sc->ifmedia,
1675 IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
1676 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_TX, 0, NULL);
1677 ifmedia_add(&sc->ifmedia,
1678 IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
1679 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_1000_T, 0, NULL);
1680 ifmedia_add(&sc->ifmedia,
1681 IFM_ETHER|IFM_1000_T|IFM_FDX, 0, NULL);
1682 } else {
1683
1684 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
1685 ifmedia_add(&sc->ifmedia,
1686 IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
1687 }
1688 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
1689 ifmedia_set(&sc->ifmedia, IFM_ETHER|IFM_AUTO);
1690
1691
1692
1693
1694 if_attach(ifp);
1695 ether_ifattach(ifp);
1696
1697 shutdownhook_establish(ti_shutdown, sc);
1698 return;
1699
1700 fail_5:
1701 bus_dmamap_destroy(sc->sc_dmatag, sc->ti_ring_map);
1702
1703 fail_4:
1704 bus_dmamem_unmap(sc->sc_dmatag, kva,
1705 sizeof(struct ti_ring_data));
1706
1707 fail_3:
1708 bus_dmamem_free(sc->sc_dmatag, &seg, rseg);
1709
1710 fail_2:
1711 pci_intr_disestablish(pc, sc->ti_intrhand);
1712
1713 fail_1:
1714 bus_space_unmap(sc->ti_btag, sc->ti_bhandle, size);
1715 }
1716
1717
1718
1719
1720
1721
1722
1723
1724
1725
1726
1727
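/*
 * Frame reception handler. Walk the RX return ring, pass completed
 * frames up the stack, replenish the std, mini and jumbo rings and
 * update the producer indexes.
 */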
1728 void
1729 ti_rxeof(struct ti_softc *sc)
1730 {
1731 struct ifnet *ifp;
1732 struct ti_cmd_desc cmd;
1733
1734 ifp = &sc->arpcom.ac_if;
1735
1736 while(sc->ti_rx_saved_considx != sc->ti_return_prodidx.ti_idx) {
1737 struct ti_rx_desc *cur_rx;
1738 u_int32_t rxidx;
1739 struct mbuf *m = NULL;
1740 int sumflags = 0;
1741 bus_dmamap_t dmamap;
1742
1743 cur_rx =
1744 &sc->ti_rdata->ti_rx_return_ring[sc->ti_rx_saved_considx];
1745 rxidx = cur_rx->ti_idx;
1746 TI_INC(sc->ti_rx_saved_considx, TI_RETURN_RING_CNT);
1747
1748 if (cur_rx->ti_flags & TI_BDFLAG_JUMBO_RING) {
1749 TI_INC(sc->ti_jumbo, TI_JUMBO_RX_RING_CNT);
1750 m = sc->ti_cdata.ti_rx_jumbo_chain[rxidx];
1751 sc->ti_cdata.ti_rx_jumbo_chain[rxidx] = NULL;
1752 if (cur_rx->ti_flags & TI_BDFLAG_ERROR) {
1753 ifp->if_ierrors++;
1754 ti_newbuf_jumbo(sc, sc->ti_jumbo, m);
1755 continue;
1756 }
1757 if (ti_newbuf_jumbo(sc, sc->ti_jumbo, NULL)
1758 == ENOBUFS) {
1759 struct mbuf *m0;
1760 m0 = m_devget(mtod(m, char *) - ETHER_ALIGN,
1761 cur_rx->ti_len + ETHER_ALIGN, 0, ifp, NULL);
1762 ti_newbuf_jumbo(sc, sc->ti_jumbo, m);
1763 if (m0 == NULL) {
1764 ifp->if_ierrors++;
1765 continue;
1766 }
1767 m_adj(m0, ETHER_ALIGN);
1768 m = m0;
1769 }
1770 } else if (cur_rx->ti_flags & TI_BDFLAG_MINI_RING) {
1771 TI_INC(sc->ti_mini, TI_MINI_RX_RING_CNT);
1772 m = sc->ti_cdata.ti_rx_mini_chain[rxidx];
1773 sc->ti_cdata.ti_rx_mini_chain[rxidx] = NULL;
1774 dmamap = sc->ti_cdata.ti_rx_mini_map[rxidx];
1775 sc->ti_cdata.ti_rx_mini_map[rxidx] = 0;
1776 if (cur_rx->ti_flags & TI_BDFLAG_ERROR) {
1777 ifp->if_ierrors++;
1778 ti_newbuf_mini(sc, sc->ti_mini, m, dmamap);
1779 continue;
1780 }
1781 if (ti_newbuf_mini(sc, sc->ti_mini, NULL, dmamap)
1782 == ENOBUFS) {
1783 ifp->if_ierrors++;
1784 ti_newbuf_mini(sc, sc->ti_mini, m, dmamap);
1785 continue;
1786 }
1787 } else {
1788 TI_INC(sc->ti_std, TI_STD_RX_RING_CNT);
1789 m = sc->ti_cdata.ti_rx_std_chain[rxidx];
1790 sc->ti_cdata.ti_rx_std_chain[rxidx] = NULL;
1791 dmamap = sc->ti_cdata.ti_rx_std_map[rxidx];
1792 sc->ti_cdata.ti_rx_std_map[rxidx] = 0;
1793 if (cur_rx->ti_flags & TI_BDFLAG_ERROR) {
1794 ifp->if_ierrors++;
1795 ti_newbuf_std(sc, sc->ti_std, m, dmamap);
1796 continue;
1797 }
1798 if (ti_newbuf_std(sc, sc->ti_std, NULL, dmamap)
1799 == ENOBUFS) {
1800 ifp->if_ierrors++;
1801 ti_newbuf_std(sc, sc->ti_std, m, dmamap);
1802 continue;
1803 }
1804 }
1805
1806 if (m == NULL)
1807 panic("%s: couldn't get mbuf", sc->sc_dv.dv_xname);
1808
1809 m->m_pkthdr.len = m->m_len = cur_rx->ti_len;
1810 ifp->if_ipackets++;
1811 m->m_pkthdr.rcvif = ifp;
1812
1813 #if NBPFILTER > 0
1814
1815
1816
1817 if (ifp->if_bpf)
1818 bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
1819 #endif
1820
1821 if ((cur_rx->ti_ip_cksum ^ 0xffff) == 0)
1822 sumflags |= M_IPV4_CSUM_IN_OK;
1823 m->m_pkthdr.csum_flags = sumflags;
1824 sumflags = 0;
1825
1826 ether_input_mbuf(ifp, m);
1827 }
1828
1829
1830 if (sc->ti_hwrev == TI_HWREV_TIGON)
1831 CSR_WRITE_4(sc, TI_GCR_RXRETURNCONS_IDX,
1832 sc->ti_rx_saved_considx);
1833
1834 TI_UPDATE_STDPROD(sc, sc->ti_std);
1835 TI_UPDATE_MINIPROD(sc, sc->ti_mini);
1836 TI_UPDATE_JUMBOPROD(sc, sc->ti_jumbo);
1837 }
1838
1839 void
1840 ti_txeof_tigon1(struct ti_softc *sc)
1841 {
1842 struct ifnet *ifp;
1843 struct ti_txmap_entry *entry;
1844 int active = 1;
1845
1846 ifp = &sc->arpcom.ac_if;
1847
1848
1849
1850
1851
1852 while (sc->ti_tx_saved_considx != sc->ti_tx_considx.ti_idx) {
1853 u_int32_t idx = 0;
1854 struct ti_tx_desc txdesc;
1855
1856 idx = sc->ti_tx_saved_considx;
1857 ti_mem_read(sc, TI_TX_RING_BASE + idx * sizeof(txdesc),
1858 sizeof(txdesc), (caddr_t)&txdesc);
1859
1860 if (txdesc.ti_flags & TI_BDFLAG_END)
1861 ifp->if_opackets++;
1862
1863 if (sc->ti_cdata.ti_tx_chain[idx] != NULL) {
1864 m_freem(sc->ti_cdata.ti_tx_chain[idx]);
1865 sc->ti_cdata.ti_tx_chain[idx] = NULL;
1866
1867 entry = sc->ti_cdata.ti_tx_map[idx];
1868 bus_dmamap_sync(sc->sc_dmatag, entry->dmamap, 0,
1869 entry->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1870
1871 bus_dmamap_unload(sc->sc_dmatag, entry->dmamap);
1872 SLIST_INSERT_HEAD(&sc->ti_tx_map_listhead, entry,
1873 link);
1874 sc->ti_cdata.ti_tx_map[idx] = NULL;
1875
1876 }
1877 sc->ti_txcnt--;
1878 TI_INC(sc->ti_tx_saved_considx, TI_TX_RING_CNT);
1879 ifp->if_timer = 0;
1880
1881 active = 0;
1882 }
1883
1884 if (!active)
1885 ifp->if_flags &= ~IFF_OACTIVE;
1886 }
1887
1888 void
1889 ti_txeof_tigon2(struct ti_softc *sc)
1890 {
1891 struct ti_tx_desc *cur_tx = NULL;
1892 struct ifnet *ifp;
1893 struct ti_txmap_entry *entry;
1894
1895 ifp = &sc->arpcom.ac_if;
1896
1897
1898
1899
1900
1901 while (sc->ti_tx_saved_considx != sc->ti_tx_considx.ti_idx) {
1902 u_int32_t idx = 0;
1903
1904 idx = sc->ti_tx_saved_considx;
1905 cur_tx = &sc->ti_rdata->ti_tx_ring[idx];
1906
1907 if (cur_tx->ti_flags & TI_BDFLAG_END)
1908 ifp->if_opackets++;
1909 if (sc->ti_cdata.ti_tx_chain[idx] != NULL) {
1910 m_freem(sc->ti_cdata.ti_tx_chain[idx]);
1911 sc->ti_cdata.ti_tx_chain[idx] = NULL;
1912
1913 entry = sc->ti_cdata.ti_tx_map[idx];
1914 bus_dmamap_sync(sc->sc_dmatag, entry->dmamap, 0,
1915 entry->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1916
1917 bus_dmamap_unload(sc->sc_dmatag, entry->dmamap);
1918 SLIST_INSERT_HEAD(&sc->ti_tx_map_listhead, entry,
1919 link);
1920 sc->ti_cdata.ti_tx_map[idx] = NULL;
1921
1922 }
1923 sc->ti_txcnt--;
1924 TI_INC(sc->ti_tx_saved_considx, TI_TX_RING_CNT);
1925 ifp->if_timer = 0;
1926 }
1927
1928 if (cur_tx != NULL)
1929 ifp->if_flags &= ~IFF_OACTIVE;
1930 }
1931
1932 int
1933 ti_intr(void *xsc)
1934 {
1935 struct ti_softc *sc;
1936 struct ifnet *ifp;
1937
1938 sc = xsc;
1939 ifp = &sc->arpcom.ac_if;
1940
1941
1942
1943 if (!(CSR_READ_4(sc, TI_MISC_HOST_CTL) & TI_MHC_INTSTATE))
1944 return (0);
1945
1946
1947 CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1);
1948
1949 if (ifp->if_flags & IFF_RUNNING) {
1950
1951 ti_rxeof(sc);
1952
1953
1954 if (sc->ti_hwrev == TI_HWREV_TIGON)
1955 ti_txeof_tigon1(sc);
1956 else
1957 ti_txeof_tigon2(sc);
1958 }
1959
1960 ti_handle_events(sc);
1961
1962
1963 CSR_WRITE_4(sc, TI_MB_HOSTINTR, 0);
1964
1965 if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd))
1966 ti_start(ifp);
1967
1968 return (1);
1969 }
1970
1971 void
1972 ti_stats_update(struct ti_softc *sc)
1973 {
1974 struct ifnet *ifp;
1975 struct ti_stats *stats = &sc->ti_rdata->ti_info.ti_stats;
1976
1977 ifp = &sc->arpcom.ac_if;
1978
1979 TI_RING_DMASYNC(sc, ti_info.ti_stats, BUS_DMASYNC_POSTREAD);
1980
1981 ifp->if_collisions += stats->dot3StatsSingleCollisionFrames +
1982 stats->dot3StatsMultipleCollisionFrames +
1983 stats->dot3StatsExcessiveCollisions +
1984 stats->dot3StatsLateCollisions -
1985 ifp->if_collisions;
1986
1987 TI_RING_DMASYNC(sc, ti_info.ti_stats, BUS_DMASYNC_PREREAD);
1988 }
1989
1990
1991
1992
1993
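/*
 * Encapsulate an mbuf chain by writing one descriptor per DMA segment.
 * On the Tigon 1 the transmit ring lives in NIC memory, so descriptors
 * are copied out with ti_mem_write().
 */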
1994 int
1995 ti_encap_tigon1(struct ti_softc *sc, struct mbuf *m_head, u_int32_t *txidx)
1996 {
1997 u_int32_t frag, cur, cnt = 0;
1998 struct ti_txmap_entry *entry;
1999 bus_dmamap_t txmap;
2000 struct ti_tx_desc txdesc;
2001 int i = 0;
2002 #if NVLAN > 0
2003 struct ifvlan *ifv = NULL;
2004
2005 if ((m_head->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) &&
2006 m_head->m_pkthdr.rcvif != NULL)
2007 ifv = m_head->m_pkthdr.rcvif->if_softc;
2008 #endif
2009
2010 entry = SLIST_FIRST(&sc->ti_tx_map_listhead);
2011 if (entry == NULL)
2012 return (ENOBUFS);
2013 txmap = entry->dmamap;
2014
2015 cur = frag = *txidx;
2016
2017
2018
2019
2020
2021
2022 if (bus_dmamap_load_mbuf(sc->sc_dmatag, txmap, m_head,
2023 BUS_DMA_NOWAIT))
2024 return (ENOBUFS);
2025
2026 for (i = 0; i < txmap->dm_nsegs; i++) {
2027 if (sc->ti_cdata.ti_tx_chain[frag] != NULL)
2028 break;
2029
2030 memset(&txdesc, 0, sizeof(txdesc));
2031
2032 TI_HOSTADDR(txdesc.ti_addr) = txmap->dm_segs[i].ds_addr;
2033 txdesc.ti_len = txmap->dm_segs[i].ds_len & 0xffff;
2034
2035 txdesc.ti_flags = 0;
2036 #if NVLAN > 0
2037 if (ifv != NULL) {
2038 txdesc.ti_flags |= TI_BDFLAG_VLAN_TAG;
2039 txdesc.ti_vlan_tag = ifv->ifv_tag;
2040 }
2041 #endif
2042
2043 ti_mem_write(sc, TI_TX_RING_BASE + frag * sizeof(txdesc),
2044 sizeof(txdesc), (caddr_t)&txdesc);
2045
2046
2047
2048
2049
2050 if ((TI_TX_RING_CNT - (sc->ti_txcnt + cnt)) < 16)
2051 return (ENOBUFS);
2052 cur = frag;
2053 TI_INC(frag, TI_TX_RING_CNT);
2054 cnt++;
2055 }
2056
2057 if (frag == sc->ti_tx_saved_considx)
2058 return (ENOBUFS);
2059
2060 txdesc.ti_flags |= TI_BDFLAG_END;
2061 ti_mem_write(sc, TI_TX_RING_BASE + cur * sizeof(txdesc),
2062 sizeof(txdesc), (caddr_t)&txdesc);
2063
2064 bus_dmamap_sync(sc->sc_dmatag, txmap, 0, txmap->dm_mapsize,
2065 BUS_DMASYNC_PREWRITE);
2066
2067 sc->ti_cdata.ti_tx_chain[cur] = m_head;
2068 SLIST_REMOVE_HEAD(&sc->ti_tx_map_listhead, link);
2069 sc->ti_cdata.ti_tx_map[cur] = entry;
2070 sc->ti_txcnt += cnt;
2071
2072 *txidx = frag;
2073
2074 return (0);
2075 }
2076
2077
2078
2079
2080
2081 int
2082 ti_encap_tigon2(struct ti_softc *sc, struct mbuf *m_head, u_int32_t *txidx)
2083 {
2084 struct ti_tx_desc *f = NULL;
2085 u_int32_t frag, cur, cnt = 0;
2086 struct ti_txmap_entry *entry;
2087 bus_dmamap_t txmap;
2088 int i = 0;
2089 #if NVLAN > 0
2090 struct ifvlan *ifv = NULL;
2091
2092 if ((m_head->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) &&
2093 m_head->m_pkthdr.rcvif != NULL)
2094 ifv = m_head->m_pkthdr.rcvif->if_softc;
2095 #endif
2096
2097 entry = SLIST_FIRST(&sc->ti_tx_map_listhead);
2098 if (entry == NULL)
2099 return (ENOBUFS);
2100 txmap = entry->dmamap;
2101
2102 cur = frag = *txidx;
2103
2104
2105
2106
2107
2108
2109 if (bus_dmamap_load_mbuf(sc->sc_dmatag, txmap, m_head,
2110 BUS_DMA_NOWAIT))
2111 return (ENOBUFS);
2112
2113 for (i = 0; i < txmap->dm_nsegs; i++) {
2114 f = &sc->ti_rdata->ti_tx_ring[frag];
2115
2116 if (sc->ti_cdata.ti_tx_chain[frag] != NULL)
2117 break;
2118
2119 TI_HOSTADDR(f->ti_addr) = txmap->dm_segs[i].ds_addr;
2120 f->ti_len = txmap->dm_segs[i].ds_len & 0xffff;
2121 f->ti_flags = 0;
2122 #if NVLAN > 0
2123 if (ifv != NULL) {
2124 f->ti_flags |= TI_BDFLAG_VLAN_TAG;
2125 f->ti_vlan_tag = ifv->ifv_tag;
2126 } else {
2127 f->ti_vlan_tag = 0;
2128 }
2129 #endif
2130
2131
2132
2133
2134 if ((TI_TX_RING_CNT - (sc->ti_txcnt + cnt)) < 16)
2135 return (ENOBUFS);
2136 cur = frag;
2137 TI_INC(frag, TI_TX_RING_CNT);
2138 cnt++;
2139 }
2140
2141 if (frag == sc->ti_tx_saved_considx)
2142 return (ENOBUFS);
2143
2144 sc->ti_rdata->ti_tx_ring[cur].ti_flags |= TI_BDFLAG_END;
2145
2146 bus_dmamap_sync(sc->sc_dmatag, txmap, 0, txmap->dm_mapsize,
2147 BUS_DMASYNC_PREWRITE);
2148
2149 TI_RING_DMASYNC(sc, ti_tx_ring[cur], BUS_DMASYNC_POSTREAD);
2150
2151 sc->ti_cdata.ti_tx_chain[cur] = m_head;
2152 SLIST_REMOVE_HEAD(&sc->ti_tx_map_listhead, link);
2153 sc->ti_cdata.ti_tx_map[cur] = entry;
2154 sc->ti_txcnt += cnt;
2155
2156 *txidx = frag;
2157
2158 return (0);
2159 }
2160
2161
2162
2163
2164
2165 void
2166 ti_start(struct ifnet *ifp)
2167 {
2168 struct ti_softc *sc;
2169 struct mbuf *m_head = NULL;
2170 u_int32_t prodidx;
2171 int pkts = 0, error;
2172
2173 sc = ifp->if_softc;
2174
2175 prodidx = sc->ti_tx_saved_prodidx;
2176
2177 while(sc->ti_cdata.ti_tx_chain[prodidx] == NULL) {
2178 IFQ_POLL(&ifp->if_snd, m_head);
2179 if (m_head == NULL)
2180 break;
2181
2182
2183
2184
2185
2186
2187 if (sc->ti_hwrev == TI_HWREV_TIGON)
2188 error = ti_encap_tigon1(sc, m_head, &prodidx);
2189 else
2190 error = ti_encap_tigon2(sc, m_head, &prodidx);
2191
2192 if (error) {
2193 ifp->if_flags |= IFF_OACTIVE;
2194 break;
2195 }
2196
2197
2198 IFQ_DEQUEUE(&ifp->if_snd, m_head);
2199 pkts++;
2200
2201
2202
2203
2204
2205 #if NBPFILTER > 0
2206 if (ifp->if_bpf)
2207 bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
2208 #endif
2209 }
2210 if (pkts == 0)
2211 return;
2212
2213
2214 sc->ti_tx_saved_prodidx = prodidx;
2215 CSR_WRITE_4(sc, TI_MB_SENDPROD_IDX, prodidx);
2216
2217
2218
2219
2220 ifp->if_timer = 5;
2221 }
2222
2223 void
2224 ti_init(void *xsc)
2225 {
2226 struct ti_softc *sc = xsc;
2227 int s;
2228
2229 s = splnet();
2230
2231
2232 ti_stop(sc);
2233
2234
2235 if (ti_gibinit(sc)) {
2236 printf("%s: initialization failure\n", sc->sc_dv.dv_xname);
2237 splx(s);
2238 return;
2239 }
2240
2241 splx(s);
2242 }
2243
2244 void
2245 ti_init2(struct ti_softc *sc)
2246 {
2247 struct ti_cmd_desc cmd;
2248 struct ifnet *ifp;
2249 u_int16_t *m;
2250 struct ifmedia *ifm;
2251 int tmp;
2252
2253 ifp = &sc->arpcom.ac_if;
2254
2255
2256 CSR_WRITE_4(sc, TI_GCR_IFINDEX, sc->sc_dv.dv_unit);
2257 CSR_WRITE_4(sc, TI_GCR_IFMTU,
2258 TI_JUMBO_FRAMELEN + ETHER_VLAN_ENCAP_LEN);
2259 TI_DO_CMD(TI_CMD_UPDATE_GENCOM, 0, 0);
2260
2261
2262 m = (u_int16_t *)&sc->arpcom.ac_enaddr[0];
2263 CSR_WRITE_4(sc, TI_GCR_PAR0, htons(m[0]));
2264 CSR_WRITE_4(sc, TI_GCR_PAR1, (htons(m[1]) << 16) | htons(m[2]));
2265 TI_DO_CMD(TI_CMD_SET_MAC_ADDR, 0, 0);
2266
2267
2268 if (ifp->if_flags & IFF_PROMISC) {
2269 TI_DO_CMD(TI_CMD_SET_PROMISC_MODE, TI_CMD_CODE_PROMISC_ENB, 0);
2270 } else {
2271 TI_DO_CMD(TI_CMD_SET_PROMISC_MODE, TI_CMD_CODE_PROMISC_DIS, 0);
2272 }
2273
2274
2275 ti_setmulti(sc);
2276
2277
2278
2279
2280
2281 if (sc->ti_hwrev == TI_HWREV_TIGON) {
2282 TI_DO_CMD(TI_CMD_FDR_FILTERING, TI_CMD_CODE_FILT_ENB, 0);
2283 }
2284
2285
2286 if (ti_init_rx_ring_std(sc) == ENOBUFS)
2287 panic("not enough mbufs for rx ring");
2288
2289
2290 ti_init_rx_ring_jumbo(sc);
2291
2292
2293
2294
2295
2296 if (sc->ti_hwrev == TI_HWREV_TIGON_II)
2297 ti_init_rx_ring_mini(sc);
2298
2299 CSR_WRITE_4(sc, TI_GCR_RXRETURNCONS_IDX, 0);
2300 sc->ti_rx_saved_considx = 0;
2301
2302
2303 ti_init_tx_ring(sc);
2304
2305
2306 TI_DO_CMD(TI_CMD_HOST_STATE, TI_CMD_CODE_STACK_UP, 0);
2307
2308
2309 CSR_WRITE_4(sc, TI_MB_HOSTINTR, 0);
2310
2311 ifp->if_flags |= IFF_RUNNING;
2312 ifp->if_flags &= ~IFF_OACTIVE;
2313
2314
2315
2316
2317
2318
2319
2320 ifm = &sc->ifmedia;
2321 tmp = ifm->ifm_media;
2322 ifm->ifm_media = ifm->ifm_cur->ifm_media;
2323 ti_ifmedia_upd(ifp);
2324 ifm->ifm_media = tmp;
2325 }
2326
2327
2328
2329
2330 int
2331 ti_ifmedia_upd(struct ifnet *ifp)
2332 {
2333 struct ti_softc *sc;
2334 struct ifmedia *ifm;
2335 struct ti_cmd_desc cmd;
2336
2337 sc = ifp->if_softc;
2338 ifm = &sc->ifmedia;
2339
2340 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2341 return (EINVAL);
2342
2343 switch(IFM_SUBTYPE(ifm->ifm_media)) {
2344 case IFM_AUTO:
2345 CSR_WRITE_4(sc, TI_GCR_GLINK, TI_GLNK_PREF|TI_GLNK_1000MB|
2346 TI_GLNK_FULL_DUPLEX|TI_GLNK_RX_FLOWCTL_Y|
2347 TI_GLNK_AUTONEGENB|TI_GLNK_ENB);
2348 CSR_WRITE_4(sc, TI_GCR_LINK, TI_LNK_100MB|TI_LNK_10MB|
2349 TI_LNK_FULL_DUPLEX|TI_LNK_HALF_DUPLEX|
2350 TI_LNK_AUTONEGENB|TI_LNK_ENB);
2351 TI_DO_CMD(TI_CMD_LINK_NEGOTIATION,
2352 TI_CMD_CODE_NEGOTIATE_BOTH, 0);
2353 break;
2354 case IFM_1000_SX:
2355 case IFM_1000_T:
2356 CSR_WRITE_4(sc, TI_GCR_GLINK, TI_GLNK_PREF|TI_GLNK_1000MB|
2357 TI_GLNK_RX_FLOWCTL_Y|TI_GLNK_ENB);
2358 CSR_WRITE_4(sc, TI_GCR_LINK, 0);
2359 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
2360 TI_SETBIT(sc, TI_GCR_GLINK, TI_GLNK_FULL_DUPLEX);
2361 }
2362 TI_DO_CMD(TI_CMD_LINK_NEGOTIATION,
2363 TI_CMD_CODE_NEGOTIATE_GIGABIT, 0);
2364 break;
2365 case IFM_100_FX:
2366 case IFM_10_FL:
2367 case IFM_100_TX:
2368 case IFM_10_T:
2369 CSR_WRITE_4(sc, TI_GCR_GLINK, 0);
2370 CSR_WRITE_4(sc, TI_GCR_LINK, TI_LNK_ENB|TI_LNK_PREF);
2371 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_FX ||
2372 IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX) {
2373 TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_100MB);
2374 } else {
2375 TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_10MB);
2376 }
2377 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
2378 TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_FULL_DUPLEX);
2379 } else {
2380 TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_HALF_DUPLEX);
2381 }
2382 TI_DO_CMD(TI_CMD_LINK_NEGOTIATION,
2383 TI_CMD_CODE_NEGOTIATE_10_100, 0);
2384 break;
2385 }
2386
2387 return (0);
2388 }
2389
2390
2391
2392
2393 void
2394 ti_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2395 {
2396 struct ti_softc *sc;
2397 u_int32_t media = 0;
2398
2399 sc = ifp->if_softc;
2400
2401 ifmr->ifm_status = IFM_AVALID;
2402 ifmr->ifm_active = IFM_ETHER;
2403
2404 if (sc->ti_linkstat == TI_EV_CODE_LINK_DOWN) {
2405 ifmr->ifm_active |= IFM_NONE;
2406 return;
2407 }
2408
2409 ifmr->ifm_status |= IFM_ACTIVE;
2410
2411 if (sc->ti_linkstat == TI_EV_CODE_GIG_LINK_UP) {
2412 media = CSR_READ_4(sc, TI_GCR_GLINK_STAT);
2413 if (sc->ti_copper)
2414 ifmr->ifm_active |= IFM_1000_T;
2415 else
2416 ifmr->ifm_active |= IFM_1000_SX;
2417 if (media & TI_GLNK_FULL_DUPLEX)
2418 ifmr->ifm_active |= IFM_FDX;
2419 else
2420 ifmr->ifm_active |= IFM_HDX;
2421 } else if (sc->ti_linkstat == TI_EV_CODE_LINK_UP) {
2422 media = CSR_READ_4(sc, TI_GCR_LINK_STAT);
2423 if (sc->ti_copper) {
2424 if (media & TI_LNK_100MB)
2425 ifmr->ifm_active |= IFM_100_TX;
2426 if (media & TI_LNK_10MB)
2427 ifmr->ifm_active |= IFM_10_T;
2428 } else {
2429 if (media & TI_LNK_100MB)
2430 ifmr->ifm_active |= IFM_100_FX;
2431 if (media & TI_LNK_10MB)
2432 ifmr->ifm_active |= IFM_10_FL;
2433 }
2434 if (media & TI_LNK_FULL_DUPLEX)
2435 ifmr->ifm_active |= IFM_FDX;
2436 if (media & TI_LNK_HALF_DUPLEX)
2437 ifmr->ifm_active |= IFM_HDX;
2438 }
2439 }
2440
2441 int
2442 ti_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
2443 {
2444 struct ti_softc *sc = ifp->if_softc;
2445 struct ifreq *ifr = (struct ifreq *)data;
2446 struct ifaddr *ifa = (struct ifaddr *)data;
2447 int s, error = 0;
2448 struct ti_cmd_desc cmd;
2449
2450 s = splnet();
2451
2452 if ((error = ether_ioctl(ifp, &sc->arpcom, command, data)) > 0) {
2453 splx(s);
2454 return (error);
2455 }
2456
2457 switch(command) {
2458 case SIOCSIFADDR:
2459 ifp->if_flags |= IFF_UP;
2460 if ((ifp->if_flags & IFF_RUNNING) == 0)
2461 ti_init(sc);
2462 #ifdef INET
2463 if (ifa->ifa_addr->sa_family == AF_INET)
2464 arp_ifinit(&sc->arpcom, ifa);
2465 #endif
2466 break;
2467 case SIOCSIFMTU:
2468 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ifp->if_hardmtu)
2469 error = EINVAL;
2470 else if (ifp->if_mtu != ifr->ifr_mtu)
2471 ifp->if_mtu = ifr->ifr_mtu;
2472 break;
2473 case SIOCSIFFLAGS:
2474 if (ifp->if_flags & IFF_UP) {
2475
2476
2477
2478
2479
2480
2481
2482
2483 if (ifp->if_flags & IFF_RUNNING &&
2484 ifp->if_flags & IFF_PROMISC &&
2485 !(sc->ti_if_flags & IFF_PROMISC)) {
2486 TI_DO_CMD(TI_CMD_SET_PROMISC_MODE,
2487 TI_CMD_CODE_PROMISC_ENB, 0);
2488 } else if (ifp->if_flags & IFF_RUNNING &&
2489 !(ifp->if_flags & IFF_PROMISC) &&
2490 sc->ti_if_flags & IFF_PROMISC) {
2491 TI_DO_CMD(TI_CMD_SET_PROMISC_MODE,
2492 TI_CMD_CODE_PROMISC_DIS, 0);
2493 } else {
2494 if ((ifp->if_flags & IFF_RUNNING) == 0)
2495 ti_init(sc);
2496 }
2497 } else {
2498 if (ifp->if_flags & IFF_RUNNING)
2499 ti_stop(sc);
2500 }
2501 sc->ti_if_flags = ifp->if_flags;
2502 break;
2503 case SIOCADDMULTI:
2504 case SIOCDELMULTI:
2505 error = (command == SIOCADDMULTI) ?
2506 ether_addmulti(ifr, &sc->arpcom) :
2507 ether_delmulti(ifr, &sc->arpcom);
2508
2509 if (error == ENETRESET) {
2510 if (ifp->if_flags & IFF_RUNNING)
2511 ti_setmulti(sc);
2512 error = 0;
2513 }
2514 break;
2515 case SIOCSIFMEDIA:
2516 case SIOCGIFMEDIA:
2517 error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
2518 break;
2519 default:
2520 error = ENOTTY;
2521 break;
2522 }
2523
2524 splx(s);
2525 return (error);
2526 }
2527
2528 void
2529 ti_watchdog(struct ifnet *ifp)
2530 {
2531 struct ti_softc *sc;
2532
2533 sc = ifp->if_softc;
2534
2535 printf("%s: watchdog timeout -- resetting\n", sc->sc_dv.dv_xname);
2536 ti_stop(sc);
2537 ti_init(sc);
2538
2539 ifp->if_oerrors++;
2540 }
2541
2542
2543
2544
2545
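/*
 * Stop the adapter: tell the firmware to bring the network stack down,
 * reset the chip and free the receive and transmit ring resources.
 */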
2546 void
2547 ti_stop(struct ti_softc *sc)
2548 {
2549 struct ifnet *ifp;
2550 struct ti_cmd_desc cmd;
2551
2552 ifp = &sc->arpcom.ac_if;
2553
2554 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2555
2556
2557 CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1);
2558
2559
2560
2561 TI_DO_CMD(TI_CMD_HOST_STATE, TI_CMD_CODE_STACK_DOWN, 0);
2562
2563
2564 ti_chipinit(sc);
2565 ti_mem_set(sc, 0x2000, 0x100000 - 0x2000);
2566 ti_chipinit(sc);
2567
2568
2569 ti_free_rx_ring_std(sc);
2570
2571
2572 ti_free_rx_ring_jumbo(sc);
2573
2574
2575 ti_free_rx_ring_mini(sc);
2576
2577
2578 ti_free_tx_ring(sc);
2579
2580 sc->ti_ev_prodidx.ti_idx = 0;
2581 sc->ti_return_prodidx.ti_idx = 0;
2582 sc->ti_tx_considx.ti_idx = 0;
2583 sc->ti_tx_saved_considx = TI_TXCONS_UNSET;
2584 }
2585
2586
2587
2588
2589
2590 void
2591 ti_shutdown(void *xsc)
2592 {
2593 struct ti_softc *sc;
2594
2595 sc = xsc;
2596
2597 ti_chipinit(sc);
2598 }