This source file includes following definitions.
- vr_mii_sync
- vr_mii_send
- vr_mii_readreg
- vr_mii_writereg
- vr_miibus_readreg
- vr_miibus_writereg
- vr_miibus_statchg
- vr_setmulti
- vr_setcfg
- vr_reset
- vr_probe
- vr_attach
- vr_list_tx_init
- vr_list_rx_init
- vr_rxeof
- vr_rxeoc
- vr_txeof
- vr_tick
- vr_intr
- vr_encap
- vr_start
- vr_init
- vr_ifmedia_upd
- vr_ifmedia_sts
- vr_ioctl
- vr_watchdog
- vr_stop
- vr_shutdown
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64 #include "bpfilter.h"
65
66 #include <sys/param.h>
67 #include <sys/systm.h>
68 #include <sys/sockio.h>
69 #include <sys/mbuf.h>
70 #include <sys/malloc.h>
71 #include <sys/kernel.h>
72 #include <sys/timeout.h>
73 #include <sys/socket.h>
74
75 #include <net/if.h>
76 #include <sys/device.h>
77 #ifdef INET
78 #include <netinet/in.h>
79 #include <netinet/in_systm.h>
80 #include <netinet/in_var.h>
81 #include <netinet/ip.h>
82 #include <netinet/if_ether.h>
83 #endif
84 #include <net/if_dl.h>
85 #include <net/if_media.h>
86
87 #if NBPFILTER > 0
88 #include <net/bpf.h>
89 #endif
90
91 #include <machine/bus.h>
92
93 #include <dev/mii/mii.h>
94 #include <dev/mii/miivar.h>
95
96 #include <dev/pci/pcireg.h>
97 #include <dev/pci/pcivar.h>
98 #include <dev/pci/pcidevs.h>
99
100 #define VR_USEIOSPACE
101 #undef VR_USESWSHIFT
102
103 #include <dev/pci/if_vrreg.h>
104
105 int vr_probe(struct device *, void *, void *);
106 void vr_attach(struct device *, struct device *, void *);
107
108 struct cfattach vr_ca = {
109 sizeof(struct vr_softc), vr_probe, vr_attach
110 };
111 struct cfdriver vr_cd = {
112 0, "vr", DV_IFNET
113 };
114
115 int vr_encap(struct vr_softc *, struct vr_chain *, struct mbuf *);
116 void vr_rxeof(struct vr_softc *);
117 void vr_rxeoc(struct vr_softc *);
118 void vr_txeof(struct vr_softc *);
119 void vr_tick(void *);
120 int vr_intr(void *);
121 void vr_start(struct ifnet *);
122 int vr_ioctl(struct ifnet *, u_long, caddr_t);
123 void vr_init(void *);
124 void vr_stop(struct vr_softc *);
125 void vr_watchdog(struct ifnet *);
126 void vr_shutdown(void *);
127 int vr_ifmedia_upd(struct ifnet *);
128 void vr_ifmedia_sts(struct ifnet *, struct ifmediareq *);
129
130 void vr_mii_sync(struct vr_softc *);
131 void vr_mii_send(struct vr_softc *, u_int32_t, int);
132 int vr_mii_readreg(struct vr_softc *, struct vr_mii_frame *);
133 int vr_mii_writereg(struct vr_softc *, struct vr_mii_frame *);
134 int vr_miibus_readreg(struct device *, int, int);
135 void vr_miibus_writereg(struct device *, int, int, int);
136 void vr_miibus_statchg(struct device *);
137
138 void vr_setcfg(struct vr_softc *, int);
139 void vr_setmulti(struct vr_softc *);
140 void vr_reset(struct vr_softc *);
141 int vr_list_rx_init(struct vr_softc *);
142 int vr_list_tx_init(struct vr_softc *);
143
144 const struct pci_matchid vr_devices[] = {
145 { PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_RHINE },
146 { PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_RHINEII },
147 { PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_RHINEII_2 },
148 { PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT6105 },
149 { PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT6105M },
150 { PCI_VENDOR_DELTA, PCI_PRODUCT_DELTA_RHINEII },
151 { PCI_VENDOR_ADDTRON, PCI_PRODUCT_ADDTRON_RHINEII }
152 };
153
154 #define VR_SETBIT(sc, reg, x) \
155 CSR_WRITE_1(sc, reg, \
156 CSR_READ_1(sc, reg) | (x))
157
158 #define VR_CLRBIT(sc, reg, x) \
159 CSR_WRITE_1(sc, reg, \
160 CSR_READ_1(sc, reg) & ~(x))
161
162 #define VR_SETBIT16(sc, reg, x) \
163 CSR_WRITE_2(sc, reg, \
164 CSR_READ_2(sc, reg) | (x))
165
166 #define VR_CLRBIT16(sc, reg, x) \
167 CSR_WRITE_2(sc, reg, \
168 CSR_READ_2(sc, reg) & ~(x))
169
170 #define VR_SETBIT32(sc, reg, x) \
171 CSR_WRITE_4(sc, reg, \
172 CSR_READ_4(sc, reg) | (x))
173
174 #define VR_CLRBIT32(sc, reg, x) \
175 CSR_WRITE_4(sc, reg, \
176 CSR_READ_4(sc, reg) & ~(x))
177
178 #define SIO_SET(x) \
179 CSR_WRITE_1(sc, VR_MIICMD, \
180 CSR_READ_1(sc, VR_MIICMD) | (x))
181
182 #define SIO_CLR(x) \
183 CSR_WRITE_1(sc, VR_MIICMD, \
184 CSR_READ_1(sc, VR_MIICMD) & ~(x))
185
186 #ifdef VR_USESWSHIFT
187
188
189
/*
 * Sync the PHYs by setting data bit and strobing the clock 32 times.
 */
void
vr_mii_sync(struct vr_softc *sc)
{
	int i;

	SIO_SET(VR_MIICMD_DIR|VR_MIICMD_DATAIN);

	for (i = 0; i < 32; i++) {
		SIO_SET(VR_MIICMD_CLK);
		DELAY(1);
		SIO_CLR(VR_MIICMD_CLK);
		DELAY(1);
	}
}
204
205
206
207
/*
 * Clock a series of bits through the MII, MSB first.
 */
void
vr_mii_send(struct vr_softc *sc, u_int32_t bits, int cnt)
{
	int i;

	SIO_CLR(VR_MIICMD_CLK);

	/* Walk the bits from the high end down, one clock cycle per bit. */
	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
		if (bits & i) {
			SIO_SET(VR_MIICMD_DATAIN);
		} else {
			SIO_CLR(VR_MIICMD_DATAIN);
		}
		DELAY(1);
		SIO_CLR(VR_MIICMD_CLK);
		DELAY(1);
		SIO_SET(VR_MIICMD_CLK);
	}
}
227 #endif
228
229
230
231
/*
 * Read a PHY register through the MII.  Two implementations are
 * provided: a software bit-bang version (VR_USESWSHIFT) and one that
 * uses the chip's built-in MII shift engine.  Returns 1 on a missing
 * ACK (bit-bang variant only), 0 otherwise; the result is left in
 * frame->mii_data.
 */
int
vr_mii_readreg(struct vr_softc *sc, struct vr_mii_frame *frame)
#ifdef VR_USESWSHIFT
{
	int i, ack, s;

	s = splnet();

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = VR_MII_STARTDELIM;
	frame->mii_opcode = VR_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	CSR_WRITE_1(sc, VR_MIICMD, 0);
	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);

	/*
	 * Turn on data xmit.
	 */
	SIO_SET(VR_MIICMD_DIR);

	vr_mii_sync(sc);

	/*
	 * Send command/address info.
	 */
	vr_mii_send(sc, frame->mii_stdelim, 2);
	vr_mii_send(sc, frame->mii_opcode, 2);
	vr_mii_send(sc, frame->mii_phyaddr, 5);
	vr_mii_send(sc, frame->mii_regaddr, 5);

	/* Idle bit. */
	SIO_CLR((VR_MIICMD_CLK|VR_MIICMD_DATAIN));
	DELAY(1);
	SIO_SET(VR_MIICMD_CLK);
	DELAY(1);

	/* Turn off xmit. */
	SIO_CLR(VR_MIICMD_DIR);

	/* Check for ack (0 = PHY acknowledged). */
	SIO_CLR(VR_MIICMD_CLK);
	DELAY(1);
	ack = CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAOUT;
	SIO_SET(VR_MIICMD_CLK);
	DELAY(1);

	/*
	 * Now try reading data bits.  If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for(i = 0; i < 16; i++) {
			SIO_CLR(VR_MIICMD_CLK);
			DELAY(1);
			SIO_SET(VR_MIICMD_CLK);
			DELAY(1);
		}
		goto fail;
	}

	/* Shift in the 16 data bits, MSB first. */
	for (i = 0x8000; i; i >>= 1) {
		SIO_CLR(VR_MIICMD_CLK);
		DELAY(1);
		if (!ack) {
			if (CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAOUT)
				frame->mii_data |= i;
			DELAY(1);
		}
		SIO_SET(VR_MIICMD_CLK);
		DELAY(1);
	}

fail:
	/* Trailing clock cycle to terminate the transaction. */
	SIO_CLR(VR_MIICMD_CLK);
	DELAY(1);
	SIO_SET(VR_MIICMD_CLK);
	DELAY(1);

	splx(s);

	if (ack)
		return(1);
	return(0);
}
#else
{
	int s, i;

	s = splnet();

	/* Set the PHY address (low 5 bits of VR_PHYADDR). */
	CSR_WRITE_1(sc, VR_PHYADDR, (CSR_READ_1(sc, VR_PHYADDR)& 0xe0)|
	    frame->mii_phyaddr);

	/* Set the register address and start the read. */
	CSR_WRITE_1(sc, VR_MIIADDR, frame->mii_regaddr);
	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_READ_ENB);

	/* Poll until the chip clears the read-enable bit (bounded spin). */
	for (i = 0; i < 10000; i++) {
		if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_READ_ENB) == 0)
			break;
		DELAY(1);
	}

	/* NOTE(review): on timeout the data register is read anyway. */
	frame->mii_data = CSR_READ_2(sc, VR_MIIDATA);

	splx(s);

	return(0);
}
#endif
348
349
350
351
352
/*
 * Write to a PHY register through the MII.  Like vr_mii_readreg(),
 * there is a software bit-bang variant and one using the chip's MII
 * shift engine.  Always returns 0.
 */
int
vr_mii_writereg(struct vr_softc *sc, struct vr_mii_frame *frame)
#ifdef VR_USESWSHIFT
{
	int s;

	s = splnet();

	CSR_WRITE_1(sc, VR_MIICMD, 0);
	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);

	/*
	 * Set up frame for TX.
	 */
	frame->mii_stdelim = VR_MII_STARTDELIM;
	frame->mii_opcode = VR_MII_WRITEOP;
	frame->mii_turnaround = VR_MII_TURNAROUND;

	/*
	 * Turn on data output.
	 */
	SIO_SET(VR_MIICMD_DIR);

	vr_mii_sync(sc);

	/* Clock out the complete write frame. */
	vr_mii_send(sc, frame->mii_stdelim, 2);
	vr_mii_send(sc, frame->mii_opcode, 2);
	vr_mii_send(sc, frame->mii_phyaddr, 5);
	vr_mii_send(sc, frame->mii_regaddr, 5);
	vr_mii_send(sc, frame->mii_turnaround, 2);
	vr_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	SIO_SET(VR_MIICMD_CLK);
	DELAY(1);
	SIO_CLR(VR_MIICMD_CLK);
	DELAY(1);

	/*
	 * Turn off xmit.
	 */
	SIO_CLR(VR_MIICMD_DIR);

	splx(s);

	return(0);
}
#else
{
	int s, i;

	s = splnet();

	/* Set the PHY address (low 5 bits of VR_PHYADDR). */
	CSR_WRITE_1(sc, VR_PHYADDR, (CSR_READ_1(sc, VR_PHYADDR)& 0xe0)|
	    frame->mii_phyaddr);

	/* Set the register address and the data, then start the write. */
	CSR_WRITE_1(sc, VR_MIIADDR, frame->mii_regaddr);
	CSR_WRITE_2(sc, VR_MIIDATA, frame->mii_data);

	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_WRITE_ENB);

	/* Poll until the chip clears the write-enable bit (bounded spin). */
	for (i = 0; i < 10000; i++) {
		if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_WRITE_ENB) == 0)
			break;
		DELAY(1);
	}

	splx(s);

	return(0);
}
#endif
428
429 int
430 vr_miibus_readreg(struct device *dev, int phy, int reg)
431 {
432 struct vr_softc *sc = (struct vr_softc *)dev;
433 struct vr_mii_frame frame;
434
435 switch (sc->vr_revid) {
436 case REV_ID_VT6102_APOLLO:
437 case REV_ID_VT6103:
438 if (phy != 1)
439 return 0;
440 default:
441 break;
442 }
443
444 bzero((char *)&frame, sizeof(frame));
445
446 frame.mii_phyaddr = phy;
447 frame.mii_regaddr = reg;
448 vr_mii_readreg(sc, &frame);
449
450 return(frame.mii_data);
451 }
452
453 void
454 vr_miibus_writereg(struct device *dev, int phy, int reg, int data)
455 {
456 struct vr_softc *sc = (struct vr_softc *)dev;
457 struct vr_mii_frame frame;
458
459 switch (sc->vr_revid) {
460 case REV_ID_VT6102_APOLLO:
461 case REV_ID_VT6103:
462 if (phy != 1)
463 return;
464 default:
465 break;
466 }
467
468 bzero((char *)&frame, sizeof(frame));
469
470 frame.mii_phyaddr = phy;
471 frame.mii_regaddr = reg;
472 frame.mii_data = data;
473
474 vr_mii_writereg(sc, &frame);
475 }
476
/*
 * MII status change callback: reprogram the chip's duplex setting to
 * match the currently active media.
 */
void
vr_miibus_statchg(struct device *dev)
{
	struct vr_softc *sc = (struct vr_softc *)dev;

	vr_setcfg(sc, sc->sc_mii.mii_media_active);
}
484
485
486
487
/*
 * Program the 64-bit multicast hash filter (VR_MAR0/VR_MAR1).
 */
void
vr_setmulti(struct vr_softc *sc)
{
	struct ifnet *ifp;
	int h = 0;
	u_int32_t hashes[2] = { 0, 0 };
	struct arpcom *ac = &sc->arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int8_t rxfilt;
	int mcnt = 0;

	ifp = &sc->arpcom.ac_if;

	rxfilt = CSR_READ_1(sc, VR_RXCFG);

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
allmulti:
		/* Accept all multicast: set every hash filter bit. */
		rxfilt |= VR_RXCFG_RX_MULTI;
		CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
		CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
		return;
	}

	/* First, zero out all the existing hash bits. */
	CSR_WRITE_4(sc, VR_MAR0, 0);
	CSR_WRITE_4(sc, VR_MAR1, 0);

	/* Now program new ones. */
	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		/* Address ranges can't be matched by the hash; fall back
		 * to accepting all multicast traffic. */
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			ifp->if_flags |= IFF_ALLMULTI;
			goto allmulti;
		}
		/* Top 6 bits of the big-endian CRC select the filter bit. */
		h = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) >> 26;
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
		mcnt++;

		ETHER_NEXT_MULTI(step, enm);
	}

	/* Only enable multicast reception if at least one group is set. */
	if (mcnt)
		rxfilt |= VR_RXCFG_RX_MULTI;
	else
		rxfilt &= ~VR_RXCFG_RX_MULTI;

	CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
	CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
	CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
}
543
544
545
546
547
548
549 void
550 vr_setcfg(struct vr_softc *sc, int media)
551 {
552 int restart = 0;
553
554 if (CSR_READ_2(sc, VR_COMMAND) & (VR_CMD_TX_ON|VR_CMD_RX_ON)) {
555 restart = 1;
556 VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_TX_ON|VR_CMD_RX_ON));
557 }
558
559 if ((media & IFM_GMASK) == IFM_FDX)
560 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
561 else
562 VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
563
564 if (restart)
565 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_RX_ON);
566 }
567
568 void
569 vr_reset(struct vr_softc *sc)
570 {
571 int i;
572
573 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RESET);
574
575 for (i = 0; i < VR_TIMEOUT; i++) {
576 DELAY(10);
577 if (!(CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RESET))
578 break;
579 }
580 if (i == VR_TIMEOUT) {
581 if (sc->vr_revid < REV_ID_VT3065_A)
582 printf("%s: reset never completed!\n",
583 sc->sc_dev.dv_xname);
584 else {
585 #ifdef VR_DEBUG
586
587 printf("%s: Using force reset command.\n",
588 sc->sc_dev.dv_xname);
589 #endif
590 VR_SETBIT(sc, VR_MISC_CR1, VR_MISCCR1_FORSRST);
591 }
592 }
593
594
595 DELAY(1000);
596 }
597
598
599
600
601 int
602 vr_probe(struct device *parent, void *match, void *aux)
603 {
604 return (pci_matchbyid((struct pci_attach_args *)aux, vr_devices,
605 sizeof(vr_devices)/sizeof(vr_devices[0])));
606 }
607
608
609
610
611
612 void
613 vr_attach(struct device *parent, struct device *self, void *aux)
614 {
615 int i;
616 pcireg_t command;
617 struct vr_softc *sc = (struct vr_softc *)self;
618 struct pci_attach_args *pa = aux;
619 pci_chipset_tag_t pc = pa->pa_pc;
620 pci_intr_handle_t ih;
621 const char *intrstr = NULL;
622 struct ifnet *ifp = &sc->arpcom.ac_if;
623 bus_size_t size;
624 int rseg;
625 caddr_t kva;
626
627
628
629
630 command = pci_conf_read(pa->pa_pc, pa->pa_tag,
631 VR_PCI_CAPID) & 0x000000ff;
632 if (command == 0x01) {
633 command = pci_conf_read(pa->pa_pc, pa->pa_tag,
634 VR_PCI_PWRMGMTCTRL);
635 if (command & VR_PSTATE_MASK) {
636 pcireg_t iobase, membase, irq;
637
638
639 iobase = pci_conf_read(pa->pa_pc, pa->pa_tag,
640 VR_PCI_LOIO);
641 membase = pci_conf_read(pa->pa_pc, pa->pa_tag,
642 VR_PCI_LOMEM);
643 irq = pci_conf_read(pa->pa_pc, pa->pa_tag,
644 VR_PCI_INTLINE);
645
646
647 command &= 0xFFFFFFFC;
648 pci_conf_write(pa->pa_pc, pa->pa_tag,
649 VR_PCI_PWRMGMTCTRL, command);
650
651
652 pci_conf_write(pa->pa_pc, pa->pa_tag,
653 VR_PCI_LOIO, iobase);
654 pci_conf_write(pa->pa_pc, pa->pa_tag,
655 VR_PCI_LOMEM, membase);
656 pci_conf_write(pa->pa_pc, pa->pa_tag,
657 VR_PCI_INTLINE, irq);
658 }
659 }
660
661
662
663
664
665 #ifdef VR_USEIOSPACE
666 if (pci_mapreg_map(pa, VR_PCI_LOIO, PCI_MAPREG_TYPE_IO, 0,
667 &sc->vr_btag, &sc->vr_bhandle, NULL, &size, 0)) {
668 printf(": failed to map i/o space\n");
669 return;
670 }
671 #else
672 if (pci_mapreg_map(pa, VR_PCI_LOMEM, PCI_MAPREG_TYPE_MEM, 0,
673 &sc->vr_btag, &sc->vr_bhandle, NULL, &size, 0)) {
674 printf(": failed to map memory space\n");
675 return;
676 }
677 #endif
678
679
680 if (pci_intr_map(pa, &ih)) {
681 printf(": couldn't map interrupt\n");
682 goto fail_1;
683 }
684 intrstr = pci_intr_string(pc, ih);
685 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, vr_intr, sc,
686 self->dv_xname);
687 if (sc->sc_ih == NULL) {
688 printf(": could not establish interrupt");
689 if (intrstr != NULL)
690 printf(" at %s", intrstr);
691 printf("\n");
692 goto fail_1;
693 }
694 printf(": %s", intrstr);
695
696 sc->vr_revid = PCI_REVISION(pa->pa_class);
697
698
699
700
701
702
703 VR_CLRBIT(sc, VR_STICKHW, (VR_STICKHW_DS0|VR_STICKHW_DS1));
704
705
706 vr_reset(sc);
707
708
709
710
711
712 pci_conf_write(pa->pa_pc, pa->pa_tag, VR_PCI_MODE,
713 pci_conf_read(pa->pa_pc, pa->pa_tag, VR_PCI_MODE) |
714 (VR_MODE3_MIION << 24));
715 VR_CLRBIT(sc, VR_MIICMD, VR_MIICMD_AUTOPOLL);
716
717
718
719
720
721
722
723
724 VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD);
725 DELAY(1000);
726 for (i = 0; i < ETHER_ADDR_LEN; i++)
727 sc->arpcom.ac_enaddr[i] = CSR_READ_1(sc, VR_PAR0 + i);
728
729
730
731
732 printf(", address %s\n", ether_sprintf(sc->arpcom.ac_enaddr));
733
734 sc->sc_dmat = pa->pa_dmat;
735 if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct vr_list_data),
736 PAGE_SIZE, 0, &sc->sc_listseg, 1, &rseg, BUS_DMA_NOWAIT)) {
737 printf(": can't alloc list\n");
738 goto fail_2;
739 }
740 if (bus_dmamem_map(sc->sc_dmat, &sc->sc_listseg, rseg,
741 sizeof(struct vr_list_data), &kva, BUS_DMA_NOWAIT)) {
742 printf(": can't map dma buffers (%d bytes)\n",
743 sizeof(struct vr_list_data));
744 goto fail_3;
745 }
746 if (bus_dmamap_create(sc->sc_dmat, sizeof(struct vr_list_data), 1,
747 sizeof(struct vr_list_data), 0, BUS_DMA_NOWAIT, &sc->sc_listmap)) {
748 printf(": can't create dma map\n");
749 goto fail_4;
750 }
751 if (bus_dmamap_load(sc->sc_dmat, sc->sc_listmap, kva,
752 sizeof(struct vr_list_data), NULL, BUS_DMA_NOWAIT)) {
753 printf(": can't load dma map\n");
754 goto fail_5;
755 }
756 sc->vr_ldata = (struct vr_list_data *)kva;
757 bzero(sc->vr_ldata, sizeof(struct vr_list_data));
758
759 ifp = &sc->arpcom.ac_if;
760 ifp->if_softc = sc;
761 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
762 ifp->if_ioctl = vr_ioctl;
763 ifp->if_start = vr_start;
764 ifp->if_watchdog = vr_watchdog;
765 ifp->if_baudrate = 10000000;
766 IFQ_SET_READY(&ifp->if_snd);
767 bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
768
769
770
771
772 sc->sc_mii.mii_ifp = ifp;
773 sc->sc_mii.mii_readreg = vr_miibus_readreg;
774 sc->sc_mii.mii_writereg = vr_miibus_writereg;
775 sc->sc_mii.mii_statchg = vr_miibus_statchg;
776 ifmedia_init(&sc->sc_mii.mii_media, 0, vr_ifmedia_upd, vr_ifmedia_sts);
777 mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY,
778 0);
779 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
780 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
781 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
782 } else
783 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
784 timeout_set(&sc->sc_to, vr_tick, sc);
785
786
787
788
789 if_attach(ifp);
790 ether_ifattach(ifp);
791
792 shutdownhook_establish(vr_shutdown, sc);
793 return;
794
795 fail_5:
796 bus_dmamap_destroy(sc->sc_dmat, sc->sc_listmap);
797
798 fail_4:
799 bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(struct vr_list_data));
800
801 fail_3:
802 bus_dmamem_free(sc->sc_dmat, &sc->sc_listseg, rseg);
803
804 fail_2:
805 pci_intr_disestablish(pc, sc->sc_ih);
806
807 fail_1:
808 bus_space_unmap(sc->vr_btag, sc->vr_bhandle, size);
809 }
810
811
812
813
814 int
815 vr_list_tx_init(struct vr_softc *sc)
816 {
817 struct vr_chain_data *cd;
818 struct vr_list_data *ld;
819 int i;
820
821 cd = &sc->vr_cdata;
822 ld = sc->vr_ldata;
823 for (i = 0; i < VR_TX_LIST_CNT; i++) {
824 cd->vr_tx_chain[i].vr_ptr = &ld->vr_tx_list[i];
825 cd->vr_tx_chain[i].vr_paddr =
826 sc->sc_listmap->dm_segs[0].ds_addr +
827 offsetof(struct vr_list_data, vr_tx_list[i]);
828
829 if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
830 MCLBYTES, 0, BUS_DMA_NOWAIT, &cd->vr_tx_chain[i].vr_map))
831 return (ENOBUFS);
832
833 if (i == (VR_TX_LIST_CNT - 1))
834 cd->vr_tx_chain[i].vr_nextdesc =
835 &cd->vr_tx_chain[0];
836 else
837 cd->vr_tx_chain[i].vr_nextdesc =
838 &cd->vr_tx_chain[i + 1];
839 }
840
841 cd->vr_tx_cons = cd->vr_tx_prod = &cd->vr_tx_chain[0];
842
843 return (0);
844 }
845
846
847
848
849
850
851
/*
 * Initialize the RX descriptors: allocate a receive buffer and DMA
 * map for each slot, point the descriptor at the buffer, and link the
 * descriptors into a ring.  Returns ENOBUFS on any allocation
 * failure, 0 on success.
 */
int
vr_list_rx_init(struct vr_softc *sc)
{
	struct vr_chain_data *cd;
	struct vr_list_data *ld;
	int i;
	struct vr_desc *d;

	cd = &sc->vr_cdata;
	ld = sc->vr_ldata;

	for (i = 0; i < VR_RX_LIST_CNT; i++) {
		d = (struct vr_desc *)&ld->vr_rx_list[i];
		cd->vr_rx_chain[i].vr_ptr = d;
		cd->vr_rx_chain[i].vr_paddr =
		    sc->sc_listmap->dm_segs[0].ds_addr +
		    offsetof(struct vr_list_data, vr_rx_list[i]);
		cd->vr_rx_chain[i].vr_buf =
		    (u_int8_t *)malloc(MCLBYTES, M_DEVBUF, M_NOWAIT);
		if (cd->vr_rx_chain[i].vr_buf == NULL)
			return (ENOBUFS);

		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT | BUS_DMA_READ,
		    &cd->vr_rx_chain[i].vr_map))
			return (ENOBUFS);

		if (bus_dmamap_load(sc->sc_dmat, cd->vr_rx_chain[i].vr_map,
		    cd->vr_rx_chain[i].vr_buf, MCLBYTES, NULL, BUS_DMA_NOWAIT))
			return (ENOBUFS);
		bus_dmamap_sync(sc->sc_dmat, cd->vr_rx_chain[i].vr_map,
		    0, cd->vr_rx_chain[i].vr_map->dm_mapsize,
		    BUS_DMASYNC_PREREAD);

		/*
		 * Hand the descriptor to the chip.  The data pointer is
		 * offset by sizeof(u_int64_t) into the buffer; vr_rxeof()
		 * reads the payload back at the same offset -- presumably
		 * headroom for payload alignment (TODO confirm).
		 */
		d->vr_status = htole32(VR_RXSTAT);
		d->vr_data =
		    htole32(cd->vr_rx_chain[i].vr_map->dm_segs[0].ds_addr +
		    sizeof(u_int64_t));
		d->vr_ctl = htole32(VR_RXCTL | VR_RXLEN);

		/* Link into a ring: last descriptor points at the first. */
		if (i == (VR_RX_LIST_CNT - 1)) {
			cd->vr_rx_chain[i].vr_nextdesc =
			    &cd->vr_rx_chain[0];
			ld->vr_rx_list[i].vr_next =
			    htole32(sc->sc_listmap->dm_segs[0].ds_addr +
			    offsetof(struct vr_list_data, vr_rx_list[0]));
		} else {
			cd->vr_rx_chain[i].vr_nextdesc =
			    &cd->vr_rx_chain[i + 1];
			ld->vr_rx_list[i].vr_next =
			    htole32(sc->sc_listmap->dm_segs[0].ds_addr +
			    offsetof(struct vr_list_data, vr_rx_list[i + 1]));
		}
	}

	cd->vr_rx_head = &cd->vr_rx_chain[0];

	/* Flush the descriptor list to the device before starting RX. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap, 0,
	    sc->sc_listmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

	return(0);
}
915
916
917
918
919
/*
 * A frame has been uploaded: pass the resulting mbuf chain up to the
 * higher level protocols.  Walks the RX ring from vr_rx_head until a
 * descriptor still owned by the chip is found, copying each completed
 * frame into a fresh mbuf and recycling the descriptor in place.
 */
void
vr_rxeof(struct vr_softc *sc)
{
	struct mbuf *m0;
	struct ifnet *ifp;
	struct vr_chain_onefrag *cur_rx;
	int total_len = 0;
	u_int32_t rxstat;

	ifp = &sc->arpcom.ac_if;

	for (;;) {
		/* Sync the descriptor list before reading the status word. */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
		    0, sc->sc_listmap->dm_mapsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		rxstat = letoh32(sc->vr_cdata.vr_rx_head->vr_ptr->vr_status);
		if (rxstat & VR_RXSTAT_OWN)
			break;

		m0 = NULL;
		cur_rx = sc->vr_cdata.vr_rx_head;
		sc->vr_cdata.vr_rx_head = cur_rx->vr_nextdesc;

		/*
		 * If an error occurs, count it, recycle the descriptor
		 * and move on to the next frame without passing anything
		 * up the stack.
		 */
		if (rxstat & VR_RXSTAT_RXERR) {
			ifp->if_ierrors++;
#ifdef VR_DEBUG
			printf("%s: rx error (%02x):",
			    sc->sc_dev.dv_xname, rxstat & 0x000000ff);
			if (rxstat & VR_RXSTAT_CRCERR)
				printf(" crc error");
			if (rxstat & VR_RXSTAT_FRAMEALIGNERR)
				printf(" frame alignment error");
			if (rxstat & VR_RXSTAT_FIFOOFLOW)
				printf(" FIFO overflow");
			if (rxstat & VR_RXSTAT_GIANT)
				printf(" received giant packet");
			if (rxstat & VR_RXSTAT_RUNT)
				printf(" received runt packet");
			if (rxstat & VR_RXSTAT_BUSERR)
				printf(" system bus error");
			if (rxstat & VR_RXSTAT_BUFFERR)
				printf(" rx buffer error");
			printf("\n");
#endif

			/* Reinitialize descriptor and give it back. */
			cur_rx->vr_ptr->vr_status = htole32(VR_RXSTAT);
			cur_rx->vr_ptr->vr_data =
			    htole32(cur_rx->vr_map->dm_segs[0].ds_addr +
			    sizeof(u_int64_t));
			cur_rx->vr_ptr->vr_ctl = htole32(VR_RXCTL | VR_RXLEN);
			bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
			    0, sc->sc_listmap->dm_mapsize,
			    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
			continue;
		}

		/* No errors; receive the packet. */
		total_len = VR_RXBYTES(letoh32(cur_rx->vr_ptr->vr_status));

		/* The hardware-reported length includes the CRC. */
		total_len -= ETHER_CRC_LEN;

		/*
		 * Copy the frame out of the DMA buffer into a new mbuf
		 * chain; the extra ETHER_ALIGN bytes are trimmed below so
		 * the IP header ends up aligned.
		 */
		bus_dmamap_sync(sc->sc_dmat, cur_rx->vr_map, 0,
		    cur_rx->vr_map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		m0 = m_devget(cur_rx->vr_buf + sizeof(u_int64_t) - ETHER_ALIGN,
		    total_len + ETHER_ALIGN, 0, ifp, NULL);
		bus_dmamap_sync(sc->sc_dmat, cur_rx->vr_map, 0,
		    cur_rx->vr_map->dm_mapsize,
		    BUS_DMASYNC_PREREAD);

		/* Recycle the descriptor back to the chip. */
		cur_rx->vr_ptr->vr_status = htole32(VR_RXSTAT);
		cur_rx->vr_ptr->vr_data =
		    htole32(cur_rx->vr_map->dm_segs[0].ds_addr +
		    sizeof(u_int64_t));
		cur_rx->vr_ptr->vr_ctl = htole32(VR_RXCTL | VR_RXLEN);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap, 0,
		    sc->sc_listmap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

		if (m0 == NULL) {
			ifp->if_ierrors++;
			continue;
		}
		m_adj(m0, ETHER_ALIGN);

		ifp->if_ipackets++;

#if NBPFILTER > 0
		/*
		 * Handle BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0, BPF_DIRECTION_IN);
#endif

		/* Pass the frame up the stack. */
		ether_input_mbuf(ifp, m0);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    0, sc->sc_listmap->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
1038
1039 void
1040 vr_rxeoc(struct vr_softc *sc)
1041 {
1042 struct ifnet *ifp;
1043 int i;
1044
1045 ifp = &sc->arpcom.ac_if;
1046
1047 ifp->if_ierrors++;
1048
1049 VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
1050 DELAY(10000);
1051
1052 for (i = 0x400;
1053 i && (CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RX_ON);
1054 i--)
1055 ;
1056
1057 if (!i) {
1058 printf("%s: rx shutdown error!\n", sc->sc_dev.dv_xname);
1059 sc->vr_flags |= VR_F_RESTART;
1060 return;
1061 }
1062
1063 vr_rxeof(sc);
1064
1065 CSR_WRITE_4(sc, VR_RXADDR, sc->vr_cdata.vr_rx_head->vr_paddr);
1066 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
1067 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_GO);
1068 }
1069
1070
1071
1072
1073
1074
/*
 * A frame was downloaded to the chip.  Walk the TX ring from the
 * consumer pointer, freeing mbufs and unloading DMA maps for frames
 * the chip has finished with, and updating error/collision counters.
 * On an abort/underflow, restart transmission from the failed
 * descriptor (or flag a full restart if the transmitter won't stop).
 */
void
vr_txeof(struct vr_softc *sc)
{
	struct vr_chain *cur_tx;
	struct ifnet *ifp;

	ifp = &sc->arpcom.ac_if;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	cur_tx = sc->vr_cdata.vr_tx_cons;
	while(cur_tx->vr_mbuf != NULL) {
		u_int32_t txstat;
		int i;

		txstat = letoh32(cur_tx->vr_ptr->vr_status);

		if ((txstat & VR_TXSTAT_ABRT) ||
		    (txstat & VR_TXSTAT_UDF)) {
			/* Wait (bounded) for the transmitter to stop. */
			for (i = 0x400;
			    i && (CSR_READ_2(sc, VR_COMMAND) & VR_CMD_TX_ON);
			    i--)
				;
			if (!i) {
				printf("%s: tx shutdown timeout\n",
				    sc->sc_dev.dv_xname);
				sc->vr_flags |= VR_F_RESTART;
				break;
			}
			/* Requeue the failed descriptor and retry it. */
			VR_TXOWN(cur_tx) = htole32(VR_TXSTAT_OWN);
			CSR_WRITE_4(sc, VR_TXADDR, cur_tx->vr_paddr);
			break;
		}

		/* Chip still owns this descriptor; stop here. */
		if (txstat & VR_TXSTAT_OWN)
			break;

		if (txstat & VR_TXSTAT_ERRSUM) {
			ifp->if_oerrors++;
			if (txstat & VR_TXSTAT_DEFER)
				ifp->if_collisions++;
			if (txstat & VR_TXSTAT_LATECOLL)
				ifp->if_collisions++;
		}

		/* Collision count lives in bits 3+ of the status word. */
		ifp->if_collisions +=(txstat & VR_TXSTAT_COLLCNT) >> 3;

		ifp->if_opackets++;
		if (cur_tx->vr_map != NULL && cur_tx->vr_map->dm_nsegs > 0)
			bus_dmamap_unload(sc->sc_dmat, cur_tx->vr_map);

		m_freem(cur_tx->vr_mbuf);
		cur_tx->vr_mbuf = NULL;
		/* A slot freed up, so the queue is no longer full. */
		ifp->if_flags &= ~IFF_OACTIVE;

		cur_tx = cur_tx->vr_nextdesc;
	}

	sc->vr_cdata.vr_tx_cons = cur_tx;
	/* Ring fully drained: cancel the watchdog. */
	if (cur_tx->vr_mbuf == NULL)
		ifp->if_timer = 0;
}
1139
1140 void
1141 vr_tick(void *xsc)
1142 {
1143 struct vr_softc *sc = xsc;
1144 int s;
1145
1146 s = splnet();
1147 if (sc->vr_flags & VR_F_RESTART) {
1148 printf("%s: restarting\n", sc->sc_dev.dv_xname);
1149 vr_stop(sc);
1150 vr_reset(sc);
1151 vr_init(sc);
1152 sc->vr_flags &= ~VR_F_RESTART;
1153 }
1154
1155 mii_tick(&sc->sc_mii);
1156 timeout_add(&sc->sc_to, hz);
1157 splx(s);
1158 }
1159
/*
 * Interrupt handler.  Masks chip interrupts, services every pending
 * condition (RX completion, RX errors, TX completion, TX errors, bus
 * errors) until the status register is clear, then re-enables
 * interrupts and kicks the transmit queue.  Returns nonzero if any
 * interrupt was ours.
 */
int
vr_intr(void *arg)
{
	struct vr_softc *sc;
	struct ifnet *ifp;
	u_int16_t status;
	int claimed = 0;

	sc = arg;
	ifp = &sc->arpcom.ac_if;

	/* Suppress unwanted interrupts. */
	if (!(ifp->if_flags & IFF_UP)) {
		vr_stop(sc);
		return 0;
	}

	/* Disable interrupts while we service them. */
	CSR_WRITE_2(sc, VR_IMR, 0x0000);

	for (;;) {
		/* Read and acknowledge pending interrupt causes. */
		status = CSR_READ_2(sc, VR_ISR);
		if (status)
			CSR_WRITE_2(sc, VR_ISR, status);

		if ((status & VR_INTRS) == 0)
			break;

		claimed = 1;

		if (status & VR_ISR_RX_OK)
			vr_rxeof(sc);

		if (status & VR_ISR_RX_DROPPED) {
#ifdef VR_DEBUG
			printf("%s: rx packet lost\n", sc->sc_dev.dv_xname);
#endif
			ifp->if_ierrors++;
		}

		/* RX errors require stopping and restarting the receiver. */
		if ((status & VR_ISR_RX_ERR) || (status & VR_ISR_RX_NOBUF) ||
		    (status & VR_ISR_RX_OFLOW)) {
#ifdef VR_DEBUG
			printf("%s: receive error (%04x)",
			    sc->sc_dev.dv_xname, status);
			if (status & VR_ISR_RX_NOBUF)
				printf(" no buffers");
			if (status & VR_ISR_RX_OFLOW)
				printf(" overflow");
			printf("\n");
#endif
			vr_rxeoc(sc);
		}

		/* Fatal conditions: reset and reinitialize the chip. */
		if ((status & VR_ISR_BUSERR) || (status & VR_ISR_TX_UNDERRUN)) {
#ifdef VR_DEBUG
			if (status & VR_ISR_BUSERR)
				printf("%s: PCI bus error\n",
				    sc->sc_dev.dv_xname);
			if (status & VR_ISR_TX_UNDERRUN)
				printf("%s: transmit underrun\n",
				    sc->sc_dev.dv_xname);
#endif
			vr_reset(sc);
			vr_init(sc);
			break;
		}

		/* TX completion and TX error handling. */
		if ((status & VR_ISR_TX_OK) || (status & VR_ISR_TX_ABRT) ||
		    (status & VR_ISR_TX_ABRT2) || (status & VR_ISR_UDFI)) {
			vr_txeof(sc);
			if ((status & VR_ISR_UDFI) ||
			    (status & VR_ISR_TX_ABRT2) ||
			    (status & VR_ISR_TX_ABRT)) {
#ifdef VR_DEBUG
				if (status & (VR_ISR_TX_ABRT | VR_ISR_TX_ABRT2))
					printf("%s: transmit aborted\n",
					    sc->sc_dev.dv_xname);
				if (status & VR_ISR_UDFI)
					printf("%s: transmit underflow\n",
					    sc->sc_dev.dv_xname);
#endif
				ifp->if_oerrors++;
				/* Frames still queued: restart the TX unit. */
				if (sc->vr_cdata.vr_tx_cons->vr_mbuf != NULL) {
					VR_SETBIT16(sc, VR_COMMAND,
					    VR_CMD_TX_ON);
					VR_SETBIT16(sc, VR_COMMAND,
					    VR_CMD_TX_GO);
				}
			}
		}
	}

	/* Re-enable interrupts. */
	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);

	/* Push out anything that queued up while interrupts were off. */
	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		vr_start(ifp);

	return (claimed);
}
1262
1263
1264
1265
1266
/*
 * Encapsulate an mbuf chain in a TX descriptor.  The chain is
 * coalesced into a single new mbuf (cluster if needed), short frames
 * are zero-padded to VR_MIN_FRAMELEN, and the result is DMA-loaded
 * into the descriptor.  On success the original chain is freed and 0
 * is returned; on failure 1 is returned and m_head is left untouched.
 */
int
vr_encap(struct vr_softc *sc, struct vr_chain *c, struct mbuf *m_head)
{
	struct vr_desc *f = NULL;
	struct mbuf *m_new = NULL;

	MGETHDR(m_new, M_DONTWAIT, MT_DATA);
	if (m_new == NULL)
		return (1);
	if (m_head->m_pkthdr.len > MHLEN) {
		/* NOTE(review): assumes the frame fits in one cluster
		 * (pkthdr.len <= MCLBYTES), presumably bounded by the MTU
		 * -- TODO confirm. */
		MCLGET(m_new, M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			m_freem(m_new);
			return (1);
		}
	}
	/* Flatten the chain into the single new mbuf. */
	m_copydata(m_head, 0, m_head->m_pkthdr.len, mtod(m_new, caddr_t));
	m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;

	/*
	 * Zero-pad short frames out to the minimum frame length.
	 */
	if (m_new->m_len < VR_MIN_FRAMELEN) {
		bzero(&m_new->m_data[m_new->m_len],
		    VR_MIN_FRAMELEN-m_new->m_len);
		m_new->m_pkthdr.len += VR_MIN_FRAMELEN - m_new->m_len;
		m_new->m_len = m_new->m_pkthdr.len;
	}

	if (bus_dmamap_load_mbuf(sc->sc_dmat, c->vr_map, m_new,
	    BUS_DMA_NOWAIT | BUS_DMA_WRITE)) {
		m_freem(m_new);
		return (1);
	}
	bus_dmamap_sync(sc->sc_dmat, c->vr_map, 0, c->vr_map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* The coalesced copy replaces the original chain. */
	m_freem(m_head);

	c->vr_mbuf = m_new;

	/* Fill in the descriptor: single fragment, interrupt on done. */
	f = c->vr_ptr;
	f->vr_data = htole32(c->vr_map->dm_segs[0].ds_addr);
	f->vr_ctl = htole32(c->vr_map->dm_mapsize);
	f->vr_ctl |= htole32(VR_TXCTL_TLINK|VR_TXCTL_FIRSTFRAG);
	f->vr_status = htole32(0);

	f->vr_ctl |= htole32(VR_TXCTL_LASTFRAG|VR_TXCTL_FINT);
	f->vr_next = htole32(c->vr_nextdesc->vr_paddr);

	return (0);
}
1322
1323
1324
1325
1326
1327
1328
1329
/*
 * Main transmit routine.  Dequeue packets from the interface send
 * queue, encapsulate each into a free TX ring slot, hand the
 * descriptors to the chip, and kick the transmitter.  The descriptor
 * ownership bit is only set AFTER encapsulation succeeds, so the chip
 * never sees a half-built descriptor.
 */
void
vr_start(struct ifnet *ifp)
{
	struct vr_softc *sc;
	struct mbuf *m_head;
	struct vr_chain *cur_tx;

	if (ifp->if_flags & IFF_OACTIVE)
		return;

	sc = ifp->if_softc;

	/* Fill free slots starting at the producer pointer. */
	cur_tx = sc->vr_cdata.vr_tx_prod;
	while (cur_tx->vr_mbuf == NULL) {
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/* Pack the data into the descriptor. */
		if (vr_encap(sc, cur_tx, m_head)) {
			/* With ALTQ the packet can't be requeued. */
			if (ALTQ_IS_ENABLED(&ifp->if_snd))
				m_freem(m_head);
			else
				IF_PREPEND(&ifp->if_snd, m_head);
			break;
		}

		/* Hand the descriptor to the chip. */
		VR_TXOWN(cur_tx) = htole32(VR_TXSTAT_OWN);

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, cur_tx->vr_mbuf,
			    BPF_DIRECTION_OUT);
#endif
		cur_tx = cur_tx->vr_nextdesc;
	}
	/* Anything queued (or the ring already full)?  Start the chip. */
	if (cur_tx != sc->vr_cdata.vr_tx_prod || cur_tx->vr_mbuf != NULL) {
		sc->vr_cdata.vr_tx_prod = cur_tx;

		/* Flush descriptors to the device before the doorbell. */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap, 0,
		    sc->sc_listmap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

		/* Tell the chip to start transmitting. */
		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_GO);

		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = 5;

		/* Ring full: stall the queue until vr_txeof() drains it. */
		if (cur_tx->vr_mbuf != NULL)
			ifp->if_flags |= IFF_OACTIVE;
	}
}
1388
/*
 * Initialize the hardware and mark the interface up: reset the chip,
 * program the station address, DMA/threshold configuration and RX
 * filter, initialize both descriptor rings, enable TX/RX and
 * interrupts, and start the periodic tick.
 */
void
vr_init(void *xsc)
{
	struct vr_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii = &sc->sc_mii;
	int s, i;

	s = splnet();

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	vr_stop(sc);
	vr_reset(sc);

	/*
	 * Set our station address.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VR_PAR0 + i, sc->arpcom.ac_enaddr[i]);

	/* Use store-and-forward DMA. */
	VR_CLRBIT(sc, VR_BCR0, VR_BCR0_DMA_LENGTH);
	VR_SETBIT(sc, VR_BCR0, VR_BCR0_DMA_STORENFWD);

	/*
	 * Program RX/TX FIFO thresholds: 128 bytes for RX,
	 * store-and-forward for TX.
	 */
	VR_CLRBIT(sc, VR_BCR0, VR_BCR0_RX_THRESH);
	VR_SETBIT(sc, VR_BCR0, VR_BCR0_RXTHRESH128BYTES);

	VR_CLRBIT(sc, VR_BCR1, VR_BCR1_TX_THRESH);
	VR_SETBIT(sc, VR_BCR1, VR_BCR1_TXTHRESHSTORENFWD);

	VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH);
	VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_128BYTES);

	VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH);
	VR_SETBIT(sc, VR_TXCFG, VR_TXTHRESH_STORENFWD);

	/* Init circular RX list. */
	if (vr_list_rx_init(sc) == ENOBUFS) {
		printf("%s: initialization failed: no memory for rx buffers\n",
		    sc->sc_dev.dv_xname);
		vr_stop(sc);
		splx(s);
		return;
	}

	/*
	 * Init tx descriptors.
	 */
	if (vr_list_tx_init(sc) == ENOBUFS) {
		printf("%s: initialization failed: no memory for tx buffers\n",
		    sc->sc_dev.dv_xname);
		vr_stop(sc);
		splx(s);
		return;
	}

	/* Set promiscuous mode to match the interface flags. */
	if (ifp->if_flags & IFF_PROMISC)
		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);
	else
		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);

	/* Set capture broadcast bit to capture broadcast frames. */
	if (ifp->if_flags & IFF_BROADCAST)
		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);
	else
		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);

	/*
	 * Program the multicast filter, if necessary.
	 */
	vr_setmulti(sc);

	/*
	 * Load the address of the RX list.
	 */
	CSR_WRITE_4(sc, VR_RXADDR, sc->vr_cdata.vr_rx_head->vr_paddr);

	/* Enable receiver and transmitter. */
	CSR_WRITE_2(sc, VR_COMMAND, VR_CMD_TX_NOPOLL|VR_CMD_START|
	    VR_CMD_TX_ON|VR_CMD_RX_ON|
	    VR_CMD_RX_GO);

	CSR_WRITE_4(sc, VR_TXADDR, sc->sc_listmap->dm_segs[0].ds_addr +
	    offsetof(struct vr_list_data, vr_tx_list[0]));

	/*
	 * Acknowledge any stale interrupts and enable the ones we use.
	 */
	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);

	/* Restart MII autonegotiation. */
	mii_mediachg(mii);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Start the one-second periodic tick. */
	if (!timeout_pending(&sc->sc_to))
		timeout_add(&sc->sc_to, hz);

	splx(s);
}
1498
1499
1500
1501
1502 int
1503 vr_ifmedia_upd(struct ifnet *ifp)
1504 {
1505 struct vr_softc *sc = ifp->if_softc;
1506
1507 if (ifp->if_flags & IFF_UP)
1508 vr_init(sc);
1509
1510 return (0);
1511 }
1512
1513
1514
1515
1516 void
1517 vr_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1518 {
1519 struct vr_softc *sc = ifp->if_softc;
1520 struct mii_data *mii = &sc->sc_mii;
1521
1522 mii_pollstat(mii);
1523 ifmr->ifm_active = mii->mii_media_active;
1524 ifmr->ifm_status = mii->mii_media_status;
1525 }
1526
/*
 * Handle socket ioctls: address assignment, interface flag changes,
 * multicast list updates and media selection.
 */
int
vr_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct vr_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, error = 0;
	struct ifaddr *ifa = (struct ifaddr *)data;

	s = splnet();

	/* Give the common ethernet code first crack at the request. */
	if ((error = ether_ioctl(ifp, &sc->arpcom, command, data)) > 0) {
		splx(s);
		return error;
	}

	switch(command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			vr_init(sc);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->arpcom, ifa);
#endif
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * Compare the new flags against the snapshot kept in
			 * sc_if_flags so PROMISC/ALLMULTI toggles can be
			 * applied without a full reinitialization.
			 */
			if (ifp->if_flags & IFF_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->sc_if_flags & IFF_PROMISC)) {
				/* Promiscuous mode was just enabled. */
				VR_SETBIT(sc, VR_RXCFG,
				    VR_RXCFG_RX_PROMISC);
				vr_setmulti(sc);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->sc_if_flags & IFF_PROMISC) {
				/* Promiscuous mode was just disabled. */
				VR_CLRBIT(sc, VR_RXCFG,
				    VR_RXCFG_RX_PROMISC);
				vr_setmulti(sc);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    (ifp->if_flags ^ sc->sc_if_flags) & IFF_ALLMULTI) {
				/* ALLMULTI changed; refresh the filter. */
				vr_setmulti(sc);
			} else {
				if (!(ifp->if_flags & IFF_RUNNING))
					vr_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				vr_stop(sc);
		}
		/* Remember the flags for the next SIOCSIFFLAGS comparison. */
		sc->sc_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (command == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->arpcom) :
		    ether_delmulti(ifr, &sc->arpcom);

		if (error == ENETRESET) {
			/*
			 * The multicast list changed; reprogram the hardware
			 * filter only if the interface is running.
			 */
			if (ifp->if_flags & IFF_RUNNING)
				vr_setmulti(sc);
			error = 0;
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
		break;
	default:
		error = ENOTTY;
		break;
	}

	splx(s);

	return(error);
}
1608
1609 void
1610 vr_watchdog(struct ifnet *ifp)
1611 {
1612 struct vr_softc *sc;
1613
1614 sc = ifp->if_softc;
1615
1616 ifp->if_oerrors++;
1617 printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
1618
1619 vr_stop(sc);
1620 vr_reset(sc);
1621 vr_init(sc);
1622
1623 if (!IFQ_IS_EMPTY(&ifp->if_snd))
1624 vr_start(ifp);
1625 }
1626
1627
1628
1629
1630
1631 void
1632 vr_stop(struct vr_softc *sc)
1633 {
1634 int i;
1635 struct ifnet *ifp;
1636 bus_dmamap_t map;
1637
1638 ifp = &sc->arpcom.ac_if;
1639 ifp->if_timer = 0;
1640
1641 if (timeout_pending(&sc->sc_to))
1642 timeout_del(&sc->sc_to);
1643
1644 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1645
1646 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP);
1647 VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON));
1648 CSR_WRITE_2(sc, VR_IMR, 0x0000);
1649 CSR_WRITE_4(sc, VR_TXADDR, 0x00000000);
1650 CSR_WRITE_4(sc, VR_RXADDR, 0x00000000);
1651
1652
1653
1654
1655 for (i = 0; i < VR_RX_LIST_CNT; i++) {
1656
1657 if (sc->vr_cdata.vr_rx_chain[i].vr_buf != NULL) {
1658 free(sc->vr_cdata.vr_rx_chain[i].vr_buf, M_DEVBUF);
1659 sc->vr_cdata.vr_rx_chain[i].vr_buf = NULL;
1660 }
1661
1662 map = sc->vr_cdata.vr_rx_chain[i].vr_map;
1663 if (map != NULL) {
1664 if (map->dm_nsegs > 0)
1665 bus_dmamap_unload(sc->sc_dmat, map);
1666 bus_dmamap_destroy(sc->sc_dmat, map);
1667 sc->vr_cdata.vr_rx_chain[i].vr_map = NULL;
1668 }
1669 }
1670 bzero((char *)&sc->vr_ldata->vr_rx_list,
1671 sizeof(sc->vr_ldata->vr_rx_list));
1672
1673
1674
1675
1676 for (i = 0; i < VR_TX_LIST_CNT; i++) {
1677 bus_dmamap_t map;
1678
1679 if (sc->vr_cdata.vr_tx_chain[i].vr_mbuf != NULL) {
1680 m_freem(sc->vr_cdata.vr_tx_chain[i].vr_mbuf);
1681 sc->vr_cdata.vr_tx_chain[i].vr_mbuf = NULL;
1682 }
1683 map = sc->vr_cdata.vr_tx_chain[i].vr_map;
1684 if (map != NULL) {
1685 if (map->dm_nsegs > 0)
1686 bus_dmamap_unload(sc->sc_dmat, map);
1687 bus_dmamap_destroy(sc->sc_dmat, map);
1688 sc->vr_cdata.vr_tx_chain[i].vr_map = NULL;
1689 }
1690 }
1691
1692 bzero((char *)&sc->vr_ldata->vr_tx_list,
1693 sizeof(sc->vr_ldata->vr_tx_list));
1694 }
1695
1696
1697
1698
1699
/*
 * Shutdown hook: stop the chip so it does no DMA after the
 * system halts.
 */
void
vr_shutdown(void *arg)
{
	vr_stop((struct vr_softc *)arg);
}