root/dev/pci/if_vge.c


DEFINITIONS

This source file includes the following definitions.
  1. vge_eeprom_getword
  2. vge_read_eeprom
  3. vge_miipoll_stop
  4. vge_miipoll_start
  5. vge_miibus_readreg
  6. vge_miibus_writereg
  7. vge_cam_clear
  8. vge_cam_set
  9. vge_setmulti
  10. vge_reset
  11. vge_probe
  12. vge_allocmem
  13. vge_attach
  14. vge_newbuf
  15. vge_tx_list_init
  16. vge_rx_list_init
  17. vge_rxeof
  18. vge_txeof
  19. vge_tick
  20. vge_intr
  21. vge_encap
  22. vge_start
  23. vge_init
  24. vge_ifmedia_upd
  25. vge_ifmedia_sts
  26. vge_miibus_statchg
  27. vge_ioctl
  28. vge_watchdog
  29. vge_stop

    1 /*      $OpenBSD: if_vge.c,v 1.33 2007/05/01 11:28:06 canacar Exp $     */
    2 /*      $FreeBSD: if_vge.c,v 1.3 2004/09/11 22:13:25 wpaul Exp $        */
    3 /*
    4  * Copyright (c) 2004
    5  *      Bill Paul <wpaul@windriver.com>.  All rights reserved.
    6  *
    7  * Redistribution and use in source and binary forms, with or without
    8  * modification, are permitted provided that the following conditions
    9  * are met:
   10  * 1. Redistributions of source code must retain the above copyright
   11  *    notice, this list of conditions and the following disclaimer.
   12  * 2. Redistributions in binary form must reproduce the above copyright
   13  *    notice, this list of conditions and the following disclaimer in the
   14  *    documentation and/or other materials provided with the distribution.
   15  * 3. All advertising materials mentioning features or use of this software
   16  *    must display the following acknowledgement:
   17  *      This product includes software developed by Bill Paul.
   18  * 4. Neither the name of the author nor the names of any co-contributors
   19  *    may be used to endorse or promote products derived from this software
   20  *    without specific prior written permission.
   21  *
   22  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
   23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   25  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
   26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
   32  * THE POSSIBILITY OF SUCH DAMAGE.
   33  */
   34 
   35 /*
   36  * VIA Networking Technologies VT612x PCI gigabit ethernet NIC driver.
   37  *
   38  * Written by Bill Paul <wpaul@windriver.com>
   39  * Senior Networking Software Engineer
   40  * Wind River Systems
   41  *
   42  * Ported to OpenBSD by Peter Valchev <pvalchev@openbsd.org>
   43  */
   44 
   45 /*
   46  * The VIA Networking VT6122 is a 32bit, 33/66MHz PCI device that
   47  * combines a tri-speed ethernet MAC and PHY, with the following
   48  * features:
   49  *
   50  *      o Jumbo frame support up to 16K
   51  *      o Transmit and receive flow control
   52  *      o IPv4 checksum offload
   53  *      o VLAN tag insertion and stripping
   54  *      o TCP large send
   55  *      o 64-bit multicast hash table filter
   56  *      o 64 entry CAM filter
   57  *      o 16K RX FIFO and 48K TX FIFO memory
   58  *      o Interrupt moderation
   59  *
   60  * The VT6122 supports up to four transmit DMA queues. The descriptors
   61  * in the transmit ring can address up to 7 data fragments; frames which
   62  * span more than 7 data buffers must be coalesced, but in general the
   63  * BSD TCP/IP stack rarely generates frames more than 2 or 3 fragments
   64  * long. The receive descriptors address only a single buffer.
   65  *
   66  * There are two peculiar design issues with the VT6122. One is that
   67  * receive data buffers must be aligned on a 32-bit boundary. This is
   68  * not a problem where the VT6122 is used as a LOM device in x86-based
   69  * systems, but on architectures that generate unaligned access traps, we
   70  * have to do some copying.
   71  *
   72  * The other issue has to do with the way 64-bit addresses are handled.
   73  * The DMA descriptors only allow you to specify 48 bits of addressing
   74  * information. The remaining 16 bits are specified using one of the
   75  * I/O registers. If you only have a 32-bit system, then this isn't
   76  * an issue, but if you have a 64-bit system and more than 4GB of
    77  * memory, you must make sure your network data buffers reside
   78  * in the same 48-bit 'segment.'
   79  *
   80  * Special thanks to Ryan Fu at VIA Networking for providing documentation
   81  * and sample NICs for testing.
   82  */
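       /*
        * To make the 48-bit split concrete, here is a minimal sketch,
        * assuming a 64-bit bus address (the names are illustrative only;
        * the driver's real VGE_ADDR_LO()/VGE_ADDR_HI() macros are defined
        * in if_vgereg.h):
        */
       #if 0   /* example only, not compiled into the driver */
       static void
       vge_addr_split_sketch(u_int64_t addr)
       {
               u_int32_t lo  = addr & 0xffffffff;      /* bits 31:0, descriptor */
               u_int16_t mid = (addr >> 32) & 0xffff;  /* bits 47:32, descriptor */
               u_int16_t seg = addr >> 48;             /* bits 63:48, one I/O
                                                          register shared by all
                                                          network buffers */

               printf("lo %08x mid %04x seg %04x\n", lo, mid, seg);
       }
       #endif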
   83 
   84 #include "bpfilter.h"
   85 #include "vlan.h"
   86 
   87 #include <sys/param.h>
   88 #include <sys/endian.h>
   89 #include <sys/systm.h>
   90 #include <sys/sockio.h>
   91 #include <sys/mbuf.h>
   92 #include <sys/malloc.h>
   93 #include <sys/kernel.h>
   94 #include <sys/device.h>
   95 #include <sys/timeout.h>
   96 #include <sys/socket.h>
   97 
   98 #include <net/if.h>
   99 #include <net/if_dl.h>
  100 #include <net/if_media.h>
  101 
  102 #ifdef INET
  103 #include <netinet/in.h>
  104 #include <netinet/in_systm.h>
  105 #include <netinet/in_var.h>
  106 #include <netinet/ip.h>
  107 #include <netinet/if_ether.h>
  108 #endif
  109 
  110 #if NVLAN > 0
  111 #include <net/if_types.h>
  112 #include <net/if_vlan_var.h>
  113 #endif
  114 
  115 #if NBPFILTER > 0
  116 #include <net/bpf.h>
  117 #endif
  118 
  119 #include <dev/mii/mii.h>
  120 #include <dev/mii/miivar.h>
  121 
  122 #include <dev/pci/pcireg.h>
  123 #include <dev/pci/pcivar.h>
  124 #include <dev/pci/pcidevs.h>
  125 
  126 #include <dev/pci/if_vgereg.h>
  127 #include <dev/pci/if_vgevar.h>
  128 
  129 int vge_probe           (struct device *, void *, void *);
  130 void vge_attach         (struct device *, struct device *, void *);
  131 
  132 int vge_encap           (struct vge_softc *, struct mbuf *, int);
  133 
  134 int vge_allocmem                (struct vge_softc *);
  135 int vge_newbuf          (struct vge_softc *, int, struct mbuf *);
  136 int vge_rx_list_init    (struct vge_softc *);
  137 int vge_tx_list_init    (struct vge_softc *);
  138 void vge_rxeof          (struct vge_softc *);
  139 void vge_txeof          (struct vge_softc *);
  140 int vge_intr            (void *);
  141 void vge_tick           (void *);
  142 void vge_start          (struct ifnet *);
  143 int vge_ioctl           (struct ifnet *, u_long, caddr_t);
  144 int vge_init            (struct ifnet *);
  145 void vge_stop           (struct vge_softc *);
  146 void vge_watchdog       (struct ifnet *);
  147 int vge_ifmedia_upd     (struct ifnet *);
  148 void vge_ifmedia_sts    (struct ifnet *, struct ifmediareq *);
  149 
  150 #ifdef VGE_EEPROM
  151 void vge_eeprom_getword (struct vge_softc *, int, u_int16_t *);
  152 #endif
  153 void vge_read_eeprom    (struct vge_softc *, caddr_t, int, int, int);
  154 
  155 void vge_miipoll_start  (struct vge_softc *);
  156 void vge_miipoll_stop   (struct vge_softc *);
  157 int vge_miibus_readreg  (struct device *, int, int);
  158 void vge_miibus_writereg (struct device *, int, int, int);
  159 void vge_miibus_statchg (struct device *);
  160 
  161 void vge_cam_clear      (struct vge_softc *);
  162 int vge_cam_set         (struct vge_softc *, uint8_t *);
  163 void vge_setmulti       (struct vge_softc *);
  164 void vge_reset          (struct vge_softc *);
  165 
  166 struct cfattach vge_ca = {
  167         sizeof(struct vge_softc), vge_probe, vge_attach
  168 };
  169 
  170 struct cfdriver vge_cd = {
   171         NULL, "vge", DV_IFNET
  172 };
  173 
  174 #define VGE_PCI_LOIO             0x10
  175 #define VGE_PCI_LOMEM            0x14
  176 
  177 int vge_debug = 0;
   178 #define DPRINTF(x)      do { if (vge_debug) printf x; } while (0)
   179 #define DPRINTFN(n, x)  do { if (vge_debug >= (n)) printf x; } while (0)
  180 
  181 const struct pci_matchid vge_devices[] = {
  182         { PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT612x },
  183 };
  184 
  185 #ifdef VGE_EEPROM
  186 /*
  187  * Read a word of data stored in the EEPROM at address 'addr.'
  188  */
  189 void
  190 vge_eeprom_getword(struct vge_softc *sc, int addr, u_int16_t *dest)
  191 {
  192         int                     i;
  193         u_int16_t               word = 0;
  194 
  195         /*
  196          * Enter EEPROM embedded programming mode. In order to
  197          * access the EEPROM at all, we first have to set the
  198          * EELOAD bit in the CHIPCFG2 register.
  199          */
  200         CSR_SETBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
  201         CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);
  202 
  203         /* Select the address of the word we want to read */
  204         CSR_WRITE_1(sc, VGE_EEADDR, addr);
  205 
  206         /* Issue read command */
  207         CSR_SETBIT_1(sc, VGE_EECMD, VGE_EECMD_ERD);
  208 
  209         /* Wait for the done bit to be set. */
  210         for (i = 0; i < VGE_TIMEOUT; i++) {
  211                 if (CSR_READ_1(sc, VGE_EECMD) & VGE_EECMD_EDONE)
  212                         break;
  213         }
  214 
  215         if (i == VGE_TIMEOUT) {
  216                 printf("%s: EEPROM read timed out\n", sc->vge_dev.dv_xname);
  217                 *dest = 0;
  218                 return;
  219         }
  220 
  221         /* Read the result */
  222         word = CSR_READ_2(sc, VGE_EERDDAT);
  223 
  224         /* Turn off EEPROM access mode. */
  225         CSR_CLRBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);
  226         CSR_CLRBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
  227 
  228         *dest = word;
  229 }
  230 #endif
  231 
  232 /*
  233  * Read a sequence of words from the EEPROM.
  234  */
  235 void
  236 vge_read_eeprom(struct vge_softc *sc, caddr_t dest, int off, int cnt,
  237     int swap)
  238 {
  239         int                     i;
  240 #ifdef VGE_EEPROM
  241         u_int16_t               word = 0, *ptr;
  242 
  243         for (i = 0; i < cnt; i++) {
  244                 vge_eeprom_getword(sc, off + i, &word);
  245                 ptr = (u_int16_t *)(dest + (i * 2));
  246                 if (swap)
  247                         *ptr = ntohs(word);
  248                 else
  249                         *ptr = word;
  250         }
  251 #else
  252         for (i = 0; i < ETHER_ADDR_LEN; i++)
  253                 dest[i] = CSR_READ_1(sc, VGE_PAR0 + i);
  254 #endif
  255 }
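       /*
        * Usage sketch: this mirrors what vge_attach() does further down to
        * fetch the station address, reading three 16-bit words starting at
        * VGE_EE_EADDR with no byte swapping:
        *
        *      u_int16_t as[3];
        *
        *      vge_read_eeprom(sc, (caddr_t)as, VGE_EE_EADDR, 3, 0);
        */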
  256 
  257 void
  258 vge_miipoll_stop(struct vge_softc *sc)
  259 {
  260         int                     i;
  261 
  262         CSR_WRITE_1(sc, VGE_MIICMD, 0);
  263 
  264         for (i = 0; i < VGE_TIMEOUT; i++) {
  265                 DELAY(1);
  266                 if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
  267                         break;
  268         }
  269 
  270         if (i == VGE_TIMEOUT)
  271                 printf("%s: failed to idle MII autopoll\n", sc->vge_dev.dv_xname);
  272 }
  273 
  274 void
  275 vge_miipoll_start(struct vge_softc *sc)
  276 {
  277         int                     i;
  278 
  279         /* First, make sure we're idle. */
  280 
  281         CSR_WRITE_1(sc, VGE_MIICMD, 0);
  282         CSR_WRITE_1(sc, VGE_MIIADDR, VGE_MIIADDR_SWMPL);
  283 
  284         for (i = 0; i < VGE_TIMEOUT; i++) {
  285                 DELAY(1);
  286                 if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
  287                         break;
  288         }
  289 
  290         if (i == VGE_TIMEOUT) {
  291                 printf("%s: failed to idle MII autopoll\n", sc->vge_dev.dv_xname);
  292                 return;
  293         }
  294 
  295         /* Now enable auto poll mode. */
  296 
  297         CSR_WRITE_1(sc, VGE_MIICMD, VGE_MIICMD_MAUTO);
  298 
  299         /* And make sure it started. */
  300 
  301         for (i = 0; i < VGE_TIMEOUT; i++) {
  302                 DELAY(1);
  303                 if ((CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) == 0)
  304                         break;
  305         }
  306 
  307         if (i == VGE_TIMEOUT)
  308                 printf("%s: failed to start MII autopoll\n", sc->vge_dev.dv_xname);
  309 }
  310 
  311 int
  312 vge_miibus_readreg(struct device *dev, int phy, int reg)
  313 {
  314         struct vge_softc        *sc = (struct vge_softc *)dev;
  315         int                     i, s;
  316         u_int16_t               rval = 0;
  317 
  318         if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
  319                 return(0);
  320 
  321         s = splnet();
  322 
  323         vge_miipoll_stop(sc);
  324 
  325         /* Specify the register we want to read. */
  326         CSR_WRITE_1(sc, VGE_MIIADDR, reg);
  327 
  328         /* Issue read command. */
  329         CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_RCMD);
  330 
  331         /* Wait for the read command bit to self-clear. */
  332         for (i = 0; i < VGE_TIMEOUT; i++) {
  333                 DELAY(1);
  334                 if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_RCMD) == 0)
  335                         break;
  336         }
  337 
  338         if (i == VGE_TIMEOUT)
  339                 printf("%s: MII read timed out\n", sc->vge_dev.dv_xname);
  340         else
  341                 rval = CSR_READ_2(sc, VGE_MIIDATA);
  342 
  343         vge_miipoll_start(sc);
  344         splx(s);
  345 
  346         return (rval);
  347 }
  348 
  349 void
  350 vge_miibus_writereg(struct device *dev, int phy, int reg, int data)
  351 {
  352         struct vge_softc        *sc = (struct vge_softc *)dev;
  353         int                     i, s;
  354 
  355         if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
  356                 return;
  357 
  358         s = splnet();
  359         vge_miipoll_stop(sc);
  360 
  361         /* Specify the register we want to write. */
  362         CSR_WRITE_1(sc, VGE_MIIADDR, reg);
  363 
  364         /* Specify the data we want to write. */
  365         CSR_WRITE_2(sc, VGE_MIIDATA, data);
  366 
  367         /* Issue write command. */
  368         CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_WCMD);
  369 
  370         /* Wait for the write command bit to self-clear. */
  371         for (i = 0; i < VGE_TIMEOUT; i++) {
  372                 DELAY(1);
  373                 if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_WCMD) == 0)
  374                         break;
  375         }
  376 
  377         if (i == VGE_TIMEOUT) {
  378                 printf("%s: MII write timed out\n", sc->vge_dev.dv_xname);
  379         }
  380 
  381         vge_miipoll_start(sc);
  382         splx(s);
  383 }
  384 
  385 void
  386 vge_cam_clear(struct vge_softc *sc)
  387 {
  388         int                     i;
  389 
  390         /*
  391          * Turn off all the mask bits. This tells the chip
  392          * that none of the entries in the CAM filter are valid.
   393          * Desired entries will be enabled as we fill the filter in.
  394          */
  395 
  396         CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
  397         CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
  398         CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE);
  399         for (i = 0; i < 8; i++)
  400                 CSR_WRITE_1(sc, VGE_CAM0 + i, 0);
  401 
  402         /* Clear the VLAN filter too. */
  403 
  404         CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|VGE_CAMADDR_AVSEL|0);
  405         for (i = 0; i < 8; i++)
  406                 CSR_WRITE_1(sc, VGE_CAM0 + i, 0);
  407 
  408         CSR_WRITE_1(sc, VGE_CAMADDR, 0);
  409         CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
  410         CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
  411 
  412         sc->vge_camidx = 0;
  413 }
  414 
  415 int
  416 vge_cam_set(struct vge_softc *sc, uint8_t *addr)
  417 {
  418         int                     i, error = 0;
  419 
  420         if (sc->vge_camidx == VGE_CAM_MAXADDRS)
  421                 return(ENOSPC);
  422 
  423         /* Select the CAM data page. */
  424         CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
  425         CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMDATA);
  426 
  427         /* Set the filter entry we want to update and enable writing. */
  428         CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|sc->vge_camidx);
  429 
  430         /* Write the address to the CAM registers */
  431         for (i = 0; i < ETHER_ADDR_LEN; i++)
  432                 CSR_WRITE_1(sc, VGE_CAM0 + i, addr[i]);
  433 
  434         /* Issue a write command. */
  435         CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_WRITE);
  436 
   437         /* Wait for it to clear. */
  438         for (i = 0; i < VGE_TIMEOUT; i++) {
  439                 DELAY(1);
  440                 if ((CSR_READ_1(sc, VGE_CAMCTL) & VGE_CAMCTL_WRITE) == 0)
  441                         break;
  442         }
  443 
  444         if (i == VGE_TIMEOUT) {
  445                 printf("%s: setting CAM filter failed\n", sc->vge_dev.dv_xname);
  446                 error = EIO;
  447                 goto fail;
  448         }
  449 
  450         /* Select the CAM mask page. */
  451         CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
  452         CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
  453 
  454         /* Set the mask bit that enables this filter. */
  455         CSR_SETBIT_1(sc, VGE_CAM0 + (sc->vge_camidx/8),
  456             1<<(sc->vge_camidx & 7));
  457 
  458         sc->vge_camidx++;
  459 
  460 fail:
  461         /* Turn off access to CAM. */
  462         CSR_WRITE_1(sc, VGE_CAMADDR, 0);
  463         CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
  464         CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
  465 
  466         return (error);
  467 }
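       /*
        * Worked example of the mask-bit arithmetic above (illustrative
        * only): for sc->vge_camidx == 13, the entry's enable bit lives in
        * the mask register VGE_CAM0 + 13/8 == VGE_CAM0 + 1, at bit
        * position 13 & 7 == 5.
        */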
  468 
  469 /*
  470  * Program the multicast filter. We use the 64-entry CAM filter
   471  * for perfect filtering. If there are more than 64 multicast addresses,
  472  * we use the hash filter instead.
  473  */
  474 void
  475 vge_setmulti(struct vge_softc *sc)
  476 {
  477         struct arpcom           *ac = &sc->arpcom;
  478         struct ifnet            *ifp = &ac->ac_if;
  479         struct ether_multi      *enm;
  480         struct ether_multistep  step;
   481         int                     error = 0;
  482         u_int32_t               h = 0, hashes[2] = { 0, 0 };
  483 
  484         /* First, zot all the multicast entries. */
  485         vge_cam_clear(sc);
  486         CSR_WRITE_4(sc, VGE_MAR0, 0);
  487         CSR_WRITE_4(sc, VGE_MAR1, 0);
  488         ifp->if_flags &= ~IFF_ALLMULTI;
  489 
  490         /*
  491          * If the user wants allmulti or promisc mode, enable reception
  492          * of all multicast frames.
  493          */
  494         if (ifp->if_flags & IFF_PROMISC) {
  495 allmulti:
  496                 CSR_WRITE_4(sc, VGE_MAR0, 0xFFFFFFFF);
  497                 CSR_WRITE_4(sc, VGE_MAR1, 0xFFFFFFFF);
  498                 ifp->if_flags |= IFF_ALLMULTI;
  499                 return;
  500         }
  501 
  502         /* Now program new ones */
  503         ETHER_FIRST_MULTI(step, ac, enm);
  504         while (enm != NULL) {
  505                 if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN))
  506                         goto allmulti;
  507 
  508                 error = vge_cam_set(sc, enm->enm_addrlo);
  509                 if (error)
  510                         break;
  511 
  512                 ETHER_NEXT_MULTI(step, enm);
  513         }
  514 
  515         /* If there were too many addresses, use the hash filter. */
  516         if (error) {
  517                 vge_cam_clear(sc);
  518 
  519                 ETHER_FIRST_MULTI(step, ac, enm);
  520                 while (enm != NULL) {
  521                         h = ether_crc32_be(enm->enm_addrlo,
  522                             ETHER_ADDR_LEN) >> 26;
  523                         hashes[h >> 5] |= 1 << (h & 0x1f);
  524 
  525                         ETHER_NEXT_MULTI(step, enm);
  526                 }
  527 
  528                 CSR_WRITE_4(sc, VGE_MAR0, hashes[0]);
  529                 CSR_WRITE_4(sc, VGE_MAR1, hashes[1]);
  530         }
  531 }
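       /*
        * The hash fallback above boils down to a few lines. A standalone
        * sketch (the helper name is hypothetical, not part of this driver):
        */
       #if 0   /* example only, not compiled into the driver */
       static void
       vge_hash_sketch(const u_int8_t *addr, u_int32_t hashes[2])
       {
               /* The top 6 bits of the big-endian CRC pick one of 64 bits. */
               u_int32_t h = ether_crc32_be(addr, ETHER_ADDR_LEN) >> 26;

               /* Bits 0-31 belong in VGE_MAR0, bits 32-63 in VGE_MAR1. */
               hashes[h >> 5] |= 1 << (h & 0x1f);
       }
       #endif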
  532 
  533 void
  534 vge_reset(struct vge_softc *sc)
  535 {
  536         int                     i;
  537 
  538         CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_SOFTRESET);
  539 
  540         for (i = 0; i < VGE_TIMEOUT; i++) {
  541                 DELAY(5);
  542                 if ((CSR_READ_1(sc, VGE_CRS1) & VGE_CR1_SOFTRESET) == 0)
  543                         break;
  544         }
  545 
  546         if (i == VGE_TIMEOUT) {
   547                 printf("%s: soft reset timed out\n", sc->vge_dev.dv_xname);
  548                 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_STOP_FORCE);
  549                 DELAY(2000);
  550         }
  551 
  552         DELAY(5000);
  553 
  554         CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_RELOAD);
  555 
  556         for (i = 0; i < VGE_TIMEOUT; i++) {
  557                 DELAY(5);
  558                 if ((CSR_READ_1(sc, VGE_EECSR) & VGE_EECSR_RELOAD) == 0)
  559                         break;
  560         }
  561 
  562         if (i == VGE_TIMEOUT) {
  563                 printf("%s: EEPROM reload timed out\n", sc->vge_dev.dv_xname);
  564                 return;
  565         }
  566 
  567         CSR_CLRBIT_1(sc, VGE_CHIPCFG0, VGE_CHIPCFG0_PACPI);
  568 }
  569 
  570 /*
  571  * Probe for a VIA gigabit chip. Check the PCI vendor and device
  572  * IDs against our list and return a device name if we find a match.
  573  */
  574 int
  575 vge_probe(struct device *dev, void *match, void *aux)
  576 {
  577         return (pci_matchbyid((struct pci_attach_args *)aux, vge_devices,
  578             sizeof(vge_devices)/sizeof(vge_devices[0])));
  579 }
  580 
  581 /*
  582  * Allocate memory for RX/TX rings
  583  */
  584 int
  585 vge_allocmem(struct vge_softc *sc)
  586 {
  587         int                     nseg, rseg;
  588         int                     i, error;
  589 
  590         nseg = 32;
  591 
  592         /* Allocate DMA'able memory for the TX ring */
  593 
  594         error = bus_dmamap_create(sc->sc_dmat, VGE_TX_LIST_SZ, 1,
  595             VGE_TX_LIST_SZ, 0, BUS_DMA_ALLOCNOW,
  596             &sc->vge_ldata.vge_tx_list_map);
  597         if (error)
  598                 return (ENOMEM);
  599         error = bus_dmamem_alloc(sc->sc_dmat, VGE_TX_LIST_SZ,
  600             ETHER_ALIGN, 0,
  601             &sc->vge_ldata.vge_tx_listseg, 1, &rseg, BUS_DMA_NOWAIT);
  602         if (error) {
  603                 printf("%s: can't alloc TX list\n", sc->vge_dev.dv_xname);
  604                 return (ENOMEM);
  605         }
  606 
  607         /* Load the map for the TX ring. */
  608         error = bus_dmamem_map(sc->sc_dmat, &sc->vge_ldata.vge_tx_listseg,
  609              1, VGE_TX_LIST_SZ,
  610              (caddr_t *)&sc->vge_ldata.vge_tx_list, BUS_DMA_NOWAIT);
   611         if (error) {
   612                 printf("%s: can't map TX dma buffers\n",
   613                     sc->vge_dev.dv_xname);
   614                 bus_dmamem_free(sc->sc_dmat, &sc->vge_ldata.vge_tx_listseg, rseg);
   615                 return (ENOMEM);
   616         }
   617         memset(sc->vge_ldata.vge_tx_list, 0, VGE_TX_LIST_SZ);
  618 
  619         error = bus_dmamap_load(sc->sc_dmat, sc->vge_ldata.vge_tx_list_map,
  620             sc->vge_ldata.vge_tx_list, VGE_TX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
  621         if (error) {
  622                 printf("%s: can't load TX dma map\n", sc->vge_dev.dv_xname);
  623                 bus_dmamap_destroy(sc->sc_dmat, sc->vge_ldata.vge_tx_list_map);
  624                 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->vge_ldata.vge_tx_list,
  625                     VGE_TX_LIST_SZ);
  626                 bus_dmamem_free(sc->sc_dmat, &sc->vge_ldata.vge_tx_listseg, rseg);
  627                 return (ENOMEM);
  628         }
  629 
  630         /* Create DMA maps for TX buffers */
  631 
  632         for (i = 0; i < VGE_TX_DESC_CNT; i++) {
  633                 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES * nseg, nseg,
  634                     MCLBYTES, 0, BUS_DMA_ALLOCNOW,
  635                     &sc->vge_ldata.vge_tx_dmamap[i]);
  636                 if (error) {
  637                         printf("%s: can't create DMA map for TX\n",
  638                             sc->vge_dev.dv_xname);
  639                         return (ENOMEM);
  640                 }
  641         }
  642 
  643         /* Allocate DMA'able memory for the RX ring */
  644 
  645         error = bus_dmamap_create(sc->sc_dmat, VGE_RX_LIST_SZ, 1,
  646             VGE_RX_LIST_SZ, 0, BUS_DMA_ALLOCNOW,
  647             &sc->vge_ldata.vge_rx_list_map);
  648         if (error)
  649                 return (ENOMEM);
  650         error = bus_dmamem_alloc(sc->sc_dmat, VGE_RX_LIST_SZ, VGE_RING_ALIGN,
  651             0, &sc->vge_ldata.vge_rx_listseg, 1, &rseg, BUS_DMA_NOWAIT);
  652         if (error) {
  653                 printf("%s: can't alloc RX list\n", sc->vge_dev.dv_xname);
  654                 return (ENOMEM);
  655         }
  656 
  657         /* Load the map for the RX ring. */
  658 
  659         error = bus_dmamem_map(sc->sc_dmat, &sc->vge_ldata.vge_rx_listseg,
  660              1, VGE_RX_LIST_SZ,
  661              (caddr_t *)&sc->vge_ldata.vge_rx_list, BUS_DMA_NOWAIT);
   662         if (error) {
   663                 printf("%s: can't map RX dma buffers\n",
   664                     sc->vge_dev.dv_xname);
   665                 bus_dmamem_free(sc->sc_dmat, &sc->vge_ldata.vge_rx_listseg, rseg);
   666                 return (ENOMEM);
   667         }
   668         memset(sc->vge_ldata.vge_rx_list, 0, VGE_RX_LIST_SZ);
  669         error = bus_dmamap_load(sc->sc_dmat, sc->vge_ldata.vge_rx_list_map,
  670             sc->vge_ldata.vge_rx_list, VGE_RX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
  671         if (error) {
  672                 printf("%s: can't load RX dma map\n", sc->vge_dev.dv_xname);
  673                 bus_dmamap_destroy(sc->sc_dmat, sc->vge_ldata.vge_rx_list_map);
  674                 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->vge_ldata.vge_rx_list,
  675                     VGE_RX_LIST_SZ);
  676                 bus_dmamem_free(sc->sc_dmat, &sc->vge_ldata.vge_rx_listseg, rseg);
  677                 return (ENOMEM);
  678         }
  679 
  680         /* Create DMA maps for RX buffers */
  681 
  682         for (i = 0; i < VGE_RX_DESC_CNT; i++) {
  683                 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES * nseg, nseg,
  684                     MCLBYTES, 0, BUS_DMA_ALLOCNOW,
  685                     &sc->vge_ldata.vge_rx_dmamap[i]);
  686                 if (error) {
  687                         printf("%s: can't create DMA map for RX\n",
  688                             sc->vge_dev.dv_xname);
  689                         return (ENOMEM);
  690                 }
  691         }
  692 
  693         return (0);
  694 }
  695 
  696 /*
  697  * Attach the interface. Allocate softc structures, do ifmedia
  698  * setup and ethernet/BPF attach.
  699  */
  700 void
  701 vge_attach(struct device *parent, struct device *self, void *aux)
  702 {
  703         u_char                  eaddr[ETHER_ADDR_LEN];
  704         u_int16_t               as[3];
  705         struct vge_softc        *sc = (struct vge_softc *)self;
  706         struct pci_attach_args  *pa = aux;
  707         pci_chipset_tag_t       pc = pa->pa_pc;
  708         pci_intr_handle_t       ih;
  709         const char              *intrstr = NULL;
  710         struct ifnet            *ifp;
  711         int                     error = 0, i;
  712         bus_size_t              iosize;
  713 
  714         /*
  715          * Map control/status registers.
  716          */
  717         if (pci_mapreg_map(pa, VGE_PCI_LOMEM, PCI_MAPREG_TYPE_MEM, 0,
  718             &sc->vge_btag, &sc->vge_bhandle, NULL, &iosize, 0)) {
  719                 if (pci_mapreg_map(pa, VGE_PCI_LOIO, PCI_MAPREG_TYPE_IO, 0,
  720                     &sc->vge_btag, &sc->vge_bhandle, NULL, &iosize, 0)) {
  721                         printf(": can't map mem or i/o space\n");
  722                         return;
  723                 }
  724         }
  725 
  726         /* Allocate interrupt */
  727         if (pci_intr_map(pa, &ih)) {
  728                 printf(": couldn't map interrupt\n");
  729                 return;
  730         }
  731         intrstr = pci_intr_string(pc, ih);
  732         sc->vge_intrhand = pci_intr_establish(pc, ih, IPL_NET, vge_intr, sc,
  733             sc->vge_dev.dv_xname);
  734         if (sc->vge_intrhand == NULL) {
   735                 printf(": couldn't establish interrupt%s%s\n",
   736                     intrstr == NULL ? "" : " at ",
   737                     intrstr == NULL ? "" : intrstr);
   738                 return;
  739         }
  740         printf(": %s", intrstr);
  741 
  742         sc->sc_dmat = pa->pa_dmat;
  743 
  744         /* Reset the adapter. */
  745         vge_reset(sc);
  746 
  747         /*
  748          * Get station address from the EEPROM.
  749          */
  750         vge_read_eeprom(sc, (caddr_t)as, VGE_EE_EADDR, 3, 0);
  751         for (i = 0; i < 3; i++) {
  752                 eaddr[(i * 2) + 0] = as[i] & 0xff;
  753                 eaddr[(i * 2) + 1] = as[i] >> 8;
  754         }
  755 
  756         bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);
  757 
  758         printf(", address %s\n",
  759             ether_sprintf(sc->arpcom.ac_enaddr));
  760 
  761         error = vge_allocmem(sc);
  762 
  763         if (error)
  764                 return;
  765 
  766         ifp = &sc->arpcom.ac_if;
  767         ifp->if_softc = sc;
  768         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
  769         ifp->if_ioctl = vge_ioctl;
  770         ifp->if_start = vge_start;
  771         ifp->if_watchdog = vge_watchdog;
  772         ifp->if_init = vge_init;
  773         ifp->if_baudrate = 1000000000;
  774 #ifdef VGE_JUMBO
  775         ifp->if_hardmtu = VGE_JUMBO_MTU;
  776 #endif
  777         IFQ_SET_MAXLEN(&ifp->if_snd, VGE_IFQ_MAXLEN);
  778         IFQ_SET_READY(&ifp->if_snd);
  779 
  780         ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_IPv4 |
  781                                 IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
  782 
  783 #ifdef VGE_VLAN
  784         ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
  785 #endif
  786 
  787         /* Set interface name */
  788         strlcpy(ifp->if_xname, sc->vge_dev.dv_xname, IFNAMSIZ);
  789 
  790         /* Do MII setup */
  791         sc->sc_mii.mii_ifp = ifp;
  792         sc->sc_mii.mii_readreg = vge_miibus_readreg;
  793         sc->sc_mii.mii_writereg = vge_miibus_writereg;
  794         sc->sc_mii.mii_statchg = vge_miibus_statchg;
  795         ifmedia_init(&sc->sc_mii.mii_media, 0,
  796             vge_ifmedia_upd, vge_ifmedia_sts);
  797         mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
  798             MII_OFFSET_ANY, 0);
  799         if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
  800                 printf("%s: no PHY found!\n", sc->vge_dev.dv_xname);
  801                 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL,
  802                     0, NULL);
  803                 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL);
  804         } else
  805                 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
  806 
  807         timeout_set(&sc->timer_handle, vge_tick, sc);
  808 
  809         /*
  810          * Call MI attach routine.
  811          */
  812         if_attach(ifp);
  813         ether_ifattach(ifp);
  814 }
  815 
  816 int
  817 vge_newbuf(struct vge_softc *sc, int idx, struct mbuf *m)
  818 {
  819         struct mbuf             *m_new = NULL;
  820         struct vge_rx_desc      *r;
  821         bus_dmamap_t            rxmap = sc->vge_ldata.vge_rx_dmamap[idx];
  822         int                     i;
  823 
  824         if (m == NULL) {
  825                 /* Allocate a new mbuf */
  826                 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
  827                 if (m_new == NULL)
  828                         return (ENOBUFS);
  829 
  830                 /* Allocate a cluster */
  831                 MCLGET(m_new, M_DONTWAIT);
  832                 if (!(m_new->m_flags & M_EXT)) {
  833                         m_freem(m_new);
  834                         return (ENOBUFS);
  835                 }
   836         } else  /* reuse the mbuf handed in by the caller */
   837                 (m_new = m)->m_data = m->m_ext.ext_buf;
  838 
  839         m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
  840         /* Fix-up alignment so payload is doubleword-aligned */
  841         /* XXX m_adj(m_new, ETHER_ALIGN); */
  842 
  843         if (bus_dmamap_load_mbuf(sc->sc_dmat, rxmap, m_new, BUS_DMA_NOWAIT))
  844                 return (ENOBUFS);
  845 
  846         if (rxmap->dm_nsegs > 1)
  847                 goto out;
  848 
  849         /* Map the segments into RX descriptors */
  850         r = &sc->vge_ldata.vge_rx_list[idx];
  851 
  852         if (letoh32(r->vge_sts) & VGE_RDSTS_OWN) {
  853                 printf("%s: tried to map a busy RX descriptor\n",
  854                     sc->vge_dev.dv_xname);
  855                 goto out;
  856         }
  857         r->vge_buflen = htole16(VGE_BUFLEN(rxmap->dm_segs[0].ds_len) | VGE_RXDESC_I);
  858         r->vge_addrlo = htole32(VGE_ADDR_LO(rxmap->dm_segs[0].ds_addr));
  859         r->vge_addrhi = htole16(VGE_ADDR_HI(rxmap->dm_segs[0].ds_addr) & 0xFFFF);
  860         r->vge_sts = htole32(0);
  861         r->vge_ctl = htole32(0);
  862 
  863         /*
  864          * Note: the manual fails to document the fact that for
  865          * proper operation, the driver needs to replenish the RX
  866          * DMA ring 4 descriptors at a time (rather than one at a
  867          * time, like most chips). We can allocate the new buffers
  868          * but we should not set the OWN bits until we're ready
  869          * to hand back 4 of them in one shot.
  870          */
  871 #define VGE_RXCHUNK 4
  872         sc->vge_rx_consumed++;
  873         if (sc->vge_rx_consumed == VGE_RXCHUNK) {
  874                 for (i = idx; i != idx - sc->vge_rx_consumed; i--)
  875                         sc->vge_ldata.vge_rx_list[i].vge_sts |=
  876                             htole32(VGE_RDSTS_OWN);
  877                 sc->vge_rx_consumed = 0;
  878         }
  879 
  880         sc->vge_ldata.vge_rx_mbuf[idx] = m_new;
  881 
  882         bus_dmamap_sync(sc->sc_dmat, rxmap, 0,
  883             rxmap->dm_mapsize, BUS_DMASYNC_PREREAD);
  884 
  885         return (0);
  886 out:
  887         DPRINTF(("vge_newbuf: out of memory\n"));
  888         if (m_new != NULL)
  889                 m_freem(m_new);
  890         return (ENOMEM);
  891 }
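       /*
        * Worked example of the VGE_RXCHUNK handoff above: suppose idx == 7
        * and this buffer was the fourth one consumed. The loop then walks
        * i from 7 down to 4, setting VGE_RDSTS_OWN on descriptors 7, 6, 5
        * and 4 in one shot, so the chip never sees a partially replenished
        * group.
        */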
  892 
  893 int
  894 vge_tx_list_init(struct vge_softc *sc)
  895 {
  896         bzero ((char *)sc->vge_ldata.vge_tx_list, VGE_TX_LIST_SZ);
  897         bzero ((char *)&sc->vge_ldata.vge_tx_mbuf,
  898             (VGE_TX_DESC_CNT * sizeof(struct mbuf *)));
  899 
  900         bus_dmamap_sync(sc->sc_dmat,
  901             sc->vge_ldata.vge_tx_list_map, 0,
  902             sc->vge_ldata.vge_tx_list_map->dm_mapsize,
  903             BUS_DMASYNC_PREWRITE);
  904         sc->vge_ldata.vge_tx_prodidx = 0;
  905         sc->vge_ldata.vge_tx_considx = 0;
  906         sc->vge_ldata.vge_tx_free = VGE_TX_DESC_CNT;
  907 
  908         return (0);
  909 }
  910 
   911 /* Init RX descriptors and allocate mbufs with vge_newbuf().
   912  * A ring is used; the last descriptor points back to the first. */
  913 int
  914 vge_rx_list_init(struct vge_softc *sc)
  915 {
  916         int                     i;
  917 
  918         bzero ((char *)sc->vge_ldata.vge_rx_list, VGE_RX_LIST_SZ);
  919         bzero ((char *)&sc->vge_ldata.vge_rx_mbuf,
  920             (VGE_RX_DESC_CNT * sizeof(struct mbuf *)));
  921 
  922         sc->vge_rx_consumed = 0;
  923 
  924         for (i = 0; i < VGE_RX_DESC_CNT; i++) {
  925                 if (vge_newbuf(sc, i, NULL) == ENOBUFS)
  926                         return (ENOBUFS);
  927         }
  928 
  929         /* Flush the RX descriptors */
  930 
  931         bus_dmamap_sync(sc->sc_dmat,
  932             sc->vge_ldata.vge_rx_list_map,
  933             0, sc->vge_ldata.vge_rx_list_map->dm_mapsize,
  934             BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
  935 
  936         sc->vge_ldata.vge_rx_prodidx = 0;
  937         sc->vge_rx_consumed = 0;
  938         sc->vge_head = sc->vge_tail = NULL;
  939 
  940         return (0);
  941 }
  942 
  943 /*
  944  * RX handler. We support the reception of jumbo frames that have
  945  * been fragmented across multiple 2K mbuf cluster buffers.
  946  */
  947 void
  948 vge_rxeof(struct vge_softc *sc)
  949 {
  950         struct mbuf             *m;
  951         struct ifnet            *ifp;
  952         int                     i, total_len;
  953         int                     lim = 0;
  954         struct vge_rx_desc      *cur_rx;
  955         u_int32_t               rxstat, rxctl;
  956 
  957         ifp = &sc->arpcom.ac_if;
  958         i = sc->vge_ldata.vge_rx_prodidx;
  959 
  960         /* Invalidate the descriptor memory */
  961 
  962         bus_dmamap_sync(sc->sc_dmat,
  963             sc->vge_ldata.vge_rx_list_map,
  964             0, sc->vge_ldata.vge_rx_list_map->dm_mapsize,
  965             BUS_DMASYNC_POSTREAD);
  966 
  967         while (!VGE_OWN(&sc->vge_ldata.vge_rx_list[i])) {
  968                 struct mbuf *m0 = NULL;
  969 
  970                 cur_rx = &sc->vge_ldata.vge_rx_list[i];
  971                 m = sc->vge_ldata.vge_rx_mbuf[i];
  972                 total_len = VGE_RXBYTES(cur_rx);
  973                 rxstat = letoh32(cur_rx->vge_sts);
  974                 rxctl = letoh32(cur_rx->vge_ctl);
  975 
  976                 /* Invalidate the RX mbuf and unload its map */
  977 
  978                 bus_dmamap_sync(sc->sc_dmat,
  979                     sc->vge_ldata.vge_rx_dmamap[i],
  980                     0, sc->vge_ldata.vge_rx_dmamap[i]->dm_mapsize,
  981                     BUS_DMASYNC_POSTWRITE);
  982                 bus_dmamap_unload(sc->sc_dmat,
  983                     sc->vge_ldata.vge_rx_dmamap[i]);
  984 
  985                 /*
  986                  * If the 'start of frame' bit is set, this indicates
  987                  * either the first fragment in a multi-fragment receive,
  988                  * or an intermediate fragment. Either way, we want to
  989                  * accumulate the buffers.
  990                  */
  991                 if (rxstat & VGE_RXPKT_SOF) {
  992                         DPRINTF(("vge_rxeof: SOF\n"));
  993                         m->m_len = MCLBYTES;
  994                         if (sc->vge_head == NULL)
  995                                 sc->vge_head = sc->vge_tail = m;
  996                         else {
  997                                 m->m_flags &= ~M_PKTHDR;
  998                                 sc->vge_tail->m_next = m;
  999                                 sc->vge_tail = m;
 1000                         }
 1001                         vge_newbuf(sc, i, NULL);
 1002                         VGE_RX_DESC_INC(i);
 1003                         continue;
 1004                 }
 1005 
 1006                 /*
 1007                  * Bad/error frames will have the RXOK bit cleared.
 1008                  * However, there's one error case we want to allow:
 1009                  * if a VLAN tagged frame arrives and the chip can't
 1010                  * match it against the CAM filter, it considers this
 1011                  * a 'VLAN CAM filter miss' and clears the 'RXOK' bit.
 1012                  * We don't want to drop the frame though: our VLAN
 1013                  * filtering is done in software.
 1014                  */
 1015                 if (!(rxstat & VGE_RDSTS_RXOK) && !(rxstat & VGE_RDSTS_VIDM)
 1016                     && !(rxstat & VGE_RDSTS_CSUMERR)) {
 1017                         ifp->if_ierrors++;
 1018                         /*
 1019                          * If this is part of a multi-fragment packet,
 1020                          * discard all the pieces.
 1021                          */
 1022                         if (sc->vge_head != NULL) {
 1023                                 m_freem(sc->vge_head);
 1024                                 sc->vge_head = sc->vge_tail = NULL;
 1025                         }
 1026                         vge_newbuf(sc, i, m);
 1027                         VGE_RX_DESC_INC(i);
 1028                         continue;
 1029                 }
 1030 
 1031                 /*
 1032                  * If allocating a replacement mbuf fails,
 1033                  * reload the current one.
 1034                  */
 1035 
 1036                 if (vge_newbuf(sc, i, NULL) == ENOBUFS) {
 1037                         if (sc->vge_head != NULL) {
 1038                                 m_freem(sc->vge_head);
 1039                                 sc->vge_head = sc->vge_tail = NULL;
 1040                         }
 1041 
 1042                         m0 = m_devget(mtod(m, char *) - ETHER_ALIGN,
 1043                             total_len - ETHER_CRC_LEN + ETHER_ALIGN,
 1044                             0, ifp, NULL);
 1045                         vge_newbuf(sc, i, m);
 1046                         if (m0 == NULL) {
 1047                                 ifp->if_ierrors++;
 1048                                 continue;
 1049                         }
 1050                         m_adj(m0, ETHER_ALIGN);
 1051                         m = m0;
 1052 
 1053                         VGE_RX_DESC_INC(i);
 1054                         continue;
 1055                 }
 1056 
 1057                 VGE_RX_DESC_INC(i);
 1058 
 1059                 if (sc->vge_head != NULL) {
 1060                         m->m_len = total_len % MCLBYTES;
 1061                         /*
  1062                          * Special case: if there are 4 bytes or less
  1063                          * in this buffer, the mbuf can be discarded:
  1064                          * the last 4 bytes are the CRC, which we don't
 1065                          * care about anyway.
 1066                          */
 1067                         if (m->m_len <= ETHER_CRC_LEN) {
 1068                                 sc->vge_tail->m_len -=
 1069                                     (ETHER_CRC_LEN - m->m_len);
 1070                                 m_freem(m);
 1071                         } else {
 1072                                 m->m_len -= ETHER_CRC_LEN;
 1073                                 m->m_flags &= ~M_PKTHDR;
 1074                                 sc->vge_tail->m_next = m;
 1075                         }
 1076                         m = sc->vge_head;
 1077                         sc->vge_head = sc->vge_tail = NULL;
 1078                         m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
 1079                 } else
 1080                         m->m_pkthdr.len = m->m_len =
 1081                             (total_len - ETHER_CRC_LEN);
 1082 
 1083 #ifdef __STRICT_ALIGNMENT
 1084                 bcopy(m->m_data, m->m_data + ETHER_ALIGN,
 1085                     total_len);
 1086                 m->m_data += ETHER_ALIGN;
 1087 #endif
 1088                 ifp->if_ipackets++;
 1089                 m->m_pkthdr.rcvif = ifp;
 1090 
 1091                 /* Do RX checksumming */
 1092 
 1093                 /* Check IP header checksum */
 1094                 if ((rxctl & VGE_RDCTL_IPPKT) &&
 1095                     (rxctl & VGE_RDCTL_IPCSUMOK))
 1096                         m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
 1097 
 1098                 /* Check TCP/UDP checksum */
 1099                 if ((rxctl & (VGE_RDCTL_TCPPKT|VGE_RDCTL_UDPPKT)) &&
 1100                     (rxctl & VGE_RDCTL_PROTOCSUMOK))
 1101                         m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
 1102 
 1103 #if NBPFILTER > 0
 1104                 if (ifp->if_bpf)
 1105                         bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
 1106 #endif
 1107                 ether_input_mbuf(ifp, m);
 1108 
 1109                 lim++;
 1110                 if (lim == VGE_RX_DESC_CNT)
 1111                         break;
 1112         }
 1113 
 1114         /* Flush the RX DMA ring */
 1115         bus_dmamap_sync(sc->sc_dmat,
 1116             sc->vge_ldata.vge_rx_list_map,
 1117             0, sc->vge_ldata.vge_rx_list_map->dm_mapsize,
 1118             BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
 1119 
 1120         sc->vge_ldata.vge_rx_prodidx = i;
 1121         CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, lim);
 1122 }
 1123 
 1124 void
 1125 vge_txeof(struct vge_softc *sc)
 1126 {
 1127         struct ifnet            *ifp;
 1128         u_int32_t               txstat;
 1129         int                     idx;
 1130 
 1131         ifp = &sc->arpcom.ac_if;
 1132         idx = sc->vge_ldata.vge_tx_considx;
 1133 
 1134         /* Invalidate the TX descriptor list */
 1135 
 1136         bus_dmamap_sync(sc->sc_dmat,
 1137             sc->vge_ldata.vge_tx_list_map,
 1138             0, sc->vge_ldata.vge_tx_list_map->dm_mapsize,
 1139             BUS_DMASYNC_POSTREAD);
 1140 
  1141         /* Transmitted frames can now be freed from the TX list */
 1142         while (idx != sc->vge_ldata.vge_tx_prodidx) {
 1143                 txstat = letoh32(sc->vge_ldata.vge_tx_list[idx].vge_sts);
 1144                 if (txstat & VGE_TDSTS_OWN)
 1145                         break;
 1146 
 1147                 m_freem(sc->vge_ldata.vge_tx_mbuf[idx]);
 1148                 sc->vge_ldata.vge_tx_mbuf[idx] = NULL;
 1149                 bus_dmamap_unload(sc->sc_dmat,
 1150                     sc->vge_ldata.vge_tx_dmamap[idx]);
 1151                 if (txstat & (VGE_TDSTS_EXCESSCOLL|VGE_TDSTS_COLL))
 1152                         ifp->if_collisions++;
 1153                 if (txstat & VGE_TDSTS_TXERR)
 1154                         ifp->if_oerrors++;
 1155                 else
 1156                         ifp->if_opackets++;
 1157 
 1158                 sc->vge_ldata.vge_tx_free++;
 1159                 VGE_TX_DESC_INC(idx);
 1160         }
 1161 
 1162         /* No changes made to the TX ring, so no flush needed */
 1163 
 1164         if (idx != sc->vge_ldata.vge_tx_considx) {
 1165                 sc->vge_ldata.vge_tx_considx = idx;
 1166                 ifp->if_flags &= ~IFF_OACTIVE;
 1167                 ifp->if_timer = 0;
 1168         }
 1169 
 1170         /*
  1171          * If not all descriptors have been reaped yet,
 1172          * reload the timer so that we will eventually get another
 1173          * interrupt that will cause us to re-enter this routine.
 1174          * This is done in case the transmitter has gone idle.
 1175          */
 1176         if (sc->vge_ldata.vge_tx_free != VGE_TX_DESC_CNT)
 1177                 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);
 1178 }
 1179 
 1180 void
 1181 vge_tick(void *xsc)
 1182 {
 1183         struct vge_softc        *sc = xsc;
 1184         struct ifnet            *ifp = &sc->arpcom.ac_if;
 1185         struct mii_data         *mii = &sc->sc_mii;
 1186         int s;
 1187 
 1188         s = splnet();
 1189 
 1190         mii_tick(mii);
 1191 
 1192         if (sc->vge_link) {
 1193                 if (!(mii->mii_media_status & IFM_ACTIVE)) {
 1194                         sc->vge_link = 0;
 1195                         ifp->if_link_state = LINK_STATE_DOWN;
 1196                         if_link_state_change(ifp);
 1197                 }
 1198         } else {
 1199                 if (mii->mii_media_status & IFM_ACTIVE &&
 1200                     IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
 1201                         sc->vge_link = 1;
 1202                         if (mii->mii_media_status & IFM_FDX)
 1203                                 ifp->if_link_state = LINK_STATE_FULL_DUPLEX;
 1204                         else if (mii->mii_media_status & IFM_HDX)
 1205                                 ifp->if_link_state = LINK_STATE_HALF_DUPLEX;
 1206                         else
 1207                                 ifp->if_link_state = LINK_STATE_UP;
 1208                         if_link_state_change(ifp);
 1209                         if (!IFQ_IS_EMPTY(&ifp->if_snd))
 1210                                 vge_start(ifp);
 1211                 }
 1212         }
 1213         timeout_add(&sc->timer_handle, hz);
 1214         splx(s);
 1215 }
 1216 
 1217 int
 1218 vge_intr(void *arg)
 1219 {
 1220         struct vge_softc        *sc = arg;
 1221         struct ifnet            *ifp;
 1222         u_int32_t               status;
 1223         int                     claimed = 0;
 1224 
 1225         ifp = &sc->arpcom.ac_if;
 1226 
 1227         if (!(ifp->if_flags & IFF_UP))
  1228                 return (0);
 1229 
 1230         /* Disable interrupts */
 1231         CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
 1232 
 1233         for (;;) {
 1234                 status = CSR_READ_4(sc, VGE_ISR);
 1235                 DPRINTFN(3, ("vge_intr: status=%#x\n", status));
 1236 
 1237                 /* If the card has gone away the read returns 0xffffffff. */
 1238                 if (status == 0xFFFFFFFF)
 1239                         break;
 1240 
 1241                 if (status) {
 1242                         CSR_WRITE_4(sc, VGE_ISR, status);
 1243                 }
 1244 
 1245                 if ((status & VGE_INTRS) == 0)
 1246                         break;
 1247 
 1248                 claimed = 1;
 1249 
 1250                 if (status & (VGE_ISR_RXOK|VGE_ISR_RXOK_HIPRIO))
 1251                         vge_rxeof(sc);
 1252 
 1253                 if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
 1254                         DPRINTFN(2, ("vge_intr: RX error, recovering\n"));
 1255                         vge_rxeof(sc);
 1256                         CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
 1257                         CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
 1258                 }
 1259 
 1260                 if (status & (VGE_ISR_TXOK0|VGE_ISR_TIMER0))
 1261                         vge_txeof(sc);
 1262 
 1263                 if (status & (VGE_ISR_TXDMA_STALL|VGE_ISR_RXDMA_STALL)) {
 1264                         DPRINTFN(2, ("DMA_STALL\n"));
 1265                         vge_init(ifp);
 1266                 }
 1267 
 1268                 if (status & VGE_ISR_LINKSTS) {
 1269                         timeout_del(&sc->timer_handle);
 1270                         vge_tick(sc);
 1271                 }
 1272         }
 1273 
 1274         /* Re-enable interrupts */
 1275         CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
 1276 
 1277         if (!IFQ_IS_EMPTY(&ifp->if_snd))
 1278                 vge_start(ifp);
 1279 
 1280         return (claimed);
 1281 }
 1282 
 1283 /*
  1284  * Encapsulate an mbuf chain into the TX ring by combining it with
 1285  * the descriptors.
 1286  */
 1287 int
 1288 vge_encap(struct vge_softc *sc, struct mbuf *m_head, int idx)
 1289 {
 1290         struct ifnet            *ifp = &sc->arpcom.ac_if;
 1291         bus_dmamap_t            txmap;
 1292         struct vge_tx_desc      *d = NULL;
 1293         struct vge_tx_frag      *f;
 1294         struct mbuf             *mnew = NULL;
 1295         int                     error, frag;
 1296         u_int32_t               vge_flags;
 1297 #if NVLAN > 0
 1298         struct ifvlan           *ifv = NULL;
 1299 
 1300         if ((m_head->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) &&
 1301             m_head->m_pkthdr.rcvif != NULL)
 1302                 ifv = m_head->m_pkthdr.rcvif->if_softc;
 1303 #endif
 1304 
 1305         vge_flags = 0;
 1306 
 1307         if (m_head->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
 1308                 vge_flags |= VGE_TDCTL_IPCSUM;
 1309         if (m_head->m_pkthdr.csum_flags & M_TCPV4_CSUM_OUT)
 1310                 vge_flags |= VGE_TDCTL_TCPCSUM;
 1311         if (m_head->m_pkthdr.csum_flags & M_UDPV4_CSUM_OUT)
 1312                 vge_flags |= VGE_TDCTL_UDPCSUM;
 1313 
 1314         txmap = sc->vge_ldata.vge_tx_dmamap[idx];
 1315 repack:
 1316         error = bus_dmamap_load_mbuf(sc->sc_dmat, txmap,
 1317             m_head, BUS_DMA_NOWAIT);
 1318         if (error) {
 1319                 printf("%s: can't map mbuf (error %d)\n",
 1320                     sc->vge_dev.dv_xname, error);
 1321                 return (ENOBUFS);
 1322         }
 1323 
 1324         d = &sc->vge_ldata.vge_tx_list[idx];
 1325         /* If owned by chip, fail */
 1326         if (letoh32(d->vge_sts) & VGE_TDSTS_OWN)
 1327                 return (ENOBUFS);
 1328 
 1329         for (frag = 0; frag < txmap->dm_nsegs; frag++) {
 1330                 /* Check if we have used all 7 fragments. */
 1331                 if (frag == VGE_TX_FRAGS)
 1332                         break;
 1333                 f = &d->vge_frag[frag];
 1334                 f->vge_buflen = htole16(VGE_BUFLEN(txmap->dm_segs[frag].ds_len));
 1335                 f->vge_addrlo = htole32(VGE_ADDR_LO(txmap->dm_segs[frag].ds_addr));
 1336                 f->vge_addrhi = htole16(VGE_ADDR_HI(txmap->dm_segs[frag].ds_addr) & 0xFFFF);
 1337         }
 1338 
 1339         /*
 1340          * We used up all 7 fragments!  Now what we have to do is
  1341          * copy the data into an mbuf cluster and map that.
 1342          */
 1343         if (frag == VGE_TX_FRAGS) {
 1344                 MGETHDR(mnew, M_DONTWAIT, MT_DATA);
 1345                 if (mnew == NULL)
 1346                         return (ENOBUFS);
 1347 
 1348                 if (m_head->m_pkthdr.len > MHLEN) {
 1349                         MCLGET(mnew, M_DONTWAIT);
 1350                         if (!(mnew->m_flags & M_EXT)) {
 1351                                 m_freem(mnew);
 1352                                 return (ENOBUFS);
 1353                         }
 1354                 }
 1355                 m_copydata(m_head, 0, m_head->m_pkthdr.len,
 1356                     mtod(mnew, caddr_t));
 1357                 mnew->m_pkthdr.len = mnew->m_len = m_head->m_pkthdr.len;
 1358                 IFQ_DEQUEUE(&ifp->if_snd, m_head);
 1359                 m_freem(m_head);
 1360                 m_head = mnew;
 1361                 goto repack;
 1362         }
 1363 
 1364         /* This chip does not do auto-padding */
 1365         if (m_head->m_pkthdr.len < VGE_MIN_FRAMELEN) {
 1366                 f = &d->vge_frag[frag];
 1367 
 1368                 f->vge_buflen = htole16(VGE_BUFLEN(VGE_MIN_FRAMELEN -
 1369                     m_head->m_pkthdr.len));
 1370                 f->vge_addrlo = htole32(VGE_ADDR_LO(txmap->dm_segs[0].ds_addr));
 1371                 f->vge_addrhi = htole16(VGE_ADDR_HI(txmap->dm_segs[0].ds_addr) & 0xFFFF);
 1372                 m_head->m_pkthdr.len = VGE_MIN_FRAMELEN;
 1373                 frag++;
 1374         }
 1375         /* For some reason, we need to tell the card fragment + 1 */
 1376         frag++;
 1377 
 1378         bus_dmamap_sync(sc->sc_dmat, txmap, 0, txmap->dm_mapsize,
 1379             BUS_DMASYNC_PREWRITE);
 1380 
 1381         d->vge_sts = htole32(m_head->m_pkthdr.len << 16);
 1382         d->vge_ctl = htole32(vge_flags|(frag << 28) | VGE_TD_LS_NORM);
 1383 
 1384         if (m_head->m_pkthdr.len > ETHERMTU + ETHER_HDR_LEN)
 1385                 d->vge_ctl |= htole32(VGE_TDCTL_JUMBO);
 1386 
 1387         sc->vge_ldata.vge_tx_dmamap[idx] = txmap;
 1388         sc->vge_ldata.vge_tx_mbuf[idx] = m_head;
 1389         sc->vge_ldata.vge_tx_free--;
 1390         sc->vge_ldata.vge_tx_list[idx].vge_sts |= htole32(VGE_TDSTS_OWN);
 1391 
 1392         /*
 1393          * Set up hardware VLAN tagging.
 1394          */
 1395 #if NVLAN > 0
 1396         if (ifv != NULL) {
 1397                 sc->vge_ldata.vge_tx_list[idx].vge_ctl |=
 1398                     htole32(htons(ifv->ifv_tag) | VGE_TDCTL_VTAG);
 1399         }
 1400 #endif
 1401 
 1402         idx++;
 1403         if (mnew == NULL) {
 1404                 /* if the mbuf was coalesced above, it was already dequeued */
 1405                 IFQ_DEQUEUE(&ifp->if_snd, m_head);
 1406         }
 1407         return (0);
 1408 }
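
/*
 * Editorial sketch, not driver code: vge_encap() above splits each DMA
 * segment into the descriptor fields with VGE_BUFLEN(), VGE_ADDR_LO()
 * and VGE_ADDR_HI() from if_vgereg.h.  The stand-ins below (SKETCH_*
 * names are invented here) show the assumed layout: a 14-bit fragment
 * length and a 48-bit bus address split across the lo/hi fields.
 */
#define SKETCH_BUFLEN(x)	((x) & 0x3FFF)
#define SKETCH_ADDR_LO(y)	((u_int64_t)(y) & 0xFFFFFFFF)
#define SKETCH_ADDR_HI(y)	(((u_int64_t)(y) >> 32) & 0xFFFF)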
 1409 
 1410 /*
 1411  * Main transmit routine.
 1412  */
 1413 void
 1414 vge_start(struct ifnet *ifp)
 1415 {
 1416         struct vge_softc        *sc;
 1417         struct mbuf             *m_head = NULL;
 1418         int                     idx, pidx = 0;
 1419 
 1420         sc = ifp->if_softc;
 1421 
 1422         if (!sc->vge_link || ifp->if_flags & IFF_OACTIVE)
 1423                 return;
 1424 
 1425         if (IFQ_IS_EMPTY(&ifp->if_snd))
 1426                 return;
 1427 
 1428         idx = sc->vge_ldata.vge_tx_prodidx;
 1429 
 1430         pidx = idx - 1;
 1431         if (pidx < 0)
 1432                 pidx = VGE_TX_DESC_CNT - 1;
 1433 
 1434         while (sc->vge_ldata.vge_tx_mbuf[idx] == NULL) {
 1435                 IFQ_POLL(&ifp->if_snd, m_head);
 1436                 if (m_head == NULL)
 1437                         break;
 1438 
 1439                 /*
 1440                  * If there's a BPF listener, bounce a copy of this frame
 1441                  * to him.
 1442                  */
 1443 #if NBPFILTER > 0
 1444                 if (ifp->if_bpf)
 1445                         bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
 1446 #endif
 1447 
 1448                 if (vge_encap(sc, m_head, idx)) {
 1449                         ifp->if_flags |= IFF_OACTIVE;
 1450                         break;
 1451                 }
 1452 
 1453                 sc->vge_ldata.vge_tx_list[pidx].vge_frag[0].vge_buflen |=
 1454                     htole16(VGE_TXDESC_Q);
 1455 
 1456                 pidx = idx;
 1457                 VGE_TX_DESC_INC(idx);
 1458         }
 1459 
 1460         if (idx == sc->vge_ldata.vge_tx_prodidx) {
 1461                 return;
 1462         }
 1463 
 1464         /* Flush the TX descriptors */
 1465 
 1466         bus_dmamap_sync(sc->sc_dmat,
 1467             sc->vge_ldata.vge_tx_list_map,
 1468             0, sc->vge_ldata.vge_tx_list_map->dm_mapsize,
 1469             BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
 1470 
 1471         /* Issue a transmit command. */
 1472         CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_WAK0);
 1473 
 1474         sc->vge_ldata.vge_tx_prodidx = idx;
 1475 
 1476         /*
 1477          * Use the countdown timer for interrupt moderation.
 1478          * 'TX done' interrupts are disabled. Instead, we reset the
 1479          * countdown timer, which will begin counting until it hits
 1480          * the value in the SSTIMER register, and then trigger an
 1481          * interrupt. Each time we set the TIMER0_ENABLE bit,
 1482          * the timer count is reloaded. Only when the transmitter
 1483          * is idle will the timer hit 0 and an interrupt fire.
 1484          */
 1485         CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);
 1486 
 1487         /*
 1488          * Set a timeout in case the chip goes out to lunch.
 1489          */
 1490         ifp->if_timer = 5;
 1491 }
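
/*
 * Editorial sketch, not driver code: vge_start() advances the producer
 * index with VGE_TX_DESC_INC() from the header.  A minimal equivalent
 * of the assumed modular wrap-around (the SKETCH_ name is invented here):
 */
#define SKETCH_TX_DESC_INC(x)	((x) = ((x) + 1) % VGE_TX_DESC_CNT)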
 1492 
 1493 int
 1494 vge_init(struct ifnet *ifp)
 1495 {
 1496         struct vge_softc        *sc = ifp->if_softc;
 1497         int                     i;
 1498 
 1499         /*
 1500          * Cancel pending I/O and free all RX/TX buffers.
 1501          */
 1502         vge_stop(sc);
 1503         vge_reset(sc);
 1504 
 1505         /* Initialize RX descriptors list */
 1506         if (vge_rx_list_init(sc) == ENOBUFS) {
 1507                 printf("%s: init failed: no memory for RX buffers\n",
 1508                     sc->vge_dev.dv_xname);
 1509                 vge_stop(sc);
 1510                 return (ENOBUFS);
 1511         }
 1512         /* Initialize TX descriptors */
 1513         if (vge_tx_list_init(sc) == ENOBUFS) {
 1514                 printf("%s: init failed: no memory for TX buffers\n",
 1515                     sc->vge_dev.dv_xname);
 1516                 vge_stop(sc);
 1517                 return (ENOBUFS);
 1518         }
 1519 
 1520         /* Set our station address */
 1521         for (i = 0; i < ETHER_ADDR_LEN; i++)
 1522                 CSR_WRITE_1(sc, VGE_PAR0 + i, sc->arpcom.ac_enaddr[i]);
 1523 
 1524         /*
 1525          * Set receive FIFO threshold. Also allow transmission and
 1526          * reception of VLAN tagged frames.
 1527          */
 1528         CSR_CLRBIT_1(sc, VGE_RXCFG, VGE_RXCFG_FIFO_THR|VGE_RXCFG_VTAGOPT);
 1529         CSR_SETBIT_1(sc, VGE_RXCFG, VGE_RXFIFOTHR_128BYTES|VGE_VTAG_OPT2);
 1530 
 1531         /* Set DMA burst length */
 1532         CSR_CLRBIT_1(sc, VGE_DMACFG0, VGE_DMACFG0_BURSTLEN);
 1533         CSR_SETBIT_1(sc, VGE_DMACFG0, VGE_DMABURST_128);
 1534 
 1535         CSR_SETBIT_1(sc, VGE_TXCFG, VGE_TXCFG_ARB_PRIO|VGE_TXCFG_NONBLK);
 1536 
 1537         /* Set collision backoff algorithm */
 1538         CSR_CLRBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_CRANDOM|
 1539             VGE_CHIPCFG1_CAP|VGE_CHIPCFG1_MBA|VGE_CHIPCFG1_BAKOPT);
 1540         CSR_SETBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_OFSET);
 1541 
 1542         /* Disable LPSEL field in priority resolution */
 1543         CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_LPSEL_DIS);
 1544 
 1545         /*
 1546          * Load the addresses of the DMA queues into the chip.
 1547          * Note that we only use one transmit queue.
 1548          */
 1549 
 1550         CSR_WRITE_4(sc, VGE_TXDESC_ADDR_LO0,
 1551             VGE_ADDR_LO(sc->vge_ldata.vge_tx_listseg.ds_addr));
 1552         CSR_WRITE_2(sc, VGE_TXDESCNUM, VGE_TX_DESC_CNT - 1);
 1553 
 1554         CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO,
 1555             VGE_ADDR_LO(sc->vge_ldata.vge_rx_listseg.ds_addr));
 1556         CSR_WRITE_2(sc, VGE_RXDESCNUM, VGE_RX_DESC_CNT - 1);
 1557         CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, VGE_RX_DESC_CNT);
 1558 
 1559         /* Enable and wake up the RX descriptor queue */
 1560         CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
 1561         CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
 1562 
 1563         /* Enable the TX descriptor queue */
 1564         CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_RUN0);
 1565 
 1566         /* Set up the receive filter -- allow large frames for VLANs. */
 1567         CSR_WRITE_1(sc, VGE_RXCTL, VGE_RXCTL_RX_UCAST|VGE_RXCTL_RX_GIANT);
 1568 
 1569         /* If we want promiscuous mode, set the allframes bit. */
 1570         if (ifp->if_flags & IFF_PROMISC) {
 1571                 CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_PROMISC);
 1572         }
 1573 
 1574         /* Set capture broadcast bit to capture broadcast frames. */
 1575         if (ifp->if_flags & IFF_BROADCAST) {
 1576                 CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_BCAST);
 1577         }
 1578 
 1579         /* Set multicast bit to capture multicast frames. */
 1580         if (ifp->if_flags & IFF_MULTICAST) {
 1581                 CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_MCAST);
 1582         }
 1583 
 1584         /* Init the cam filter. */
 1585         vge_cam_clear(sc);
 1586 
 1587         /* Init the multicast filter. */
 1588         vge_setmulti(sc);
 1589 
 1590         /* Enable flow control */
 1591 
 1592         CSR_WRITE_1(sc, VGE_CRS2, 0x8B);
 1593 
 1594         /* Jumbo frame reception is already allowed via VGE_RXCTL_RX_GIANT above */
 1595 
 1596         /* Start the MAC. */
 1597         CSR_WRITE_1(sc, VGE_CRC0, VGE_CR0_STOP);
 1598         CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_NOPOLL);
 1599         CSR_WRITE_1(sc, VGE_CRS0,
 1600             VGE_CR0_TX_ENABLE|VGE_CR0_RX_ENABLE|VGE_CR0_START);
 1601 
 1602         /*
 1603          * Configure the one-shot timer for microsecond
 1604          * resolution and load it with 400 usecs.
 1605          */
 1606         CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_TIMER0_RES);
 1607         CSR_WRITE_2(sc, VGE_SSTIMER, 400);
 1608 
 1609         /*
 1610          * Configure interrupt moderation for receive. Enable
 1611          * the holdoff counter and load it, and set the RX
 1612          * suppression count to the number of descriptors we
 1613          * want to allow before triggering an interrupt.
 1614          * The holdoff timer is in units of 20 usecs.
 1615          */
 1616 
 1617 #ifdef notyet
 1618         CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_TXINTSUP_DISABLE);
 1619         /* Select the interrupt holdoff timer page. */
 1620         CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
 1621         CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_INTHLDOFF);
 1622         CSR_WRITE_1(sc, VGE_INTHOLDOFF, 10); /* ~200 usecs */
 1623 
 1624         /* Enable use of the holdoff timer. */
 1625         CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_HOLDOFF);
 1626         CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_SC_RELOAD);
 1627 
 1628         /* Select the RX suppression threshold page. */
 1629         CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
 1630         CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_RXSUPPTHR);
 1631         CSR_WRITE_1(sc, VGE_RXSUPPTHR, 64); /* interrupt after 64 packets */
 1632 
 1633         /* Restore the page select bits. */
 1634         CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
 1635         CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
 1636 #endif
 1637 
 1638         /*
 1639          * Enable interrupts.
 1640          */
 1641         CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
 1642         CSR_WRITE_4(sc, VGE_ISR, 0);
 1643         CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
 1644 
 1645         /* Restore BMCR state */
 1646         mii_mediachg(&sc->sc_mii);
 1647 
 1648         ifp->if_flags |= IFF_RUNNING;
 1649         ifp->if_flags &= ~IFF_OACTIVE;
 1650 
 1651         sc->vge_if_flags = 0;
 1652         sc->vge_link = 0;
 1653 
 1654         if (!timeout_pending(&sc->timer_handle))
 1655                 timeout_add(&sc->timer_handle, hz);
 1656 
 1657         return (0);
 1658 }
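
/*
 * Editorial sketch, not driver code: vge_init() above leans heavily on
 * the CSR_SETBIT_1()/CSR_CLRBIT_1() read-modify-write helpers.  Assuming
 * CSR_READ_1()/CSR_WRITE_1() wrap the usual bus_space accessors, the
 * helpers plausibly reduce to (SKETCH_ names are invented here):
 */
#define SKETCH_SETBIT_1(sc, reg, x)	\
	CSR_WRITE_1((sc), (reg), CSR_READ_1((sc), (reg)) | (x))
#define SKETCH_CLRBIT_1(sc, reg, x)	\
	CSR_WRITE_1((sc), (reg), CSR_READ_1((sc), (reg)) & ~(x))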
 1659 
 1660 /*
 1661  * Set media options.
 1662  */
 1663 int
 1664 vge_ifmedia_upd(struct ifnet *ifp)
 1665 {
 1666         struct vge_softc *sc = ifp->if_softc;
 1667 
 1668         return (mii_mediachg(&sc->sc_mii));
 1669 }
 1670 
 1671 /*
 1672  * Report current media status.
 1673  */
 1674 void
 1675 vge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
 1676 {
 1677         struct vge_softc *sc = ifp->if_softc;
 1678 
 1679         mii_pollstat(&sc->sc_mii);
 1680         ifmr->ifm_active = sc->sc_mii.mii_media_active;
 1681         ifmr->ifm_status = sc->sc_mii.mii_media_status;
 1682 }
 1683 
 1684 void
 1685 vge_miibus_statchg(struct device *dev)
 1686 {
 1687         struct vge_softc        *sc = (struct vge_softc *)dev;
 1688         struct mii_data         *mii;
 1689         struct ifmedia_entry    *ife;
 1690 
 1691         mii = &sc->sc_mii;
 1692         ife = mii->mii_media.ifm_cur;
 1693 
 1694         /*
 1695          * If the user manually selects a media mode, we need to turn
 1696          * on the forced MAC mode bit in the DIAGCTL register. If the
 1697          * user happens to choose a full duplex mode, we also need to
 1698          * set the 'force full duplex' bit. This applies only to
 1699          * 10Mbps and 100Mbps speeds. In autoselect mode, forced MAC
 1700          * mode is disabled, and in 1000baseT mode, full duplex is
 1701          * always implied, so we turn on the forced mode bit but leave
 1702          * the FDX bit cleared.
 1703          */
 1704 
 1705         switch (IFM_SUBTYPE(ife->ifm_media)) {
 1706         case IFM_AUTO:
 1707                 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
 1708                 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
 1709                 break;
 1710         case IFM_1000_T:
 1711                 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
 1712                 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
 1713                 break;
 1714         case IFM_100_TX:
 1715         case IFM_10_T:
 1716                 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
 1717                 if ((ife->ifm_media & IFM_GMASK) == IFM_FDX) {
 1718                         CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
 1719                 } else {
 1720                         CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
 1721                 }
 1722                 break;
 1723         default:
 1724                 printf("%s: unknown media type: %x\n",
 1725                     sc->vge_dev.dv_xname, IFM_SUBTYPE(ife->ifm_media));
 1726                 break;
 1727         }
 1728 }
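
/*
 * Editorial note, not driver code: the switch above decodes the media
 * word from <net/if_media.h>.  Roughly, IFM_SUBTYPE(m) masks out the
 * subtype bits (yielding values such as IFM_1000_T), while the
 * (m & IFM_GMASK) == IFM_FDX test checks whether the full-duplex
 * option was selected.  The exact mask values live in if_media.h.
 */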
 1729 
 1730 int
 1731 vge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
 1732 {
 1733         struct vge_softc        *sc = ifp->if_softc;
 1734         struct ifreq            *ifr = (struct ifreq *) data;
 1735         struct ifaddr           *ifa = (struct ifaddr *) data;
 1736         int                     s, error = 0;
 1737 
 1738         s = splnet();
 1739 
 1740         if ((error = ether_ioctl(ifp, &sc->arpcom, command, data)) > 0) {
 1741                 splx(s);
 1742                 return (error);
 1743         }
 1744 
 1745         switch (command) {
 1746         case SIOCSIFADDR:
 1747                 ifp->if_flags |= IFF_UP;
 1748                 switch (ifa->ifa_addr->sa_family) {
 1749 #ifdef INET
 1750                 case AF_INET:
 1751                         vge_init(ifp);
 1752                         arp_ifinit(&sc->arpcom, ifa);
 1753                         break;
 1754 #endif
 1755                 default:
 1756                         vge_init(ifp);
 1757                         break;
 1758                 }
 1759                 break;
 1760         case SIOCSIFMTU:
 1761                 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ifp->if_hardmtu)
 1762                         error = EINVAL;
 1763                 else if (ifp->if_mtu != ifr->ifr_mtu)
 1764                         ifp->if_mtu = ifr->ifr_mtu;
 1765                 break;
 1766         case SIOCSIFFLAGS:
 1767                 if (ifp->if_flags & IFF_UP) {
 1768                         if (ifp->if_flags & IFF_RUNNING &&
 1769                             ifp->if_flags & IFF_PROMISC &&
 1770                             !(sc->vge_if_flags & IFF_PROMISC)) {
 1771                                 CSR_SETBIT_1(sc, VGE_RXCTL,
 1772                                     VGE_RXCTL_RX_PROMISC);
 1773                                 vge_setmulti(sc);
 1774                         } else if (ifp->if_flags & IFF_RUNNING &&
 1775                             !(ifp->if_flags & IFF_PROMISC) &&
 1776                             sc->vge_if_flags & IFF_PROMISC) {
 1777                                 CSR_CLRBIT_1(sc, VGE_RXCTL,
 1778                                     VGE_RXCTL_RX_PROMISC);
 1779                                 vge_setmulti(sc);
 1780                         } else
 1781                                 vge_init(ifp);
 1782                 } else {
 1783                         if (ifp->if_flags & IFF_RUNNING)
 1784                                 vge_stop(sc);
 1785                 }
 1786                 sc->vge_if_flags = ifp->if_flags;
 1787                 break;
 1788         case SIOCADDMULTI:
 1789         case SIOCDELMULTI:
 1790                 error = (command == SIOCADDMULTI) ?
 1791                     ether_addmulti(ifr, &sc->arpcom) :
 1792                     ether_delmulti(ifr, &sc->arpcom);
 1793 
 1794                 if (error == ENETRESET) {
 1795                         if (ifp->if_flags & IFF_RUNNING)
 1796                                 vge_setmulti(sc);
 1797                         error = 0;
 1798                 }
 1799                 break;
 1800         case SIOCGIFMEDIA:
 1801         case SIOCSIFMEDIA:
 1802                 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
 1803                 break;
 1804         default:
 1805                 error = ENOTTY;
 1806                 break;
 1807         }
 1808 
 1809         splx(s);
 1810         return (error);
 1811 }
 1812 
 1813 void
 1814 vge_watchdog(struct ifnet *ifp)
 1815 {
 1816         struct vge_softc *sc = ifp->if_softc;
 1817         int s;
 1818 
 1819         s = splnet();
 1820         printf("%s: watchdog timeout\n", sc->vge_dev.dv_xname);
 1821         ifp->if_oerrors++;
 1822 
 1823         vge_txeof(sc);
 1824         vge_rxeof(sc);
 1825 
 1826         vge_init(ifp);
 1827 
 1828         splx(s);
 1829 }
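
/*
 * Editorial note, not driver code: the network stack decrements
 * ifp->if_timer once per second and calls if_watchdog when it reaches
 * zero, so the "ifp->if_timer = 5" armed in vge_start() gives the chip
 * roughly five seconds to complete a transmit before this recovery
 * path (reclaim descriptors, then reinitialize) runs.
 */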
 1830 
 1831 /*
 1832  * Stop the adapter and free any mbufs allocated to the
 1833  * RX and TX lists.
 1834  */
 1835 void
 1836 vge_stop(struct vge_softc *sc)
 1837 {
 1838         int                     i;
 1839         struct ifnet            *ifp;
 1840 
 1841         ifp = &sc->arpcom.ac_if;
 1842         ifp->if_timer = 0;
 1843         if (timeout_pending(&sc->timer_handle))
 1844                 timeout_del(&sc->timer_handle);
 1845 
 1846         ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
 1847 
 1848         CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
 1849         CSR_WRITE_1(sc, VGE_CRS0, VGE_CR0_STOP);
 1850         CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
 1851         CSR_WRITE_2(sc, VGE_TXQCSRC, 0xFFFF);
 1852         CSR_WRITE_1(sc, VGE_RXQCSRC, 0xFF);
 1853         CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 0);
 1854 
 1855         if (sc->vge_head != NULL) {
 1856                 m_freem(sc->vge_head);
 1857                 sc->vge_head = sc->vge_tail = NULL;
 1858         }
 1859 
 1860         /* Free the TX list buffers. */
 1861         for (i = 0; i < VGE_TX_DESC_CNT; i++) {
 1862                 if (sc->vge_ldata.vge_tx_mbuf[i] != NULL) {
 1863                         bus_dmamap_unload(sc->sc_dmat,
 1864                             sc->vge_ldata.vge_tx_dmamap[i]);
 1865                         m_freem(sc->vge_ldata.vge_tx_mbuf[i]);
 1866                         sc->vge_ldata.vge_tx_mbuf[i] = NULL;
 1867                 }
 1868         }
 1869 
 1870         /* Free the RX list buffers. */
 1871         for (i = 0; i < VGE_RX_DESC_CNT; i++) {
 1872                 if (sc->vge_ldata.vge_rx_mbuf[i] != NULL) {
 1873                         bus_dmamap_unload(sc->sc_dmat,
 1874                             sc->vge_ldata.vge_rx_dmamap[i]);
 1875                         m_freem(sc->vge_ldata.vge_rx_mbuf[i]);
 1876                         sc->vge_ldata.vge_rx_mbuf[i] = NULL;
 1877                 }
 1878         }
 1879 }
