root/dev/pci/if_msk.c

DEFINITIONS

This source file includes the following definitions.
  1. sk_win_read_4
  2. sk_win_read_2
  3. sk_win_read_1
  4. sk_win_write_4
  5. sk_win_write_2
  6. sk_win_write_1
  7. msk_miibus_readreg
  8. msk_miibus_writereg
  9. msk_miibus_statchg
  10. msk_setfilt
  11. msk_setmulti
  12. msk_setpromisc
  13. msk_init_rx_ring
  14. msk_init_tx_ring
  15. msk_newbuf
  16. msk_alloc_jumbo_mem
  17. msk_jalloc
  18. msk_jfree
  19. msk_ifmedia_upd
  20. msk_ifmedia_sts
  21. msk_ioctl
  22. mskc_probe
  23. mskc_reset
  24. msk_probe
  25. msk_reset
  26. msk_attach
  27. mskcprint
  28. mskc_attach
  29. msk_encap
  30. msk_start
  31. msk_watchdog
  32. mskc_shutdown
  33. msk_rxvalid
  34. msk_rxeof
  35. msk_txeof
  36. msk_tick
  37. msk_intr_yukon
  38. msk_intr
  39. msk_init_yukon
  40. msk_init
  41. msk_stop
  42. msk_dump_txdesc
  43. msk_dump_bytes
  44. msk_dump_mbuf

    1 /*      $OpenBSD: if_msk.c,v 1.56 2007/06/27 19:15:47 kettenis Exp $    */
    2 
    3 /*
    4  * Copyright (c) 1997, 1998, 1999, 2000
    5  *      Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
    6  *
    7  * Redistribution and use in source and binary forms, with or without
    8  * modification, are permitted provided that the following conditions
    9  * are met:
   10  * 1. Redistributions of source code must retain the above copyright
   11  *    notice, this list of conditions and the following disclaimer.
   12  * 2. Redistributions in binary form must reproduce the above copyright
   13  *    notice, this list of conditions and the following disclaimer in the
   14  *    documentation and/or other materials provided with the distribution.
   15  * 3. All advertising materials mentioning features or use of this software
   16  *    must display the following acknowledgement:
   17  *      This product includes software developed by Bill Paul.
   18  * 4. Neither the name of the author nor the names of any co-contributors
   19  *    may be used to endorse or promote products derived from this software
   20  *    without specific prior written permission.
   21  *
   22  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
   23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   25  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
   26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
   32  * THE POSSIBILITY OF SUCH DAMAGE.
   33  *
   34  * $FreeBSD: /c/ncvs/src/sys/pci/if_sk.c,v 1.20 2000/04/22 02:16:37 wpaul Exp $
   35  */
   36 
   37 /*
   38  * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
   39  *
   40  * Permission to use, copy, modify, and distribute this software for any
   41  * purpose with or without fee is hereby granted, provided that the above
   42  * copyright notice and this permission notice appear in all copies.
   43  *
   44  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
   45  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
   46  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
   47  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
   48  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   49  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   50  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
   51  */
   52 
   53 /*
   54  * SysKonnect SK-NET gigabit ethernet driver for FreeBSD. Supports
   55  * the SK-984x series adapters, both single port and dual port.
   56  * References:
   57  *      The XaQti XMAC II datasheet,
   58  * http://www.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
   59  *      The SysKonnect GEnesis manual, http://www.syskonnect.com
   60  *
   61  * Note: XaQti has been acquired by Vitesse, and Vitesse does not have the
   62  * XMAC II datasheet online. I have put my copy at people.freebsd.org as a
   63  * convenience to others until Vitesse corrects this problem:
   64  *
   65  * http://people.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
   66  *
   67  * Written by Bill Paul <wpaul@ee.columbia.edu>
   68  * Department of Electrical Engineering
   69  * Columbia University, New York City
   70  */
   71 
   72 /*
   73  * The SysKonnect gigabit ethernet adapters consist of two main
   74  * components: the SysKonnect GEnesis controller chip and the XaQti Corp.
   75  * XMAC II gigabit ethernet MAC. The XMAC provides all of the MAC
   76  * components and a PHY while the GEnesis controller provides a PCI
   77  * interface with DMA support. Each card may have between 512K and
   78  * 2MB of SRAM on board depending on the configuration.
   79  *
   80  * The SysKonnect GEnesis controller can have either one or two XMAC
   81  * chips connected to it, allowing single or dual port NIC configurations.
   82  * SysKonnect has the distinction of being the only vendor on the market
   83  * with a dual port gigabit ethernet NIC. The GEnesis provides dual FIFOs,
   84  * dual DMA queues, packet/MAC/transmit arbiters and direct access to the
   85  * XMAC registers. This driver takes advantage of these features to allow
   86  * both XMACs to operate as independent interfaces.
   87  */
   88  
   89 #include "bpfilter.h"
   90 
   91 #include <sys/param.h>
   92 #include <sys/systm.h>
   93 #include <sys/sockio.h>
   94 #include <sys/mbuf.h>
   95 #include <sys/malloc.h>
   96 #include <sys/kernel.h>
   97 #include <sys/socket.h>
   98 #include <sys/timeout.h>
   99 #include <sys/device.h>
  100 #include <sys/queue.h>
  101 
  102 #include <net/if.h>
  103 #include <net/if_dl.h>
  104 #include <net/if_types.h>
  105 
  106 #ifdef INET
  107 #include <netinet/in.h>
  108 #include <netinet/in_systm.h>
  109 #include <netinet/in_var.h>
  110 #include <netinet/ip.h>
  111 #include <netinet/udp.h>
  112 #include <netinet/tcp.h>
  113 #include <netinet/if_ether.h>
  114 #endif
  115 
  116 #include <net/if_media.h>
  117 #include <net/if_vlan_var.h>
  118 
  119 #if NBPFILTER > 0
  120 #include <net/bpf.h>
  121 #endif
  122 
  123 #include <dev/mii/mii.h>
  124 #include <dev/mii/miivar.h>
  125 #include <dev/mii/brgphyreg.h>
  126 
  127 #include <dev/pci/pcireg.h>
  128 #include <dev/pci/pcivar.h>
  129 #include <dev/pci/pcidevs.h>
  130 
  131 #include <dev/pci/if_skreg.h>
  132 #include <dev/pci/if_mskvar.h>
  133 
  134 int mskc_probe(struct device *, void *, void *);
  135 void mskc_attach(struct device *, struct device *self, void *aux);
  136 void mskc_reset(struct sk_softc *);
  137 void mskc_shutdown(void *);
  138 int msk_probe(struct device *, void *, void *);
  139 void msk_attach(struct device *, struct device *self, void *aux);
  140 void msk_reset(struct sk_if_softc *);
  141 int mskcprint(void *, const char *);
  142 int msk_intr(void *);
  143 void msk_intr_yukon(struct sk_if_softc *);
  144 static __inline int msk_rxvalid(struct sk_softc *, u_int32_t, u_int32_t);
  145 void msk_rxeof(struct sk_if_softc *, u_int16_t, u_int32_t);
  146 void msk_txeof(struct sk_if_softc *);
  147 int msk_encap(struct sk_if_softc *, struct mbuf *, u_int32_t *);
  148 void msk_start(struct ifnet *);
  149 int msk_ioctl(struct ifnet *, u_long, caddr_t);
  150 void msk_init(void *);
  151 void msk_init_yukon(struct sk_if_softc *);
  152 void msk_stop(struct sk_if_softc *);
  153 void msk_watchdog(struct ifnet *);
  154 int msk_ifmedia_upd(struct ifnet *);
  155 void msk_ifmedia_sts(struct ifnet *, struct ifmediareq *);
  156 int msk_newbuf(struct sk_if_softc *, int, struct mbuf *, bus_dmamap_t);
  157 int msk_alloc_jumbo_mem(struct sk_if_softc *);
  158 void *msk_jalloc(struct sk_if_softc *);
  159 void msk_jfree(caddr_t, u_int, void *);
  160 int msk_init_rx_ring(struct sk_if_softc *);
  161 int msk_init_tx_ring(struct sk_if_softc *);
  162 
  163 int msk_miibus_readreg(struct device *, int, int);
  164 void msk_miibus_writereg(struct device *, int, int, int);
  165 void msk_miibus_statchg(struct device *);
  166 
  167 void msk_setfilt(struct sk_if_softc *, caddr_t, int);
  168 void msk_setmulti(struct sk_if_softc *);
  169 void msk_setpromisc(struct sk_if_softc *);
  170 void msk_tick(void *);
  171 
  172 #ifdef MSK_DEBUG
  173 #define DPRINTF(x)      if (mskdebug) printf x
  174 #define DPRINTFN(n,x)   if (mskdebug >= (n)) printf x
  175 int     mskdebug = 0;
  176 
  177 void msk_dump_txdesc(struct msk_tx_desc *, int);
  178 void msk_dump_mbuf(struct mbuf *);
  179 void msk_dump_bytes(const char *, int);
  180 #else
  181 #define DPRINTF(x)
  182 #define DPRINTFN(n,x)
  183 #endif
  184 
   185 /* supported devices */
  186 const struct pci_matchid mskc_devices[] = {
  187         { PCI_VENDOR_DLINK,             PCI_PRODUCT_DLINK_DGE550SX },
  188         { PCI_VENDOR_DLINK,             PCI_PRODUCT_DLINK_DGE550T_B1 },
  189         { PCI_VENDOR_DLINK,             PCI_PRODUCT_DLINK_DGE560SX },
  190         { PCI_VENDOR_DLINK,             PCI_PRODUCT_DLINK_DGE560T },
  191         { PCI_VENDOR_MARVELL,           PCI_PRODUCT_MARVELL_YUKON_C032 },
  192         { PCI_VENDOR_MARVELL,           PCI_PRODUCT_MARVELL_YUKON_C033 },
  193         { PCI_VENDOR_MARVELL,           PCI_PRODUCT_MARVELL_YUKON_C034 },
  194         { PCI_VENDOR_MARVELL,           PCI_PRODUCT_MARVELL_YUKON_C036 },
  195         { PCI_VENDOR_MARVELL,           PCI_PRODUCT_MARVELL_YUKON_C042 },
  196         { PCI_VENDOR_MARVELL,           PCI_PRODUCT_MARVELL_YUKON_8021CU },
  197         { PCI_VENDOR_MARVELL,           PCI_PRODUCT_MARVELL_YUKON_8021X },
  198         { PCI_VENDOR_MARVELL,           PCI_PRODUCT_MARVELL_YUKON_8022CU },
  199         { PCI_VENDOR_MARVELL,           PCI_PRODUCT_MARVELL_YUKON_8022X },
  200         { PCI_VENDOR_MARVELL,           PCI_PRODUCT_MARVELL_YUKON_8035 },
  201         { PCI_VENDOR_MARVELL,           PCI_PRODUCT_MARVELL_YUKON_8036 },
  202         { PCI_VENDOR_MARVELL,           PCI_PRODUCT_MARVELL_YUKON_8038 },
  203         { PCI_VENDOR_MARVELL,           PCI_PRODUCT_MARVELL_YUKON_8039 },
  204         { PCI_VENDOR_MARVELL,           PCI_PRODUCT_MARVELL_YUKON_8050 },
  205         { PCI_VENDOR_MARVELL,           PCI_PRODUCT_MARVELL_YUKON_8052 },
  206         { PCI_VENDOR_MARVELL,           PCI_PRODUCT_MARVELL_YUKON_8053 },
  207         { PCI_VENDOR_MARVELL,           PCI_PRODUCT_MARVELL_YUKON_8055 },
  208         { PCI_VENDOR_MARVELL,           PCI_PRODUCT_MARVELL_YUKON_8056 },
  209         { PCI_VENDOR_MARVELL,           PCI_PRODUCT_MARVELL_YUKON_8058 },
  210         { PCI_VENDOR_MARVELL,           PCI_PRODUCT_MARVELL_YUKON_8061CU },
  211         { PCI_VENDOR_MARVELL,           PCI_PRODUCT_MARVELL_YUKON_8061X },
  212         { PCI_VENDOR_MARVELL,           PCI_PRODUCT_MARVELL_YUKON_8062CU },
  213         { PCI_VENDOR_MARVELL,           PCI_PRODUCT_MARVELL_YUKON_8062X },
  214         { PCI_VENDOR_MARVELL,           PCI_PRODUCT_MARVELL_YUKON_8070 },
  215         { PCI_VENDOR_MARVELL,           PCI_PRODUCT_MARVELL_YUKON_8071 },
  216         { PCI_VENDOR_SCHNEIDERKOCH,     PCI_PRODUCT_SCHNEIDERKOCH_SK9Sxx },
  217         { PCI_VENDOR_SCHNEIDERKOCH,     PCI_PRODUCT_SCHNEIDERKOCH_SK9Exx }
  218 };
  219 
  220 static inline u_int32_t
  221 sk_win_read_4(struct sk_softc *sc, u_int32_t reg)
  222 {
  223         return CSR_READ_4(sc, reg);
  224 }
  225 
  226 static inline u_int16_t
  227 sk_win_read_2(struct sk_softc *sc, u_int32_t reg)
  228 {
  229         return CSR_READ_2(sc, reg);
  230 }
  231 
  232 static inline u_int8_t
  233 sk_win_read_1(struct sk_softc *sc, u_int32_t reg)
  234 {
  235         return CSR_READ_1(sc, reg);
  236 }
  237 
  238 static inline void
  239 sk_win_write_4(struct sk_softc *sc, u_int32_t reg, u_int32_t x)
  240 {
  241         CSR_WRITE_4(sc, reg, x);
  242 }
  243 
  244 static inline void
  245 sk_win_write_2(struct sk_softc *sc, u_int32_t reg, u_int16_t x)
  246 {
  247         CSR_WRITE_2(sc, reg, x);
  248 }
  249 
  250 static inline void
  251 sk_win_write_1(struct sk_softc *sc, u_int32_t reg, u_int8_t x)
  252 {
  253         CSR_WRITE_1(sc, reg, x);
  254 }
  255 
  256 int
  257 msk_miibus_readreg(struct device *dev, int phy, int reg)
  258 {
  259         struct sk_if_softc *sc_if = (struct sk_if_softc *)dev;
  260         u_int16_t val;
  261         int i;
  262 
  263         SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
  264                       YU_SMICR_REGAD(reg) | YU_SMICR_OP_READ);
  265         
  266         for (i = 0; i < SK_TIMEOUT; i++) {
  267                 DELAY(1);
  268                 val = SK_YU_READ_2(sc_if, YUKON_SMICR);
  269                 if (val & YU_SMICR_READ_VALID)
  270                         break;
  271         }
  272 
  273         if (i == SK_TIMEOUT) {
  274                 printf("%s: phy failed to come ready\n",
  275                        sc_if->sk_dev.dv_xname);
  276                 return (0);
  277         }
  278         
  279         DPRINTFN(9, ("msk_miibus_readreg: i=%d, timeout=%d\n", i,
  280                      SK_TIMEOUT));
  281 
  282         val = SK_YU_READ_2(sc_if, YUKON_SMIDR);
  283 
  284         DPRINTFN(9, ("msk_miibus_readreg phy=%d, reg=%#x, val=%#x\n",
  285                      phy, reg, val));
  286 
  287         return (val);
  288 }
  289 
  290 void
  291 msk_miibus_writereg(struct device *dev, int phy, int reg, int val)
  292 {
  293         struct sk_if_softc *sc_if = (struct sk_if_softc *)dev;
  294         int i;
  295 
  296         DPRINTFN(9, ("msk_miibus_writereg phy=%d reg=%#x val=%#x\n",
  297                      phy, reg, val));
  298 
  299         SK_YU_WRITE_2(sc_if, YUKON_SMIDR, val);
  300         SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
  301                       YU_SMICR_REGAD(reg) | YU_SMICR_OP_WRITE);
  302 
  303         for (i = 0; i < SK_TIMEOUT; i++) {
  304                 DELAY(1);
  305                 if (!(SK_YU_READ_2(sc_if, YUKON_SMICR) & YU_SMICR_BUSY))
  306                         break;
  307         }
  308 
  309         if (i == SK_TIMEOUT)
  310                 printf("%s: phy write timed out\n", sc_if->sk_dev.dv_xname);
  311 }
  312 
  313 void
  314 msk_miibus_statchg(struct device *dev)
  315 {
  316         struct sk_if_softc *sc_if = (struct sk_if_softc *)dev;
  317         struct mii_data *mii = &sc_if->sk_mii;
  318         struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
  319         int gpcr;
  320 
  321         gpcr = SK_YU_READ_2(sc_if, YUKON_GPCR);
  322         gpcr &= (YU_GPCR_TXEN | YU_GPCR_RXEN);
  323 
  324         if (IFM_SUBTYPE(ife->ifm_media) != IFM_AUTO) {
  325                 /* Set speed. */
  326                 gpcr |= YU_GPCR_SPEED_DIS;
  327                 switch (IFM_SUBTYPE(mii->mii_media_active)) {
  328                 case IFM_1000_SX:
  329                 case IFM_1000_LX:
  330                 case IFM_1000_CX:
  331                 case IFM_1000_T:
  332                         gpcr |= (YU_GPCR_GIG | YU_GPCR_SPEED);
  333                         break;
  334                 case IFM_100_TX:
  335                         gpcr |= YU_GPCR_SPEED;
  336                         break;
  337                 }
  338 
  339                 /* Set duplex. */
  340                 gpcr |= YU_GPCR_DPLX_DIS;
  341                 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
  342                         gpcr |= YU_GPCR_DUPLEX;
  343 
  344                 /* Disable flow control. */
  345                 gpcr |= YU_GPCR_FCTL_DIS;
  346                 gpcr |= (YU_GPCR_FCTL_TX_DIS | YU_GPCR_FCTL_RX_DIS);
  347         }
  348 
  349         SK_YU_WRITE_2(sc_if, YUKON_GPCR, gpcr);
  350 
  351         DPRINTFN(9, ("msk_miibus_statchg: gpcr=%x\n",
  352                      SK_YU_READ_2(((struct sk_if_softc *)dev), YUKON_GPCR)));
  353 }
  354 
  355 void
  356 msk_setfilt(struct sk_if_softc *sc_if, caddr_t addr, int slot)
  357 {
  358         int base = XM_RXFILT_ENTRY(slot);
  359 
  360         SK_XM_WRITE_2(sc_if, base, *(u_int16_t *)(&addr[0]));
  361         SK_XM_WRITE_2(sc_if, base + 2, *(u_int16_t *)(&addr[2]));
  362         SK_XM_WRITE_2(sc_if, base + 4, *(u_int16_t *)(&addr[4]));
  363 }
  364 
  365 void
  366 msk_setmulti(struct sk_if_softc *sc_if)
  367 {
  368         struct ifnet *ifp= &sc_if->arpcom.ac_if;
  369         u_int32_t hashes[2] = { 0, 0 };
  370         int h;
  371         struct arpcom *ac = &sc_if->arpcom;
  372         struct ether_multi *enm;
  373         struct ether_multistep step;
  374 
  375         /* First, zot all the existing filters. */
  376         SK_YU_WRITE_2(sc_if, YUKON_MCAH1, 0);
  377         SK_YU_WRITE_2(sc_if, YUKON_MCAH2, 0);
  378         SK_YU_WRITE_2(sc_if, YUKON_MCAH3, 0);
  379         SK_YU_WRITE_2(sc_if, YUKON_MCAH4, 0);
  380 
  381 
  382         /* Now program new ones. */
  383 allmulti:
  384         if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
  385                 hashes[0] = 0xFFFFFFFF;
  386                 hashes[1] = 0xFFFFFFFF;
  387         } else {
   388                 /* Walk the multicast list and build the hash filter. */
  389                 ETHER_FIRST_MULTI(step, ac, enm);
  390                 while (enm != NULL) {
  391                         if (bcmp(enm->enm_addrlo, enm->enm_addrhi,
  392                                  ETHER_ADDR_LEN)) {
  393                                 ifp->if_flags |= IFF_ALLMULTI;
  394                                 goto allmulti;
  395                         }
  396                         h = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) &
  397                             ((1 << SK_HASH_BITS) - 1);
  398                         if (h < 32)
  399                                 hashes[0] |= (1 << h);
  400                         else
  401                                 hashes[1] |= (1 << (h - 32));
  402 
  403                         ETHER_NEXT_MULTI(step, enm);
  404                 }
  405         }
  406 
  407         SK_YU_WRITE_2(sc_if, YUKON_MCAH1, hashes[0] & 0xffff);
  408         SK_YU_WRITE_2(sc_if, YUKON_MCAH2, (hashes[0] >> 16) & 0xffff);
  409         SK_YU_WRITE_2(sc_if, YUKON_MCAH3, hashes[1] & 0xffff);
  410         SK_YU_WRITE_2(sc_if, YUKON_MCAH4, (hashes[1] >> 16) & 0xffff);
  411 }
  412 
  413 void
  414 msk_setpromisc(struct sk_if_softc *sc_if)
  415 {
  416         struct ifnet *ifp = &sc_if->arpcom.ac_if;
  417 
  418         if (ifp->if_flags & IFF_PROMISC)
  419                 SK_YU_CLRBIT_2(sc_if, YUKON_RCR,
  420                     YU_RCR_UFLEN | YU_RCR_MUFLEN);
  421         else
  422                 SK_YU_SETBIT_2(sc_if, YUKON_RCR,
  423                     YU_RCR_UFLEN | YU_RCR_MUFLEN);
  424 }
  425 
  426 int
  427 msk_init_rx_ring(struct sk_if_softc *sc_if)
  428 {
  429         struct msk_chain_data   *cd = &sc_if->sk_cdata;
  430         struct msk_ring_data    *rd = sc_if->sk_rdata;
  431         int                     i, nexti;
  432 
  433         bzero((char *)rd->sk_rx_ring,
  434             sizeof(struct msk_rx_desc) * MSK_RX_RING_CNT);
  435 
  436         for (i = 0; i < MSK_RX_RING_CNT; i++) {
  437                 cd->sk_rx_chain[i].sk_le = &rd->sk_rx_ring[i];
  438                 if (i == (MSK_RX_RING_CNT - 1))
  439                         nexti = 0;
  440                 else
  441                         nexti = i + 1;
  442                 cd->sk_rx_chain[i].sk_next = &cd->sk_rx_chain[nexti];
  443         }
  444 
  445         for (i = 0; i < MSK_RX_RING_CNT; i++) {
  446                 if (msk_newbuf(sc_if, i, NULL,
  447                     sc_if->sk_cdata.sk_rx_jumbo_map) == ENOBUFS) {
  448                         printf("%s: failed alloc of %dth mbuf\n",
  449                             sc_if->sk_dev.dv_xname, i);
  450                         return (ENOBUFS);
  451                 }
  452         }
  453 
  454         sc_if->sk_cdata.sk_rx_prod = MSK_RX_RING_CNT - 1;
  455         sc_if->sk_cdata.sk_rx_cons = 0;
  456 
  457         return (0);
  458 }
  459 
  460 int
  461 msk_init_tx_ring(struct sk_if_softc *sc_if)
  462 {
  463         struct sk_softc         *sc = sc_if->sk_softc;
  464         struct msk_chain_data   *cd = &sc_if->sk_cdata;
  465         struct msk_ring_data    *rd = sc_if->sk_rdata;
  466         bus_dmamap_t            dmamap;
  467         struct sk_txmap_entry   *entry;
  468         int                     i, nexti;
  469 
  470         bzero((char *)sc_if->sk_rdata->sk_tx_ring,
  471             sizeof(struct msk_tx_desc) * MSK_TX_RING_CNT);
  472 
  473         SIMPLEQ_INIT(&sc_if->sk_txmap_head);
  474         for (i = 0; i < MSK_TX_RING_CNT; i++) {
  475                 cd->sk_tx_chain[i].sk_le = &rd->sk_tx_ring[i];
  476                 if (i == (MSK_TX_RING_CNT - 1))
  477                         nexti = 0;
  478                 else
  479                         nexti = i + 1;
  480                 cd->sk_tx_chain[i].sk_next = &cd->sk_tx_chain[nexti];
  481 
  482                 if (bus_dmamap_create(sc->sc_dmatag, SK_JLEN, SK_NTXSEG,
  483                    SK_JLEN, 0, BUS_DMA_NOWAIT, &dmamap))
  484                         return (ENOBUFS);
  485 
  486                 entry = malloc(sizeof(*entry), M_DEVBUF, M_NOWAIT);
  487                 if (!entry) {
  488                         bus_dmamap_destroy(sc->sc_dmatag, dmamap);
  489                         return (ENOBUFS);
  490                 }
  491                 entry->dmamap = dmamap;
  492                 SIMPLEQ_INSERT_HEAD(&sc_if->sk_txmap_head, entry, link);
  493         }
  494 
  495         sc_if->sk_cdata.sk_tx_prod = 0;
  496         sc_if->sk_cdata.sk_tx_cons = 0;
  497         sc_if->sk_cdata.sk_tx_cnt = 0;
  498 
  499         MSK_CDTXSYNC(sc_if, 0, MSK_TX_RING_CNT,
  500             BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
  501 
  502         return (0);
  503 }
  504 
  505 int
  506 msk_newbuf(struct sk_if_softc *sc_if, int i, struct mbuf *m,
  507           bus_dmamap_t dmamap)
  508 {
  509         struct mbuf             *m_new = NULL;
  510         struct sk_chain         *c;
  511         struct msk_rx_desc      *r;
  512 
  513         if (m == NULL) {
  514                 caddr_t buf = NULL;
  515 
  516                 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
  517                 if (m_new == NULL)
  518                         return (ENOBUFS);
  519                 
  520                 /* Allocate the jumbo buffer */
  521                 buf = msk_jalloc(sc_if);
  522                 if (buf == NULL) {
  523                         m_freem(m_new);
  524                         DPRINTFN(1, ("%s jumbo allocation failed -- packet "
  525                             "dropped!\n", sc_if->arpcom.ac_if.if_xname));
  526                         return (ENOBUFS);
  527                 }
  528 
  529                 /* Attach the buffer to the mbuf */
  530                 m_new->m_len = m_new->m_pkthdr.len = SK_JLEN;
  531                 MEXTADD(m_new, buf, SK_JLEN, 0, msk_jfree, sc_if);
  532         } else {
  533                 /*
  534                  * We're re-using a previously allocated mbuf;
  535                  * be sure to re-init pointers and lengths to
  536                  * default values.
  537                  */
  538                 m_new = m;
  539                 m_new->m_len = m_new->m_pkthdr.len = SK_JLEN;
  540                 m_new->m_data = m_new->m_ext.ext_buf;
  541         }
  542         m_adj(m_new, ETHER_ALIGN);
  543 
  544         c = &sc_if->sk_cdata.sk_rx_chain[i];
  545         r = c->sk_le;
  546         c->sk_mbuf = m_new;
  547         r->sk_addr = htole32(dmamap->dm_segs[0].ds_addr +
  548             (((vaddr_t)m_new->m_data
  549              - (vaddr_t)sc_if->sk_cdata.sk_jumbo_buf)));
  550         r->sk_len = htole16(SK_JLEN);
  551         r->sk_ctl = 0;
  552         r->sk_opcode = SK_Y2_RXOPC_PACKET | SK_Y2_RXOPC_OWN;
  553 
  554         MSK_CDRXSYNC(sc_if, i, BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
  555 
  556         return (0);
  557 }
  558 
  559 /*
  560  * Memory management for jumbo frames.
  561  */
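       /*
        * The jumbo pool is a single DMA area of MSK_JMEM bytes, carved
        * into MSK_JSLOTS buffers of SK_JLEN bytes each and covered by one
        * map (sk_rx_jumbo_map).  Free slots sit on sk_jfree_listhead;
        * slots handed to mbufs via MEXTADD (with msk_jfree as the
        * external-free callback) move to sk_jinuse_listhead until the
        * mbuf is freed.
        */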
  562 
  563 int
  564 msk_alloc_jumbo_mem(struct sk_if_softc *sc_if)
  565 {
  566         struct sk_softc         *sc = sc_if->sk_softc;
  567         caddr_t                 ptr, kva;
  568         bus_dma_segment_t       seg;
  569         int             i, rseg, state, error;
  570         struct sk_jpool_entry   *entry;
  571 
  572         state = error = 0;
  573 
  574         /* Grab a big chunk o' storage. */
  575         if (bus_dmamem_alloc(sc->sc_dmatag, MSK_JMEM, PAGE_SIZE, 0,
  576                              &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
  577                 printf(": can't alloc rx buffers");
  578                 return (ENOBUFS);
  579         }
  580 
  581         state = 1;
  582         if (bus_dmamem_map(sc->sc_dmatag, &seg, rseg, MSK_JMEM, &kva,
  583                            BUS_DMA_NOWAIT)) {
  584                 printf(": can't map dma buffers (%d bytes)", MSK_JMEM);
  585                 error = ENOBUFS;
  586                 goto out;
  587         }
  588 
  589         state = 2;
  590         if (bus_dmamap_create(sc->sc_dmatag, MSK_JMEM, 1, MSK_JMEM, 0,
  591             BUS_DMA_NOWAIT, &sc_if->sk_cdata.sk_rx_jumbo_map)) {
  592                 printf(": can't create dma map");
  593                 error = ENOBUFS;
  594                 goto out;
  595         }
  596 
  597         state = 3;
  598         if (bus_dmamap_load(sc->sc_dmatag, sc_if->sk_cdata.sk_rx_jumbo_map,
  599                             kva, MSK_JMEM, NULL, BUS_DMA_NOWAIT)) {
  600                 printf(": can't load dma map");
  601                 error = ENOBUFS;
  602                 goto out;
  603         }
  604 
  605         state = 4;
  606         sc_if->sk_cdata.sk_jumbo_buf = (caddr_t)kva;
  607         DPRINTFN(1,("msk_jumbo_buf = 0x%08X\n", sc_if->sk_cdata.sk_jumbo_buf));
  608 
  609         LIST_INIT(&sc_if->sk_jfree_listhead);
  610         LIST_INIT(&sc_if->sk_jinuse_listhead);
  611 
  612         /*
  613          * Now divide it up into 9K pieces and save the addresses
  614          * in an array.
  615          */
  616         ptr = sc_if->sk_cdata.sk_jumbo_buf;
  617         for (i = 0; i < MSK_JSLOTS; i++) {
  618                 sc_if->sk_cdata.sk_jslots[i] = ptr;
  619                 ptr += SK_JLEN;
  620                 entry = malloc(sizeof(struct sk_jpool_entry),
  621                     M_DEVBUF, M_NOWAIT);
  622                 if (entry == NULL) {
  623                         sc_if->sk_cdata.sk_jumbo_buf = NULL;
  624                         printf(": no memory for jumbo buffer queue!");
  625                         error = ENOBUFS;
  626                         goto out;
  627                 }
  628                 entry->slot = i;
  629                 LIST_INSERT_HEAD(&sc_if->sk_jfree_listhead,
  630                                  entry, jpool_entries);
  631         }
  632 out:
  633         if (error != 0) {
  634                 switch (state) {
  635                 case 4:
  636                         bus_dmamap_unload(sc->sc_dmatag,
  637                             sc_if->sk_cdata.sk_rx_jumbo_map);
  638                 case 3:
  639                         bus_dmamap_destroy(sc->sc_dmatag,
  640                             sc_if->sk_cdata.sk_rx_jumbo_map);
  641                 case 2:
  642                         bus_dmamem_unmap(sc->sc_dmatag, kva, MSK_JMEM);
  643                 case 1:
  644                         bus_dmamem_free(sc->sc_dmatag, &seg, rseg);
  645                         break;
  646                 default:
  647                         break;
  648                 }
  649         }
  650 
  651         return (error);
  652 }
  653 
  654 /*
  655  * Allocate a jumbo buffer.
  656  */
  657 void *
  658 msk_jalloc(struct sk_if_softc *sc_if)
  659 {
  660         struct sk_jpool_entry   *entry;
  661 
  662         entry = LIST_FIRST(&sc_if->sk_jfree_listhead);
  663 
  664         if (entry == NULL)
  665                 return (NULL);
  666 
  667         LIST_REMOVE(entry, jpool_entries);
  668         LIST_INSERT_HEAD(&sc_if->sk_jinuse_listhead, entry, jpool_entries);
  669         return (sc_if->sk_cdata.sk_jslots[entry->slot]);
  670 }
  671 
  672 /*
  673  * Release a jumbo buffer.
  674  */
  675 void
  676 msk_jfree(caddr_t buf, u_int size, void *arg)
  677 {
  678         struct sk_jpool_entry *entry;
  679         struct sk_if_softc *sc;
  680         int i;
  681 
  682         /* Extract the softc struct pointer. */
  683         sc = (struct sk_if_softc *)arg;
  684 
  685         if (sc == NULL)
  686                 panic("msk_jfree: can't find softc pointer!");
  687 
  688         /* calculate the slot this buffer belongs to */
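               /* e.g. a buffer 5 * SK_JLEN bytes into sk_jumbo_buf is slot 5 */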
  689         i = ((vaddr_t)buf
  690              - (vaddr_t)sc->sk_cdata.sk_jumbo_buf) / SK_JLEN;
  691 
  692         if ((i < 0) || (i >= MSK_JSLOTS))
  693                 panic("msk_jfree: asked to free buffer that we don't manage!");
  694 
  695         entry = LIST_FIRST(&sc->sk_jinuse_listhead);
  696         if (entry == NULL)
  697                 panic("msk_jfree: buffer not in use!");
  698         entry->slot = i;
  699         LIST_REMOVE(entry, jpool_entries);
  700         LIST_INSERT_HEAD(&sc->sk_jfree_listhead, entry, jpool_entries);
  701 }
  702 
  703 /*
  704  * Set media options.
  705  */
  706 int
  707 msk_ifmedia_upd(struct ifnet *ifp)
  708 {
  709         struct sk_if_softc *sc_if = ifp->if_softc;
  710 
  711         mii_mediachg(&sc_if->sk_mii);
  712         return (0);
  713 }
  714 
  715 /*
  716  * Report current media status.
  717  */
  718 void
  719 msk_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
  720 {
  721         struct sk_if_softc *sc_if = ifp->if_softc;
  722 
  723         mii_pollstat(&sc_if->sk_mii);
  724         ifmr->ifm_active = sc_if->sk_mii.mii_media_active;
  725         ifmr->ifm_status = sc_if->sk_mii.mii_media_status;
  726 }
  727 
  728 int
  729 msk_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
  730 {
  731         struct sk_if_softc *sc_if = ifp->if_softc;
  732         struct ifreq *ifr = (struct ifreq *) data;
  733         struct ifaddr *ifa = (struct ifaddr *) data;
  734         struct mii_data *mii;
  735         int s, error = 0;
  736 
  737         s = splnet();
  738 
  739         if ((error = ether_ioctl(ifp, &sc_if->arpcom, command, data)) > 0) {
  740                 splx(s);
  741                 return (error);
  742         }
  743 
  744         switch(command) {
  745         case SIOCSIFADDR:
  746                 ifp->if_flags |= IFF_UP;
  747                 if (!(ifp->if_flags & IFF_RUNNING))
  748                         msk_init(sc_if);
  749 #ifdef INET
  750                 if (ifa->ifa_addr->sa_family == AF_INET)
  751                         arp_ifinit(&sc_if->arpcom, ifa);
  752 #endif /* INET */
  753                 break;
  754         case SIOCSIFMTU:
  755                 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ifp->if_hardmtu)
  756                         error = EINVAL;
  757                 else if (ifp->if_mtu != ifr->ifr_mtu)
  758                         ifp->if_mtu = ifr->ifr_mtu;
  759                 break;
  760         case SIOCSIFFLAGS:
  761                 if (ifp->if_flags & IFF_UP) {
  762                         if (ifp->if_flags & IFF_RUNNING &&
  763                             (sc_if->sk_if_flags ^ ifp->if_flags) &
  764                              IFF_PROMISC) {
  765                                 msk_setpromisc(sc_if);
  766                                 msk_setmulti(sc_if);
  767                         } else {
  768                                 if (!(ifp->if_flags & IFF_RUNNING))
  769                                         msk_init(sc_if);
  770                         }
  771                 } else {
  772                         if (ifp->if_flags & IFF_RUNNING)
  773                                 msk_stop(sc_if);
  774                 }
  775                 sc_if->sk_if_flags = ifp->if_flags;
  776                 break;
  777         case SIOCADDMULTI:
  778         case SIOCDELMULTI:
  779                 error = (command == SIOCADDMULTI) ?
  780                     ether_addmulti(ifr, &sc_if->arpcom) :
  781                     ether_delmulti(ifr, &sc_if->arpcom);
  782 
  783                 if (error == ENETRESET) {
  784                         /*
  785                          * Multicast list has changed; set the hardware
  786                          * filter accordingly.
  787                          */
  788                         if (ifp->if_flags & IFF_RUNNING)
  789                                 msk_setmulti(sc_if);
  790                         error = 0;
  791                 }
  792                 break;
  793         case SIOCGIFMEDIA:
  794         case SIOCSIFMEDIA:
  795                 mii = &sc_if->sk_mii;
  796                 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
  797                 break;
  798         default:
  799                 error = ENOTTY;
  800                 break;
  801         }
  802 
  803         splx(s);
  804 
  805         return (error);
  806 }
  807 
  808 /*
   809  * Probe for a supported chip. Check the PCI vendor and device
   810  * IDs against our list and return a match if we find one.
  811  */
  812 int
  813 mskc_probe(struct device *parent, void *match, void *aux)
  814 {
  815         return (pci_matchbyid((struct pci_attach_args *)aux, mskc_devices,
  816             sizeof(mskc_devices)/sizeof(mskc_devices[0])));
  817 }
  818 
  819 /*
  820  * Force the GEnesis into reset, then bring it out of reset.
  821  */
  822 void
  823 mskc_reset(struct sk_softc *sc)
  824 {
  825         u_int32_t imtimer_ticks, reg1;
  826         int reg;
  827 
  828         DPRINTFN(2, ("mskc_reset\n"));
  829 
  830         CSR_WRITE_1(sc, SK_CSR, SK_CSR_SW_RESET);
  831         CSR_WRITE_1(sc, SK_CSR, SK_CSR_MASTER_RESET);
  832 
  833         DELAY(1000);
  834         CSR_WRITE_1(sc, SK_CSR, SK_CSR_SW_UNRESET);
  835         DELAY(2);
  836         CSR_WRITE_1(sc, SK_CSR, SK_CSR_MASTER_UNRESET);
  837 
  838         sk_win_write_1(sc, SK_TESTCTL1, 2);
  839 
  840         reg1 = sk_win_read_4(sc, SK_Y2_PCI_REG(SK_PCI_OURREG1));
  841         if (sc->sk_type == SK_YUKON_XL && sc->sk_rev > SK_YUKON_XL_REV_A1)
  842                 reg1 |= (SK_Y2_REG1_PHY1_COMA | SK_Y2_REG1_PHY2_COMA);
  843         else
  844                 reg1 &= ~(SK_Y2_REG1_PHY1_COMA | SK_Y2_REG1_PHY2_COMA);
  845         sk_win_write_4(sc, SK_Y2_PCI_REG(SK_PCI_OURREG1), reg1);
  846 
  847         if (sc->sk_type == SK_YUKON_XL && sc->sk_rev > SK_YUKON_XL_REV_A1)
  848                 sk_win_write_1(sc, SK_Y2_CLKGATE,
  849                     SK_Y2_CLKGATE_LINK1_GATE_DIS |
  850                     SK_Y2_CLKGATE_LINK2_GATE_DIS |
  851                     SK_Y2_CLKGATE_LINK1_CORE_DIS |
  852                     SK_Y2_CLKGATE_LINK2_CORE_DIS |
  853                     SK_Y2_CLKGATE_LINK1_PCI_DIS | SK_Y2_CLKGATE_LINK2_PCI_DIS);
  854         else
  855                 sk_win_write_1(sc, SK_Y2_CLKGATE, 0);
  856 
  857         CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_SET);
  858         CSR_WRITE_2(sc, SK_LINK_CTRL + SK_WIN_LEN, SK_LINK_RESET_SET);
  859         DELAY(1000);
  860         CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_CLEAR);
  861         CSR_WRITE_2(sc, SK_LINK_CTRL + SK_WIN_LEN, SK_LINK_RESET_CLEAR);
  862 
  863         sk_win_write_1(sc, SK_TESTCTL1, 1);
  864 
  865         DPRINTFN(2, ("mskc_reset: sk_csr=%x\n", CSR_READ_1(sc, SK_CSR)));
  866         DPRINTFN(2, ("mskc_reset: sk_link_ctrl=%x\n",
  867                      CSR_READ_2(sc, SK_LINK_CTRL)));
  868 
  869         /* Disable ASF */
  870         CSR_WRITE_1(sc, SK_Y2_ASF_CSR, SK_Y2_ASF_RESET);
  871         CSR_WRITE_2(sc, SK_CSR, SK_CSR_ASF_OFF);
  872 
  873         /* Clear I2C IRQ noise */
  874         CSR_WRITE_4(sc, SK_I2CHWIRQ, 1);
  875 
  876         /* Disable hardware timer */
  877         CSR_WRITE_1(sc, SK_TIMERCTL, SK_IMCTL_STOP);
  878         CSR_WRITE_1(sc, SK_TIMERCTL, SK_IMCTL_IRQ_CLEAR);
  879 
  880         /* Disable descriptor polling */
  881         CSR_WRITE_4(sc, SK_DPT_TIMER_CTRL, SK_DPT_TCTL_STOP);
  882 
  883         /* Disable time stamps */
  884         CSR_WRITE_1(sc, SK_TSTAMP_CTL, SK_TSTAMP_STOP);
  885         CSR_WRITE_1(sc, SK_TSTAMP_CTL, SK_TSTAMP_IRQ_CLEAR);
  886 
  887         /* Enable RAM interface */
  888         sk_win_write_1(sc, SK_RAMCTL, SK_RAMCTL_UNRESET);
  889         for (reg = SK_TO0;reg <= SK_TO11; reg++)
  890                 sk_win_write_1(sc, reg, 36);
  891         sk_win_write_1(sc, SK_RAMCTL + (SK_WIN_LEN / 2), SK_RAMCTL_UNRESET);
  892         for (reg = SK_TO0;reg <= SK_TO11; reg++)
  893                 sk_win_write_1(sc, reg + (SK_WIN_LEN / 2), 36);
  894 
  895         /*
  896          * Configure interrupt moderation. The moderation timer
  897          * defers interrupts specified in the interrupt moderation
  898          * timer mask based on the timeout specified in the interrupt
  899          * moderation timer init register. Each bit in the timer
  900          * register represents one tick, so to specify a timeout in
  901          * microseconds, we have to multiply by the correct number of
  902          * ticks-per-microsecond.
  903          */
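               /*
                * For example, if imtimer_ticks ends up as 125 (a 125 MHz
                * timer clock, i.e. 125 ticks per microsecond), the
                * SK_IM_USECS(4) value programmed below works out to
                * 4 * 125 = 500 timer ticks, i.e. a 4 usec moderation delay.
                */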
  904         switch (sc->sk_type) {
  905         case SK_YUKON_EC:
  906         case SK_YUKON_XL:
  907         case SK_YUKON_FE:
  908                 imtimer_ticks = SK_IMTIMER_TICKS_YUKON_EC;
  909                 break;
  910         default:
  911                 imtimer_ticks = SK_IMTIMER_TICKS_YUKON;
  912         }
  913 
  914         /* Reset status ring. */
  915         bzero((char *)sc->sk_status_ring,
  916             MSK_STATUS_RING_CNT * sizeof(struct msk_status_desc));
  917         sc->sk_status_idx = 0;
  918 
  919         sk_win_write_4(sc, SK_STAT_BMU_CSR, SK_STAT_BMU_RESET);
  920         sk_win_write_4(sc, SK_STAT_BMU_CSR, SK_STAT_BMU_UNRESET);
  921 
  922         sk_win_write_2(sc, SK_STAT_BMU_LIDX, MSK_STATUS_RING_CNT - 1);
  923         sk_win_write_4(sc, SK_STAT_BMU_ADDRLO,
  924             sc->sk_status_map->dm_segs[0].ds_addr);
  925         sk_win_write_4(sc, SK_STAT_BMU_ADDRHI,
  926             (u_int64_t)sc->sk_status_map->dm_segs[0].ds_addr >> 32);
  927         sk_win_write_2(sc, SK_STAT_BMU_TX_THRESH, 10);
  928         sk_win_write_1(sc, SK_STAT_BMU_FIFOWM, 16);
  929         sk_win_write_1(sc, SK_STAT_BMU_FIFOIWM, 16);
  930 
  931 #if 0
  932         sk_win_write_4(sc, SK_Y2_LEV_TIMERINIT, SK_IM_USECS(100));
  933         sk_win_write_4(sc, 0x0ec0, SK_IM_USECS(1000));
  934 
  935         sk_win_write_4(sc, 0x0ed0, SK_IM_USECS(20));
  936 #else
  937         sk_win_write_4(sc, SK_Y2_ISR_ITIMERINIT, SK_IM_USECS(4));
  938 #endif
  939 
  940         sk_win_write_4(sc, SK_STAT_BMU_CSR, SK_STAT_BMU_ON);
  941 
  942         sk_win_write_1(sc, SK_Y2_LEV_ITIMERCTL, SK_IMCTL_START);
  943         sk_win_write_1(sc, SK_Y2_TX_ITIMERCTL, SK_IMCTL_START);
  944         sk_win_write_1(sc, SK_Y2_ISR_ITIMERCTL, SK_IMCTL_START);
  945 }
  946 
  947 int
  948 msk_probe(struct device *parent, void *match, void *aux)
  949 {
  950         struct skc_attach_args *sa = aux;
  951 
  952         if (sa->skc_port != SK_PORT_A && sa->skc_port != SK_PORT_B)
  953                 return (0);
  954 
  955         switch (sa->skc_type) {
  956         case SK_YUKON_XL:
  957         case SK_YUKON_EC_U:
  958         case SK_YUKON_EX:
  959         case SK_YUKON_EC:
  960         case SK_YUKON_FE:
  961                 return (1);
  962         }
  963 
  964         return (0);
  965 }
  966 
  967 void
  968 msk_reset(struct sk_if_softc *sc_if)
  969 {
  970         /* GMAC and GPHY Reset */
  971         SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET);
  972         SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, SK_GPHY_RESET_SET);
  973         DELAY(1000);
  974         SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, SK_GPHY_RESET_CLEAR);
  975         SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_LOOP_OFF |
  976                       SK_GMAC_PAUSE_ON | SK_GMAC_RESET_CLEAR);
  977 }
  978 
  979 /*
  980  * Each XMAC chip is attached as a separate logical IP interface.
  981  * Single port cards will have only one logical interface of course.
  982  */
  983 void
  984 msk_attach(struct device *parent, struct device *self, void *aux)
  985 {
  986         struct sk_if_softc *sc_if = (struct sk_if_softc *) self;
  987         struct sk_softc *sc = (struct sk_softc *)parent;
  988         struct skc_attach_args *sa = aux;
  989         struct ifnet *ifp;
  990         caddr_t kva;
  991         bus_dma_segment_t seg;
  992         int i, rseg;
  993         u_int32_t chunk;
  994         int mii_flags;
  995 
  996         sc_if->sk_port = sa->skc_port;
  997         sc_if->sk_softc = sc;
  998         sc->sk_if[sa->skc_port] = sc_if;
  999 
 1000         DPRINTFN(2, ("begin msk_attach: port=%d\n", sc_if->sk_port));
 1001 
 1002         /*
 1003          * Get station address for this interface. Note that
 1004          * dual port cards actually come with three station
 1005          * addresses: one for each port, plus an extra. The
 1006          * extra one is used by the SysKonnect driver software
 1007          * as a 'virtual' station address for when both ports
 1008          * are operating in failover mode. Currently we don't
 1009          * use this extra address.
 1010          */
 1011         for (i = 0; i < ETHER_ADDR_LEN; i++)
 1012                 sc_if->arpcom.ac_enaddr[i] =
 1013                     sk_win_read_1(sc, SK_MAC0_0 + (sa->skc_port * 8) + i);
 1014 
 1015         printf(": address %s\n",
 1016             ether_sprintf(sc_if->arpcom.ac_enaddr));
 1017 
 1018         /*
 1019          * Set up RAM buffer addresses. The Yukon2 has a small amount
 1020          * of SRAM on it, somewhere between 4K and 48K.  We need to
 1021          * divide this up between the transmitter and receiver.  We
 1022          * give the receiver 2/3 of the memory (rounded down), and the
 1023          * transmitter whatever remains.
 1024          */
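               /*
                * For example, with sk_ramsize = 48K, sk_ramsize /
                * sizeof(u_int64_t) is 6144, so the receiver gets units
                * 0-4095 (2/3, already aligned by the & ~0xff) and the
                * transmitter gets the remaining units 4096-6143.
                */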
 1025         chunk = (2 * (sc->sk_ramsize / sizeof(u_int64_t)) / 3) & ~0xff;
 1026         sc_if->sk_rx_ramstart = 0;
 1027         sc_if->sk_rx_ramend = sc_if->sk_rx_ramstart + chunk - 1;
 1028         chunk = (sc->sk_ramsize / sizeof(u_int64_t)) - chunk;
 1029         sc_if->sk_tx_ramstart = sc_if->sk_rx_ramend + 1;
 1030         sc_if->sk_tx_ramend = sc_if->sk_tx_ramstart + chunk - 1;
 1031 
 1032         DPRINTFN(2, ("msk_attach: rx_ramstart=%#x rx_ramend=%#x\n"
 1033                      "           tx_ramstart=%#x tx_ramend=%#x\n",
 1034                      sc_if->sk_rx_ramstart, sc_if->sk_rx_ramend,
 1035                      sc_if->sk_tx_ramstart, sc_if->sk_tx_ramend));
 1036 
 1037         /* Allocate the descriptor queues. */
 1038         if (bus_dmamem_alloc(sc->sc_dmatag, sizeof(struct msk_ring_data),
 1039             PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
 1040                 printf(": can't alloc rx buffers\n");
 1041                 goto fail;
 1042         }
 1043         if (bus_dmamem_map(sc->sc_dmatag, &seg, rseg,
 1044             sizeof(struct msk_ring_data), &kva, BUS_DMA_NOWAIT)) {
 1045                 printf(": can't map dma buffers (%lu bytes)\n",
 1046                        (ulong)sizeof(struct msk_ring_data));
 1047                 goto fail_1;
 1048         }
 1049         if (bus_dmamap_create(sc->sc_dmatag, sizeof(struct msk_ring_data), 1,
 1050             sizeof(struct msk_ring_data), 0, BUS_DMA_NOWAIT,
 1051             &sc_if->sk_ring_map)) {
 1052                 printf(": can't create dma map\n");
 1053                 goto fail_2;
 1054         }
 1055         if (bus_dmamap_load(sc->sc_dmatag, sc_if->sk_ring_map, kva,
 1056             sizeof(struct msk_ring_data), NULL, BUS_DMA_NOWAIT)) {
 1057                 printf(": can't load dma map\n");
 1058                 goto fail_3;
 1059         }
 1060         sc_if->sk_rdata = (struct msk_ring_data *)kva;
 1061         bzero(sc_if->sk_rdata, sizeof(struct msk_ring_data));
 1062 
 1063         /* Try to allocate memory for jumbo buffers. */
 1064         if (msk_alloc_jumbo_mem(sc_if)) {
 1065                 printf(": jumbo buffer allocation failed\n");
 1066                 goto fail_3;
 1067         }
 1068 
 1069         ifp = &sc_if->arpcom.ac_if;
 1070         ifp->if_softc = sc_if;
 1071         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
 1072         ifp->if_ioctl = msk_ioctl;
 1073         ifp->if_start = msk_start;
 1074         ifp->if_watchdog = msk_watchdog;
 1075         ifp->if_baudrate = 1000000000;
 1076         if (sc->sk_type != SK_YUKON_FE)
 1077                 ifp->if_hardmtu = SK_JUMBO_MTU;
 1078         IFQ_SET_MAXLEN(&ifp->if_snd, MSK_TX_RING_CNT - 1);
 1079         IFQ_SET_READY(&ifp->if_snd);
 1080         bcopy(sc_if->sk_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
 1081 
 1082         ifp->if_capabilities = IFCAP_VLAN_MTU;
 1083 
 1084         msk_reset(sc_if);
 1085 
 1086         /*
 1087          * Do miibus setup.
 1088          */
 1089         msk_init_yukon(sc_if);
 1090 
 1091         DPRINTFN(2, ("msk_attach: 1\n"));
 1092 
 1093         sc_if->sk_mii.mii_ifp = ifp;
 1094         sc_if->sk_mii.mii_readreg = msk_miibus_readreg;
 1095         sc_if->sk_mii.mii_writereg = msk_miibus_writereg;
 1096         sc_if->sk_mii.mii_statchg = msk_miibus_statchg;
 1097 
 1098         ifmedia_init(&sc_if->sk_mii.mii_media, 0,
 1099             msk_ifmedia_upd, msk_ifmedia_sts);
 1100         mii_flags = MIIF_DOPAUSE;
 1101         if (sc->sk_fibertype)
 1102                 mii_flags |= MIIF_HAVEFIBER;
 1103         mii_attach(self, &sc_if->sk_mii, 0xffffffff, 0,
 1104             MII_OFFSET_ANY, mii_flags);
 1105         if (LIST_FIRST(&sc_if->sk_mii.mii_phys) == NULL) {
 1106                 printf("%s: no PHY found!\n", sc_if->sk_dev.dv_xname);
 1107                 ifmedia_add(&sc_if->sk_mii.mii_media, IFM_ETHER|IFM_MANUAL,
 1108                             0, NULL);
 1109                 ifmedia_set(&sc_if->sk_mii.mii_media, IFM_ETHER|IFM_MANUAL);
 1110         } else
 1111                 ifmedia_set(&sc_if->sk_mii.mii_media, IFM_ETHER|IFM_AUTO);
 1112 
 1113         timeout_set(&sc_if->sk_tick_ch, msk_tick, sc_if);
 1114 
 1115         /*
 1116          * Call MI attach routines.
 1117          */
 1118         if_attach(ifp);
 1119         ether_ifattach(ifp);
 1120 
 1121         shutdownhook_establish(mskc_shutdown, sc);
 1122 
 1123         DPRINTFN(2, ("msk_attach: end\n"));
 1124         return;
 1125 
 1126 fail_3:
 1127         bus_dmamap_destroy(sc->sc_dmatag, sc_if->sk_ring_map);
 1128 fail_2:
 1129         bus_dmamem_unmap(sc->sc_dmatag, kva, sizeof(struct msk_ring_data));
 1130 fail_1:
 1131         bus_dmamem_free(sc->sc_dmatag, &seg, rseg);
 1132 fail:
 1133         sc->sk_if[sa->skc_port] = NULL;
 1134 }
 1135 
 1136 int
 1137 mskcprint(void *aux, const char *pnp)
 1138 {
 1139         struct skc_attach_args *sa = aux;
 1140 
 1141         if (pnp)
 1142                 printf("sk port %c at %s",
 1143                     (sa->skc_port == SK_PORT_A) ? 'A' : 'B', pnp);
 1144         else
 1145                 printf(" port %c", (sa->skc_port == SK_PORT_A) ? 'A' : 'B');
 1146         return (UNCONF);
 1147 }
 1148 
 1149 /*
  1150  * Attach the controller: map the registers, set up the status ring
  1151  * and interrupt, then attach a logical interface for each port.
 1152  */
 1153 void
 1154 mskc_attach(struct device *parent, struct device *self, void *aux)
 1155 {
 1156         struct sk_softc *sc = (struct sk_softc *)self;
 1157         struct pci_attach_args *pa = aux;
 1158         struct skc_attach_args skca;
 1159         pci_chipset_tag_t pc = pa->pa_pc;
 1160         pcireg_t command, memtype;
 1161         pci_intr_handle_t ih;
 1162         const char *intrstr = NULL;
 1163         bus_size_t size;
 1164         u_int8_t hw, pmd;
 1165         char *revstr = NULL;
 1166         caddr_t kva;
 1167         bus_dma_segment_t seg;
 1168         int rseg;
 1169 
 1170         DPRINTFN(2, ("begin mskc_attach\n"));
 1171 
 1172         /*
 1173          * Handle power management nonsense.
 1174          */
 1175         command = pci_conf_read(pc, pa->pa_tag, SK_PCI_CAPID) & 0x000000FF;
 1176 
 1177         if (command == 0x01) {
 1178                 command = pci_conf_read(pc, pa->pa_tag, SK_PCI_PWRMGMTCTRL);
 1179                 if (command & SK_PSTATE_MASK) {
 1180                         u_int32_t               iobase, membase, irq;
 1181 
 1182                         /* Save important PCI config data. */
 1183                         iobase = pci_conf_read(pc, pa->pa_tag, SK_PCI_LOIO);
 1184                         membase = pci_conf_read(pc, pa->pa_tag, SK_PCI_LOMEM);
 1185                         irq = pci_conf_read(pc, pa->pa_tag, SK_PCI_INTLINE);
 1186 
 1187                         /* Reset the power state. */
 1188                         printf("%s chip is in D%d power mode "
 1189                             "-- setting to D0\n", sc->sk_dev.dv_xname,
 1190                             command & SK_PSTATE_MASK);
 1191                         command &= 0xFFFFFFFC;
 1192                         pci_conf_write(pc, pa->pa_tag,
 1193                             SK_PCI_PWRMGMTCTRL, command);
 1194 
 1195                         /* Restore PCI config data. */
 1196                         pci_conf_write(pc, pa->pa_tag, SK_PCI_LOIO, iobase);
 1197                         pci_conf_write(pc, pa->pa_tag, SK_PCI_LOMEM, membase);
 1198                         pci_conf_write(pc, pa->pa_tag, SK_PCI_INTLINE, irq);
 1199                 }
 1200         }
 1201 
 1202         /*
 1203          * Map control/status registers.
 1204          */
 1205 
 1206         memtype = pci_mapreg_type(pc, pa->pa_tag, SK_PCI_LOMEM);
 1207         switch (memtype) {
 1208         case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
 1209         case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
 1210                 if (pci_mapreg_map(pa, SK_PCI_LOMEM,
 1211                                    memtype, 0, &sc->sk_btag, &sc->sk_bhandle,
 1212                                    NULL, &size, 0) == 0)
 1213                         break;
 1214         default:
 1215                 printf(": can't map mem space\n");
 1216                 return;
 1217         }
 1218 
 1219         sc->sc_dmatag = pa->pa_dmat;
 1220 
 1221         sc->sk_type = sk_win_read_1(sc, SK_CHIPVER);
 1222         sc->sk_rev = (sk_win_read_1(sc, SK_CONFIG) >> 4);
 1223 
 1224         /* bail out here if chip is not recognized */
 1225         if (!(SK_IS_YUKON2(sc))) {
 1226                 printf(": unknown chip type: %d\n", sc->sk_type);
 1227                 goto fail_1;
 1228         }
 1229         DPRINTFN(2, ("mskc_attach: allocate interrupt\n"));
 1230 
 1231         /* Allocate interrupt */
 1232         if (pci_intr_map(pa, &ih)) {
 1233                 printf(": couldn't map interrupt\n");
 1234                 goto fail_1;
 1235         }
 1236 
 1237         intrstr = pci_intr_string(pc, ih);
 1238         sc->sk_intrhand = pci_intr_establish(pc, ih, IPL_NET, msk_intr, sc,
 1239             self->dv_xname);
 1240         if (sc->sk_intrhand == NULL) {
 1241                 printf(": couldn't establish interrupt");
 1242                 if (intrstr != NULL)
 1243                         printf(" at %s", intrstr);
 1244                 printf("\n");
 1245                 goto fail_1;
 1246         }
 1247 
 1248         if (bus_dmamem_alloc(sc->sc_dmatag,
 1249             MSK_STATUS_RING_CNT * sizeof(struct msk_status_desc),
 1250             PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
 1251                 printf(": can't alloc status buffers\n");
 1252                 goto fail_2;
 1253         }
 1254 
 1255         if (bus_dmamem_map(sc->sc_dmatag, &seg, rseg,
 1256             MSK_STATUS_RING_CNT * sizeof(struct msk_status_desc),
 1257             &kva, BUS_DMA_NOWAIT)) {
 1258                 printf(": can't map dma buffers (%lu bytes)\n",
 1259                     (ulong)(MSK_STATUS_RING_CNT * sizeof(struct msk_status_desc)));
 1260                 goto fail_3;
 1261         }
 1262         if (bus_dmamap_create(sc->sc_dmatag,
 1263             MSK_STATUS_RING_CNT * sizeof(struct msk_status_desc), 1,
 1264             MSK_STATUS_RING_CNT * sizeof(struct msk_status_desc), 0,
 1265             BUS_DMA_NOWAIT, &sc->sk_status_map)) {
 1266                 printf(": can't create dma map\n");
 1267                 goto fail_4;
 1268         }
 1269         if (bus_dmamap_load(sc->sc_dmatag, sc->sk_status_map, kva,
 1270             MSK_STATUS_RING_CNT * sizeof(struct msk_status_desc),
 1271             NULL, BUS_DMA_NOWAIT)) {
 1272                 printf(": can't load dma map\n");
 1273                 goto fail_5;
 1274         }
 1275         sc->sk_status_ring = (struct msk_status_desc *)kva;
 1276         bzero(sc->sk_status_ring,
 1277             MSK_STATUS_RING_CNT * sizeof(struct msk_status_desc));
 1278 
 1279         /* Reset the adapter. */
 1280         mskc_reset(sc);
 1281 
 1282         sc->sk_ramsize = sk_win_read_1(sc, SK_EPROM0) * 4096;
 1283         DPRINTFN(2, ("mskc_attach: ramsize=%dK\n", sc->sk_ramsize / 1024));
 1284 
 1285         pmd = sk_win_read_1(sc, SK_PMDTYPE);
 1286         if (pmd == 'L' || pmd == 'S' || pmd == 'P')
 1287                 sc->sk_fibertype = 1;
 1288 
 1289         switch (sc->sk_type) {
 1290         case SK_YUKON_XL:
 1291                 sc->sk_name = "Yukon-2 XL";
 1292                 break;
 1293         case SK_YUKON_EC_U:
 1294                 sc->sk_name = "Yukon-2 EC Ultra";
 1295                 break;
 1296         case SK_YUKON_EX:
 1297                 sc->sk_name = "Yukon-2 Extreme";
 1298                 break;
 1299         case SK_YUKON_EC:
 1300                 sc->sk_name = "Yukon-2 EC";
 1301                 break;
 1302         case SK_YUKON_FE:
 1303                 sc->sk_name = "Yukon-2 FE";
 1304                 break;
 1305         default:
 1306                 sc->sk_name = "Yukon (Unknown)";
 1307         }
 1308 
 1309         if (sc->sk_type == SK_YUKON_XL) {
 1310                 switch (sc->sk_rev) {
 1311                 case SK_YUKON_XL_REV_A0:
 1312                         revstr = "A0";
 1313                         break;
 1314                 case SK_YUKON_XL_REV_A1:
 1315                         revstr = "A1";
 1316                         break;
 1317                 case SK_YUKON_XL_REV_A2:
 1318                         revstr = "A2";
 1319                         break;
 1320                 case SK_YUKON_XL_REV_A3:
 1321                         revstr = "A3";
 1322                         break;
 1323                 default:
 1324                         ;
 1325                 }
 1326         }
 1327 
 1328         if (sc->sk_type == SK_YUKON_EC) {
 1329                 switch (sc->sk_rev) {
 1330                 case SK_YUKON_EC_REV_A1:
 1331                         revstr = "A1";
 1332                         break;
 1333                 case SK_YUKON_EC_REV_A2:
 1334                         revstr = "A2";
 1335                         break;
 1336                 case SK_YUKON_EC_REV_A3:
 1337                         revstr = "A3";
 1338                         break;
 1339                 default:
 1340                         ;
 1341                 }
 1342         }
 1343 
 1344         if (sc->sk_type == SK_YUKON_EC_U) {
 1345                 switch (sc->sk_rev) {
 1346                 case SK_YUKON_EC_U_REV_A0:
 1347                         revstr = "A0";
 1348                         break;
 1349                 case SK_YUKON_EC_U_REV_A1:
 1350                         revstr = "A1";
 1351                         break;
 1352                 default:
 1353                         ;
 1354                 }
 1355         }
 1356 
 1357         /* Announce the product name. */
 1358         printf(", %s", sc->sk_name);
 1359         if (revstr != NULL)
 1360                 printf(" rev. %s", revstr);
 1361         printf(" (0x%x): %s\n", sc->sk_rev, intrstr);
 1362 
 1363         sc->sk_macs = 1;
 1364 
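              /*
               * See whether this is a dual-link part and, if so, whether
               * the second link is active (not clock gated); one msk(4)
               * port is attached per usable MAC.
               */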
 1365         hw = sk_win_read_1(sc, SK_Y2_HWRES);
 1366         if ((hw & SK_Y2_HWRES_LINK_MASK) == SK_Y2_HWRES_LINK_DUAL) {
 1367                 if ((sk_win_read_1(sc, SK_Y2_CLKGATE) &
 1368                     SK_Y2_CLKGATE_LINK2_INACTIVE) == 0)
 1369                         sc->sk_macs++;
 1370         }
 1371 
 1372         skca.skc_port = SK_PORT_A;
 1373         skca.skc_type = sc->sk_type;
 1374         skca.skc_rev = sc->sk_rev;
 1375         (void)config_found(&sc->sk_dev, &skca, mskcprint);
 1376 
 1377         if (sc->sk_macs > 1) {
 1378                 skca.skc_port = SK_PORT_B;
 1379                 skca.skc_type = sc->sk_type;
 1380                 skca.skc_rev = sc->sk_rev;
 1381                 (void)config_found(&sc->sk_dev, &skca, mskcprint);
 1382         }
 1383 
 1384         /* Turn on the 'driver is loaded' LED. */
 1385         CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON);
 1386 
 1387         return;
 1388 
 1389 fail_5:
 1390         bus_dmamap_destroy(sc->sc_dmatag, sc->sk_status_map);
 1391 fail_4:
 1392         bus_dmamem_unmap(sc->sc_dmatag, kva, 
 1393             MSK_STATUS_RING_CNT * sizeof(struct msk_status_desc));
 1394 fail_3:
 1395         bus_dmamem_free(sc->sc_dmatag, &seg, rseg);
 1396 fail_2:
 1397         pci_intr_disestablish(pc, sc->sk_intrhand);
 1398 fail_1:
 1399         bus_space_unmap(sc->sk_btag, sc->sk_bhandle, size);
 1400 }
 1401 
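      /*
       * Set up transmit descriptors for an mbuf chain: load the chain
       * into a DMA map and build one list element per DMA segment, the
       * first tagged as a PACKET descriptor and the rest as BUFFER
       * descriptors.
       */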
 1402 int
 1403 msk_encap(struct sk_if_softc *sc_if, struct mbuf *m_head, u_int32_t *txidx)
 1404 {
 1405         struct sk_softc         *sc = sc_if->sk_softc;
 1406         struct msk_tx_desc              *f = NULL;
 1407         u_int32_t               frag, cur;
 1408         int                     i;
 1409         struct sk_txmap_entry   *entry;
 1410         bus_dmamap_t            txmap;
 1411 
 1412         DPRINTFN(2, ("msk_encap\n"));
 1413 
 1414         entry = SIMPLEQ_FIRST(&sc_if->sk_txmap_head);
 1415         if (entry == NULL) {
 1416                 DPRINTFN(2, ("msk_encap: no txmap available\n"));
 1417                 return (ENOBUFS);
 1418         }
 1419         txmap = entry->dmamap;
 1420 
 1421         cur = frag = *txidx;
 1422 
 1423 #ifdef MSK_DEBUG
 1424         if (mskdebug >= 2)
 1425                 msk_dump_mbuf(m_head);
 1426 #endif
 1427 
 1428         /*
 1429          * Start packing the mbufs in this chain into
 1430          * the fragment pointers. Stop when we run out
 1431          * of fragments or hit the end of the mbuf chain.
 1432          */
 1433         if (bus_dmamap_load_mbuf(sc->sc_dmatag, txmap, m_head,
 1434             BUS_DMA_NOWAIT)) {
 1435                 DPRINTFN(2, ("msk_encap: dmamap failed\n"));
 1436                 return (ENOBUFS);
 1437         }
 1438 
 1439         if (txmap->dm_nsegs > (MSK_TX_RING_CNT - sc_if->sk_cdata.sk_tx_cnt - 2)) {
 1440                 DPRINTFN(2, ("msk_encap: too few descriptors free\n"));
 1441                 bus_dmamap_unload(sc->sc_dmatag, txmap);
 1442                 return (ENOBUFS);
 1443         }
 1444 
 1445         DPRINTFN(2, ("msk_encap: dm_nsegs=%d\n", txmap->dm_nsegs));
 1446 
 1447         /* Sync the DMA map. */
 1448         bus_dmamap_sync(sc->sc_dmatag, txmap, 0, txmap->dm_mapsize,
 1449             BUS_DMASYNC_PREWRITE);
 1450 
 1451         for (i = 0; i < txmap->dm_nsegs; i++) {
 1452                 f = &sc_if->sk_rdata->sk_tx_ring[frag];
 1453                 f->sk_addr = htole32(txmap->dm_segs[i].ds_addr);
 1454                 f->sk_len = htole16(txmap->dm_segs[i].ds_len);
 1455                 f->sk_ctl = 0;
 1456                 if (i == 0)
 1457                         f->sk_opcode = SK_Y2_TXOPC_PACKET;
 1458                 else
 1459                         f->sk_opcode = SK_Y2_TXOPC_BUFFER | SK_Y2_TXOPC_OWN;
 1460                 cur = frag;
 1461                 SK_INC(frag, MSK_TX_RING_CNT);
 1462         }
 1463 
 1464         sc_if->sk_cdata.sk_tx_chain[cur].sk_mbuf = m_head;
 1465         SIMPLEQ_REMOVE_HEAD(&sc_if->sk_txmap_head, link);
 1466 
 1467         sc_if->sk_cdata.sk_tx_map[cur] = entry;
 1468         sc_if->sk_rdata->sk_tx_ring[cur].sk_ctl |= SK_Y2_TXCTL_LASTFRAG;
 1469 
 1470         /* Sync descriptors before handing to chip */
 1471         MSK_CDTXSYNC(sc_if, *txidx, txmap->dm_nsegs,
 1472             BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
 1473 
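              /*
               * Only now is the OWN bit set on the first descriptor, so
               * the chip cannot start fetching a partially built chain.
               */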
 1474         sc_if->sk_rdata->sk_tx_ring[*txidx].sk_opcode |= SK_Y2_TXOPC_OWN;
 1475 
 1476         /* Sync first descriptor to hand it off */
 1477         MSK_CDTXSYNC(sc_if, *txidx, 1,
 1478             BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
 1479 
 1480         sc_if->sk_cdata.sk_tx_cnt += txmap->dm_nsegs;
 1481 
 1482 #ifdef MSK_DEBUG
 1483         if (mskdebug >= 2) {
 1484                 struct msk_tx_desc *le;
 1485                 u_int32_t idx;
 1486                 for (idx = *txidx; idx != frag; SK_INC(idx, MSK_TX_RING_CNT)) {
 1487                         le = &sc_if->sk_rdata->sk_tx_ring[idx];
 1488                         msk_dump_txdesc(le, idx);
 1489                 }
 1490         }
 1491 #endif
 1492 
 1493         *txidx = frag;
 1494 
 1495         DPRINTFN(2, ("msk_encap: completed successfully\n"));
 1496 
 1497         return (0);
 1498 }
 1499 
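      /*
       * Transmit path entry point: pull packets off the send queue,
       * encapsulate them with msk_encap(), then hand the new descriptors
       * to the chip by advancing the prefetch unit's put index.
       */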
 1500 void
 1501 msk_start(struct ifnet *ifp)
 1502 {
 1503         struct sk_if_softc      *sc_if = ifp->if_softc;
 1504         struct mbuf             *m_head = NULL;
 1505         u_int32_t               idx = sc_if->sk_cdata.sk_tx_prod;
 1506         int                     pkts = 0;
 1507 
 1508         DPRINTFN(2, ("msk_start\n"));
 1509 
 1510         while (sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf == NULL) {
 1511                 IFQ_POLL(&ifp->if_snd, m_head);
 1512                 if (m_head == NULL)
 1513                         break;
 1514 
 1515                 /*
 1516                  * Pack the data into the transmit ring. If we
 1517                  * don't have room, set the OACTIVE flag and wait
 1518                  * for the NIC to drain the ring.
 1519                  */
 1520                 if (msk_encap(sc_if, m_head, &idx)) {
 1521                         ifp->if_flags |= IFF_OACTIVE;
 1522                         break;
 1523                 }
 1524 
 1525                 /* now we are committed to transmit the packet */
 1526                 IFQ_DEQUEUE(&ifp->if_snd, m_head);
 1527                 pkts++;
 1528 
 1529                 /*
 1530                  * If there's a BPF listener, bounce a copy of this frame
 1531                  * to him.
 1532                  */
 1533 #if NBPFILTER > 0
 1534                 if (ifp->if_bpf)
 1535                         bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
 1536 #endif
 1537         }
 1538         if (pkts == 0)
 1539                 return;
 1540 
 1541         /* Transmit */
 1542         if (idx != sc_if->sk_cdata.sk_tx_prod) {
 1543                 sc_if->sk_cdata.sk_tx_prod = idx;
 1544                 SK_IF_WRITE_2(sc_if, 1, SK_TXQA1_Y2_PREF_PUTIDX, idx);
 1545 
 1546                 /* Set a timeout in case the chip goes out to lunch. */
 1547                 ifp->if_timer = 5;
 1548         }
 1549 }
 1550 
 1551 void
 1552 msk_watchdog(struct ifnet *ifp)
 1553 {
 1554         struct sk_if_softc *sc_if = ifp->if_softc;
 1555 
 1556         /*
 1557          * Reclaim first as there is a possibility of losing Tx completion
 1558          * interrupts.
 1559          */
 1560         msk_txeof(sc_if);
 1561         if (sc_if->sk_cdata.sk_tx_cnt != 0) {
 1562                 printf("%s: watchdog timeout\n", sc_if->sk_dev.dv_xname);
 1563 
 1564                 ifp->if_oerrors++;
 1565 
 1566                 /* XXX Resets both ports; we shouldn't do that. */
 1567                 mskc_reset(sc_if->sk_softc);
 1568                 msk_reset(sc_if);
 1569                 msk_init(sc_if);
 1570         }
 1571 }
 1572 
 1573 void
 1574 mskc_shutdown(void *v)
 1575 {
 1576         struct sk_softc         *sc = v;
 1577 
 1578         DPRINTFN(2, ("mskc_shutdown\n"));
 1579 
 1580         /* Turn off the 'driver is loaded' LED. */
 1581         CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_OFF);
 1582 
 1583         /*
 1584          * Reset the Yukon-2 controller. Doing this should also
 1585          * assert the resets on the attached GMAC(s).
 1586          */
 1587         mskc_reset(sc);
 1588 }
 1589 
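      /*
       * A receive status word is only accepted if no error bits are set,
       * the RXOK bit is set and the hardware byte count matches the
       * reported frame length.
       */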
 1590 static __inline int
 1591 msk_rxvalid(struct sk_softc *sc, u_int32_t stat, u_int32_t len)
 1592 {
 1593         if ((stat & (YU_RXSTAT_CRCERR | YU_RXSTAT_LONGERR |
 1594             YU_RXSTAT_MIIERR | YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC |
 1595             YU_RXSTAT_JABBER)) != 0 ||
 1596             (stat & YU_RXSTAT_RXOK) != YU_RXSTAT_RXOK ||
 1597             YU_RXSTAT_BYTES(stat) != len)
 1598                 return (0);
 1599 
 1600         return (1);
 1601 }
 1602 
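      /*
       * Handle one receive completion: sync and detach the filled buffer,
       * validate the frame, replace the buffer (copying the frame if no
       * replacement can be allocated) and pass it up the stack.
       */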
 1603 void
 1604 msk_rxeof(struct sk_if_softc *sc_if, u_int16_t len, u_int32_t rxstat)
 1605 {
 1606         struct sk_softc         *sc = sc_if->sk_softc;
 1607         struct ifnet            *ifp = &sc_if->arpcom.ac_if;
 1608         struct mbuf             *m;
 1609         struct sk_chain         *cur_rx;
 1610         int                     cur, total_len = len;
 1611         bus_dmamap_t            dmamap;
 1612 
 1613         DPRINTFN(2, ("msk_rxeof\n"));
 1614 
 1615         cur = sc_if->sk_cdata.sk_rx_cons;
 1616         SK_INC(sc_if->sk_cdata.sk_rx_cons, MSK_RX_RING_CNT);
 1617         SK_INC(sc_if->sk_cdata.sk_rx_prod, MSK_RX_RING_CNT);
 1618 
 1619         /* Sync the descriptor */
 1620         MSK_CDRXSYNC(sc_if, cur, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
 1621 
 1622         cur_rx = &sc_if->sk_cdata.sk_rx_chain[cur];
 1623         dmamap = sc_if->sk_cdata.sk_rx_jumbo_map;
 1624 
 1625         bus_dmamap_sync(sc_if->sk_softc->sc_dmatag, dmamap, 0,
 1626             dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
 1627 
 1628         m = cur_rx->sk_mbuf;
 1629         cur_rx->sk_mbuf = NULL;
 1630 
 1631         if (total_len < SK_MIN_FRAMELEN ||
 1632             total_len > SK_JUMBO_FRAMELEN ||
 1633             msk_rxvalid(sc, rxstat, total_len) == 0) {
 1634                 ifp->if_ierrors++;
 1635                 msk_newbuf(sc_if, cur, m, dmamap);
 1636                 return;
 1637         }
 1638 
 1639         /*
 1640          * Try to allocate a new jumbo buffer. If that fails, copy the
 1641          * packet to mbufs and put the jumbo buffer back in the ring
 1642          * so it can be re-used. If allocating mbufs fails, then we
 1643          * have to drop the packet.
 1644          */
 1645         if (msk_newbuf(sc_if, cur, NULL, dmamap) == ENOBUFS) {
 1646                 struct mbuf             *m0;
 1647                 m0 = m_devget(mtod(m, char *) - ETHER_ALIGN,
 1648                     total_len + ETHER_ALIGN, 0, ifp, NULL);
 1649                 msk_newbuf(sc_if, cur, m, dmamap);
 1650                 if (m0 == NULL) {
 1651                         ifp->if_ierrors++;
 1652                         return;
 1653                 }
 1654                 m_adj(m0, ETHER_ALIGN);
 1655                 m = m0;
 1656         } else {
 1657                 m->m_pkthdr.rcvif = ifp;
 1658                 m->m_pkthdr.len = m->m_len = total_len;
 1659         }
 1660 
 1661         ifp->if_ipackets++;
 1662 
 1663 #if NBPFILTER > 0
 1664         if (ifp->if_bpf)
 1665                 bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
 1666 #endif
 1667 
 1668         /* pass it on. */
 1669         ether_input_mbuf(ifp, m);
 1670 }
 1671 
 1672 void
 1673 msk_txeof(struct sk_if_softc *sc_if)
 1674 {
 1675         struct sk_softc         *sc = sc_if->sk_softc;
 1676         struct msk_tx_desc      *cur_tx;
 1677         struct ifnet            *ifp = &sc_if->arpcom.ac_if;
 1678         u_int32_t               idx, reg, sk_ctl;
 1679         struct sk_txmap_entry   *entry;
 1680 
 1681         DPRINTFN(2, ("msk_txeof\n"));
 1682 
 1683         if (sc_if->sk_port == SK_PORT_A)
 1684                 reg = SK_STAT_BMU_TXA1_RIDX;
 1685         else
 1686                 reg = SK_STAT_BMU_TXA2_RIDX;
 1687 
 1688         /*
 1689          * Go through our tx ring and free mbufs for those
 1690          * frames that have been sent.
 1691          */
 1692         idx = sc_if->sk_cdata.sk_tx_cons;
 1693         while (idx != sk_win_read_2(sc, reg)) {
 1694                 MSK_CDTXSYNC(sc_if, idx, 1,
 1695                     BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
 1696 
 1697                 cur_tx = &sc_if->sk_rdata->sk_tx_ring[idx];
 1698                 sk_ctl = cur_tx->sk_ctl;
 1699 #ifdef MSK_DEBUG
 1700                 if (mskdebug >= 2)
 1701                         msk_dump_txdesc(cur_tx, idx);
 1702 #endif
 1703                 if (sk_ctl & SK_Y2_TXCTL_LASTFRAG)
 1704                         ifp->if_opackets++;
 1705                 if (sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf != NULL) {
 1706                         entry = sc_if->sk_cdata.sk_tx_map[idx];
 1707 
 1708                         m_freem(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf);
 1709                         sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf = NULL;
 1710 
 1711                         bus_dmamap_sync(sc->sc_dmatag, entry->dmamap, 0,
 1712                             entry->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
 1713 
 1714                         bus_dmamap_unload(sc->sc_dmatag, entry->dmamap);
 1715                         SIMPLEQ_INSERT_TAIL(&sc_if->sk_txmap_head, entry,
 1716                                           link);
 1717                         sc_if->sk_cdata.sk_tx_map[idx] = NULL;
 1718                 }
 1719                 sc_if->sk_cdata.sk_tx_cnt--;
 1720                 SK_INC(idx, MSK_TX_RING_CNT);
 1721         }
 1722         ifp->if_timer = sc_if->sk_cdata.sk_tx_cnt > 0 ? 5 : 0;
 1723 
 1724         if (sc_if->sk_cdata.sk_tx_cnt < MSK_TX_RING_CNT - 2)
 1725                 ifp->if_flags &= ~IFF_OACTIVE;
 1726 
 1727         sc_if->sk_cdata.sk_tx_cons = idx;
 1728 }
 1729 
 1730 void
 1731 msk_tick(void *xsc_if)
 1732 {
 1733         struct sk_if_softc *sc_if = xsc_if;  
 1734         struct mii_data *mii = &sc_if->sk_mii;
 1735         int s;
 1736 
 1737         s = splnet();
 1738         mii_tick(mii);
 1739         splx(s);
 1740         timeout_add(&sc_if->sk_tick_ch, hz);
 1741 }
 1742 
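      /*
       * Per-port GMAC interrupt: clear RX FIFO overrun and TX FIFO
       * underrun conditions via the MAC FIFO control/test registers.
       */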
 1743 void
 1744 msk_intr_yukon(struct sk_if_softc *sc_if)
 1745 {
 1746         u_int8_t status;
 1747 
 1748         status = SK_IF_READ_1(sc_if, 0, SK_GMAC_ISR);
 1749         /* RX overrun */
 1750         if ((status & SK_GMAC_INT_RX_OVER) != 0) {
 1751                 SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST,
 1752                     SK_RFCTL_RX_FIFO_OVER);
 1753         }
 1754         /* TX underrun */
 1755         if ((status & SK_GMAC_INT_TX_UNDER) != 0) {
 1756                 SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST,
 1757                     SK_TFCTL_TX_FIFO_UNDER);
 1758         }
 1759 
 1760         DPRINTFN(2, ("msk_intr_yukon status=%#x\n", status));
 1761 }
 1762 
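      /*
       * Shared controller interrupt: service per-port GMAC interrupts,
       * drain the status ring and restart transmission on any port that
       * still has queued packets.
       */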
 1763 int
 1764 msk_intr(void *xsc)
 1765 {
 1766         struct sk_softc         *sc = xsc;
 1767         struct sk_if_softc      *sc_if0 = sc->sk_if[SK_PORT_A];
 1768         struct sk_if_softc      *sc_if1 = sc->sk_if[SK_PORT_B];
 1769         struct ifnet            *ifp0 = NULL, *ifp1 = NULL;
 1770         int                     claimed = 0;
 1771         u_int32_t               status;
 1772         struct msk_status_desc  *cur_st;
 1773 
 1774         status = CSR_READ_4(sc, SK_Y2_ISSR2);
 1775         if (status == 0) {
 1776                 CSR_WRITE_4(sc, SK_Y2_ICR, 2);
 1777                 return (0);
 1778         }
 1779 
 1780         status = CSR_READ_4(sc, SK_ISR);
 1781 
 1782         if (sc_if0 != NULL)
 1783                 ifp0 = &sc_if0->arpcom.ac_if;
 1784         if (sc_if1 != NULL)
 1785                 ifp1 = &sc_if1->arpcom.ac_if;
 1786 
 1787         if (sc_if0 && (status & SK_Y2_IMR_MAC1) &&
 1788             (ifp0->if_flags & IFF_RUNNING)) {
 1789                 msk_intr_yukon(sc_if0);
 1790         }
 1791 
 1792         if (sc_if1 && (status & SK_Y2_IMR_MAC2) &&
 1793             (ifp1->if_flags & IFF_RUNNING)) {
 1794                 msk_intr_yukon(sc_if1);
 1795         }
 1796 
 1797         MSK_CDSTSYNC(sc, sc->sk_status_idx,
 1798             BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
 1799         cur_st = &sc->sk_status_ring[sc->sk_status_idx];
 1800 
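              /*
               * Walk the status ring; every entry the chip has handed
               * back (OWN bit set) reports a received frame or a transmit
               * completion for one of the links.
               */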
 1801         while (cur_st->sk_opcode & SK_Y2_STOPC_OWN) {
 1802                 cur_st->sk_opcode &= ~SK_Y2_STOPC_OWN;
 1803                 switch (cur_st->sk_opcode) {
 1804                 case SK_Y2_STOPC_RXSTAT:
 1805                         msk_rxeof(sc->sk_if[cur_st->sk_link],
 1806                             letoh16(cur_st->sk_len),
 1807                             letoh32(cur_st->sk_status));
 1808                         SK_IF_WRITE_2(sc->sk_if[cur_st->sk_link], 0,
 1809                             SK_RXQ1_Y2_PREF_PUTIDX,
 1810                             sc->sk_if[cur_st->sk_link]->sk_cdata.sk_rx_prod);
 1811                         break;
 1812                 case SK_Y2_STOPC_TXSTAT:
 1813                         if (sc_if0)
 1814                                 msk_txeof(sc_if0);
 1815                         if (sc_if1)
 1816                                 msk_txeof(sc_if1);
 1817                         break;
 1818                 default:
 1819                         printf("opcode=0x%x\n", cur_st->sk_opcode);
 1820                         break;
 1821                 }
 1822                 SK_INC(sc->sk_status_idx, MSK_STATUS_RING_CNT);
 1823 
 1824                 MSK_CDSTSYNC(sc, sc->sk_status_idx,
 1825                     BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
 1826                 cur_st = &sc->sk_status_ring[sc->sk_status_idx];
 1827         }
 1828 
 1829         if (status & SK_Y2_IMR_BMU) {
 1830                 CSR_WRITE_4(sc, SK_STAT_BMU_CSR, SK_STAT_BMU_IRQ_CLEAR);
 1831                 claimed = 1;
 1832         }
 1833 
 1834         CSR_WRITE_4(sc, SK_Y2_ICR, 2);
 1835 
 1836         if (ifp0 != NULL && !IFQ_IS_EMPTY(&ifp0->if_snd))
 1837                 msk_start(ifp0);
 1838         if (ifp1 != NULL && !IFQ_IS_EMPTY(&ifp1->if_snd))
 1839                 msk_start(ifp1);
 1840 
 1841         return (claimed);
 1842 }
 1843 
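      /*
       * Program the GMAC for this port: clear the MIB counters, set the
       * receive, transmit and serial mode parameters, load the station
       * address, apply promiscuous/multicast state and set up the MAC
       * FIFOs.
       */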
 1844 void
 1845 msk_init_yukon(struct sk_if_softc *sc_if)
 1846 {
 1847         u_int32_t               v;
 1848         u_int16_t               reg;
 1849         struct sk_softc         *sc;
 1850         int                     i;
 1851 
 1852         sc = sc_if->sk_softc;
 1853 
 1854         DPRINTFN(2, ("msk_init_yukon: start: sk_csr=%#x\n",
 1855                      CSR_READ_4(sc_if->sk_softc, SK_CSR)));
 1856 
 1857         DPRINTFN(6, ("msk_init_yukon: 1\n"));
 1858 
 1859         DPRINTFN(3, ("msk_init_yukon: gmac_ctrl=%#x\n",
 1860                      SK_IF_READ_4(sc_if, 0, SK_GMAC_CTRL)));
 1861 
 1862         DPRINTFN(6, ("msk_init_yukon: 3\n"));
 1863 
 1864         /* unused read of the interrupt source register */
 1865         DPRINTFN(6, ("msk_init_yukon: 4\n"));
 1866         SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR);
 1867 
 1868         DPRINTFN(6, ("msk_init_yukon: 4a\n"));
 1869         reg = SK_YU_READ_2(sc_if, YUKON_PAR);
 1870         DPRINTFN(6, ("msk_init_yukon: YUKON_PAR=%#x\n", reg));
 1871 
 1872         /* MIB Counter Clear Mode set */
 1873         reg |= YU_PAR_MIB_CLR;
 1874         DPRINTFN(6, ("msk_init_yukon: YUKON_PAR=%#x\n", reg));
 1875         DPRINTFN(6, ("msk_init_yukon: 4b\n"));
 1876         SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);
 1877 
 1878         /* MIB Counter Clear Mode clear */
 1879         DPRINTFN(6, ("msk_init_yukon: 5\n"));
 1880         reg &= ~YU_PAR_MIB_CLR;
 1881         SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);
 1882 
 1883         /* receive control reg */
 1884         DPRINTFN(6, ("msk_init_yukon: 7\n"));
 1885         SK_YU_WRITE_2(sc_if, YUKON_RCR, YU_RCR_CRCR);
 1886 
 1887         /* transmit parameter register */
 1888         DPRINTFN(6, ("msk_init_yukon: 8\n"));
 1889         SK_YU_WRITE_2(sc_if, YUKON_TPR, YU_TPR_JAM_LEN(0x3) |
 1890                       YU_TPR_JAM_IPG(0xb) | YU_TPR_JAM2DATA_IPG(0x1a) );
 1891 
 1892         /* serial mode register */
 1893         DPRINTFN(6, ("msk_init_yukon: 9\n"));
 1894         reg = YU_SMR_DATA_BLIND(0x1c) |
 1895               YU_SMR_MFL_VLAN |
 1896               YU_SMR_IPG_DATA(0x1e);
 1897 
 1898         if (sc->sk_type != SK_YUKON_FE)
 1899                 reg |= YU_SMR_MFL_JUMBO;
 1900 
 1901         SK_YU_WRITE_2(sc_if, YUKON_SMR, reg);
 1902 
 1903         DPRINTFN(6, ("msk_init_yukon: 10\n"));
 1904         /* Setup Yukon's address */
 1905         for (i = 0; i < 3; i++) {
 1906                 /* Write Source Address 1 (unicast filter) */
 1907                 SK_YU_WRITE_2(sc_if, YUKON_SAL1 + i * 4, 
 1908                               sc_if->arpcom.ac_enaddr[i * 2] |
 1909                               sc_if->arpcom.ac_enaddr[i * 2 + 1] << 8);
 1910         }
 1911 
 1912         for (i = 0; i < 3; i++) {
 1913                 reg = sk_win_read_2(sc_if->sk_softc,
 1914                                     SK_MAC1_0 + i * 2 + sc_if->sk_port * 8);
 1915                 SK_YU_WRITE_2(sc_if, YUKON_SAL2 + i * 4, reg);
 1916         }
 1917 
 1918         /* Set promiscuous mode */
 1919         msk_setpromisc(sc_if);
 1920 
 1921         /* Set multicast filter */
 1922         DPRINTFN(6, ("msk_init_yukon: 11\n"));
 1923         msk_setmulti(sc_if);
 1924 
 1925         /* enable interrupt mask for counter overflows */
 1926         DPRINTFN(6, ("msk_init_yukon: 12\n"));
 1927         SK_YU_WRITE_2(sc_if, YUKON_TIMR, 0);
 1928         SK_YU_WRITE_2(sc_if, YUKON_RIMR, 0);
 1929         SK_YU_WRITE_2(sc_if, YUKON_TRIMR, 0);
 1930 
 1931         /* Configure RX MAC FIFO Flush Mask */
 1932         v = YU_RXSTAT_FOFL | YU_RXSTAT_CRCERR | YU_RXSTAT_MIIERR |
 1933             YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC | YU_RXSTAT_RUNT |
 1934             YU_RXSTAT_JABBER;
 1935         SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_MASK, v);
 1936 
 1937         /* Configure RX MAC FIFO */
 1938         SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_CLEAR);
 1939         SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_OPERATION_ON |
 1940             SK_RFCTL_FIFO_FLUSH_ON);
 1941 
 1942         /* Increase flush threshold to 64 bytes */
 1943         SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_THRESHOLD,
 1944             SK_RFCTL_FIFO_THRESHOLD + 1);
 1945 
 1946         /* Configure TX MAC FIFO */
 1947         SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_CLEAR);
 1948         SK_IF_WRITE_2(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_OPERATION_ON);
 1949 
 1950 #if 1
 1951         SK_YU_WRITE_2(sc_if, YUKON_GPCR, YU_GPCR_TXEN | YU_GPCR_RXEN);
 1952 #endif
 1953         DPRINTFN(6, ("msk_init_yukon: end\n"));
 1954 }
 1955 
 1956 /*
 1957  * Note that to properly initialize any part of the Yukon-2 chip,
 1958  * you first have to take it out of reset mode.
 1959  */
 1960 void
 1961 msk_init(void *xsc_if)
 1962 {
 1963         struct sk_if_softc      *sc_if = xsc_if;
 1964         struct sk_softc         *sc = sc_if->sk_softc;
 1965         struct ifnet            *ifp = &sc_if->arpcom.ac_if;
 1966         struct mii_data         *mii = &sc_if->sk_mii;
 1967         int                     s;
 1968 
 1969         DPRINTFN(2, ("msk_init\n"));
 1970 
 1971         s = splnet();
 1972 
 1973         /* Cancel pending I/O and free all RX/TX buffers. */
 1974         msk_stop(sc_if);
 1975 
 1976         /* Configure I2C registers */
 1977 
 1978         /* Configure GMAC(s) */
 1979         msk_init_yukon(sc_if);
 1980         mii_mediachg(mii);
 1981 
 1982         /* Configure transmit arbiter(s) */
 1983         SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_ON);
 1984 #if 0
 1985             SK_TXARCTL_ON|SK_TXARCTL_FSYNC_ON);
 1986 #endif
 1987 
 1988         /* Configure RAMbuffers */
 1989         SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET);
 1990         SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart);
 1991         SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart);
 1992         SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart);
 1993         SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend);
 1994         SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON);
 1995 
 1996         SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_CTLTST, SK_RBCTL_UNRESET);
 1997         SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_CTLTST, SK_RBCTL_STORENFWD_ON);
 1998         SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_START, sc_if->sk_tx_ramstart);
 1999         SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_WR_PTR, sc_if->sk_tx_ramstart);
 2000         SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_RD_PTR, sc_if->sk_tx_ramstart);
 2001         SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_END, sc_if->sk_tx_ramend);
 2002         SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_CTLTST, SK_RBCTL_ON);
 2003 
 2004         /* Configure BMUs */
 2005         SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, 0x00000016);
 2006         SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, 0x00000d28);
 2007         SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, 0x00000080);
 2008         SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_WATERMARK, 0x00000600);
 2009 
 2010         SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_BMU_CSR, 0x00000016);
 2011         SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_BMU_CSR, 0x00000d28);
 2012         SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_BMU_CSR, 0x00000080);
 2013         SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_WATERMARK, 0x00000600);
 2014 
 2015         /* Make sure the sync transmit queue is disabled. */
 2016         SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_RESET);
 2017 
 2018         /* Init descriptors */
 2019         if (msk_init_rx_ring(sc_if) == ENOBUFS) {
 2020                 printf("%s: initialization failed: no "
 2021                     "memory for rx buffers\n", sc_if->sk_dev.dv_xname);
 2022                 msk_stop(sc_if);
 2023                 splx(s);
 2024                 return;
 2025         }
 2026 
 2027         if (msk_init_tx_ring(sc_if) == ENOBUFS) {
 2028                 printf("%s: initialization failed: no "
 2029                     "memory for tx buffers\n", sc_if->sk_dev.dv_xname);
 2030                 msk_stop(sc_if);
 2031                 splx(s);
 2032                 return;
 2033         }
 2034 
 2035         /* Initialize prefetch engine. */
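              /*
               * The CSR values below appear to be reset set (0x1),
               * reset clear (0x2) and operation on (0x8); the last-index
               * and ring address registers are programmed in between.
               */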
 2036         SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_Y2_PREF_CSR, 0x00000001);
 2037         SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_Y2_PREF_CSR, 0x00000002);
 2038         SK_IF_WRITE_2(sc_if, 0, SK_RXQ1_Y2_PREF_LIDX, MSK_RX_RING_CNT - 1);
 2039         SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_Y2_PREF_ADDRLO,
 2040             MSK_RX_RING_ADDR(sc_if, 0));
 2041         SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_Y2_PREF_ADDRHI,
 2042             (u_int64_t)MSK_RX_RING_ADDR(sc_if, 0) >> 32);
 2043         SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_Y2_PREF_CSR, 0x00000008);
 2044         SK_IF_READ_4(sc_if, 0, SK_RXQ1_Y2_PREF_CSR);
 2045 
 2046         SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_Y2_PREF_CSR, 0x00000001);
 2047         SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_Y2_PREF_CSR, 0x00000002);
 2048         SK_IF_WRITE_2(sc_if, 1, SK_TXQA1_Y2_PREF_LIDX, MSK_TX_RING_CNT - 1);
 2049         SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_Y2_PREF_ADDRLO,
 2050             MSK_TX_RING_ADDR(sc_if, 0));
 2051         SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_Y2_PREF_ADDRHI,
 2052             (u_int64_t)MSK_TX_RING_ADDR(sc_if, 0) >> 32);
 2053         SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_Y2_PREF_CSR, 0x00000008);
 2054         SK_IF_READ_4(sc_if, 1, SK_TXQA1_Y2_PREF_CSR);
 2055 
 2056         SK_IF_WRITE_2(sc_if, 0, SK_RXQ1_Y2_PREF_PUTIDX,
 2057             sc_if->sk_cdata.sk_rx_prod);
 2058 
 2059         /* Configure interrupt handling */
 2060         if (sc_if->sk_port == SK_PORT_A)
 2061                 sc->sk_intrmask |= SK_Y2_INTRS1;
 2062         else
 2063                 sc->sk_intrmask |= SK_Y2_INTRS2;
 2064         sc->sk_intrmask |= SK_Y2_IMR_BMU;
 2065         CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
 2066 
 2067         ifp->if_flags |= IFF_RUNNING;
 2068         ifp->if_flags &= ~IFF_OACTIVE;
 2069 
 2070         timeout_add(&sc_if->sk_tick_ch, hz);
 2071 
 2072         splx(s);
 2073 }
 2074 
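      /*
       * Quiesce one port: stop the tick timeout, reset the MAC FIFOs,
       * take the BMUs and RAM buffers offline, mask this port's
       * interrupts and free any mbufs still held by the rx/tx rings.
       */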
 2075 void
 2076 msk_stop(struct sk_if_softc *sc_if)
 2077 {
 2078         struct sk_softc         *sc = sc_if->sk_softc;
 2079         struct ifnet            *ifp = &sc_if->arpcom.ac_if;
 2080         struct sk_txmap_entry   *dma;
 2081         int                     i;
 2082 
 2083         DPRINTFN(2, ("msk_stop\n"));
 2084 
 2085         timeout_del(&sc_if->sk_tick_ch);
 2086 
 2087         ifp->if_flags &= ~(IFF_RUNNING|IFF_OACTIVE);
 2088 
 2089         /* Stop transfer of Tx descriptors */
 2090 
 2091         /* Stop transfer of Rx descriptors */
 2092 
 2093         /* Turn off various components of this interface. */
 2094         SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
 2095         SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_SET);
 2096         SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_SET);
 2097         SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE);
 2098         SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
 2099         SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_BMU_CSR, SK_TXBMU_OFFLINE);
 2100         SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
 2101         SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF);
 2102         SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
 2103         SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_TXLEDCTL_COUNTER_STOP);
 2104         SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF);
 2105         SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF);
 2106 
 2107         SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_Y2_PREF_CSR, 0x00000001);
 2108         SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_Y2_PREF_CSR, 0x00000001);
 2109 
 2110         /* Disable interrupts */
 2111         if (sc_if->sk_port == SK_PORT_A)
 2112                 sc->sk_intrmask &= ~SK_Y2_INTRS1;
 2113         else
 2114                 sc->sk_intrmask &= ~SK_Y2_INTRS2;
 2115         CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
 2116 
 2117         SK_XM_READ_2(sc_if, XM_ISR);
 2118         SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);
 2119 
 2120         /* Free RX and TX mbufs still in the queues. */
 2121         for (i = 0; i < MSK_RX_RING_CNT; i++) {
 2122                 if (sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf != NULL) {
 2123                         m_freem(sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf);
 2124                         sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf = NULL;
 2125                 }
 2126         }
 2127 
 2128         for (i = 0; i < MSK_TX_RING_CNT; i++) {
 2129                 if (sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf != NULL) {
 2130                         m_freem(sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf);
 2131                         sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf = NULL;
 2132                         SIMPLEQ_INSERT_HEAD(&sc_if->sk_txmap_head,
 2133                             sc_if->sk_cdata.sk_tx_map[i], link);
 2134                         sc_if->sk_cdata.sk_tx_map[i] = NULL;
 2135                 }
 2136         }
 2137 
 2138         while ((dma = SIMPLEQ_FIRST(&sc_if->sk_txmap_head))) {
 2139                 SIMPLEQ_REMOVE_HEAD(&sc_if->sk_txmap_head, link);
 2140                 bus_dmamap_destroy(sc->sc_dmatag, dma->dmamap);
 2141                 free(dma, M_DEVBUF);
 2142         }
 2143 }
 2144 
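      /*
       * autoconf(9) glue: mskc attaches the PCI controller, msk attaches
       * one network interface per port.
       */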
 2145 struct cfattach mskc_ca = {
 2146         sizeof(struct sk_softc), mskc_probe, mskc_attach,
 2147 };
 2148 
 2149 struct cfdriver mskc_cd = {
 2150         0, "mskc", DV_DULL
 2151 };
 2152 
 2153 struct cfattach msk_ca = {
 2154         sizeof(struct sk_if_softc), msk_probe, msk_attach,
 2155 };
 2156 
 2157 struct cfdriver msk_cd = {
 2158         0, "msk", DV_IFNET
 2159 };
 2160 
 2161 #ifdef MSK_DEBUG
 2162 void
 2163 msk_dump_txdesc(struct msk_tx_desc *le, int idx)
 2164 {
 2165 #define DESC_PRINT(X)                                   \
 2166         if (X)                                  \
 2167                 printf("txdesc[%d]." #X "=%#x\n",       \
 2168                        idx, X);
 2169 
 2170         DESC_PRINT(letoh32(le->sk_addr));
 2171         DESC_PRINT(letoh16(le->sk_len));
 2172         DESC_PRINT(le->sk_ctl);
 2173         DESC_PRINT(le->sk_opcode);
 2174 #undef DESC_PRINT
 2175 }
 2176 
 2177 void
 2178 msk_dump_bytes(const char *data, int len)
 2179 {
 2180         int c, i, j;
 2181 
 2182         for (i = 0; i < len; i += 16) {
 2183                 printf("%08x  ", i);
 2184                 c = len - i;
 2185                 if (c > 16) c = 16;
 2186 
 2187                 for (j = 0; j < c; j++) {
 2188                         printf("%02x ", data[i + j] & 0xff);
 2189                         if ((j & 0xf) == 7 && j > 0)
 2190                                 printf(" ");
 2191                 }
 2192                 
 2193                 for (; j < 16; j++)
 2194                         printf("   ");
 2195                 printf("  ");
 2196 
 2197                 for (j = 0; j < c; j++) {
 2198                         int ch = data[i + j] & 0xff;
 2199                         printf("%c", ' ' <= ch && ch <= '~' ? ch : ' ');
 2200                 }
 2201                 
 2202                 printf("\n");
 2203                 
 2204                 if (c < 16)
 2205                         break;
 2206         }
 2207 }
 2208 
 2209 void
 2210 msk_dump_mbuf(struct mbuf *m)
 2211 {
 2212         int count = m->m_pkthdr.len;
 2213 
 2214         printf("m=%p, m->m_pkthdr.len=%d\n", m, m->m_pkthdr.len);
 2215 
 2216         while (count > 0 && m) {
 2217         printf("m=%p, m->m_data=%p, m->m_len=%d\n",
 2218                        m, m->m_data, m->m_len);
 2219                 msk_dump_bytes(mtod(m, char *), m->m_len);
 2220 
 2221                 count -= m->m_len;
 2222                 m = m->m_next;
 2223         }
 2224 }
 2225 #endif
