root/dev/pci/if_pcn.c


DEFINITIONS

This source file includes the following definitions.
  1. pcn_csr_read
  2. pcn_csr_write
  3. pcn_bcr_read
  4. pcn_bcr_write
  5. pcn_lookup_variant
  6. pcn_match
  7. pcn_attach
  8. pcn_shutdown
  9. pcn_start
  10. pcn_watchdog
  11. pcn_ioctl
  12. pcn_intr
  13. pcn_spnd
  14. pcn_txintr
  15. pcn_rxintr
  16. pcn_tick
  17. pcn_reset
  18. pcn_init
  19. pcn_rxdrain
  20. pcn_stop
  21. pcn_add_rxbuf
  22. pcn_set_filter
  23. pcn_79c970_mediainit
  24. pcn_79c970_mediastatus
  25. pcn_79c970_mediachange
  26. pcn_79c971_mediainit
  27. pcn_79c971_mediastatus
  28. pcn_79c971_mediachange
  29. pcn_mii_readreg
  30. pcn_mii_writereg
  31. pcn_mii_statchg

    1 /*      $OpenBSD: if_pcn.c,v 1.15 2006/11/09 14:25:23 reyk Exp $        */
    2 /*      $NetBSD: if_pcn.c,v 1.26 2005/05/07 09:15:44 is Exp $   */
    3 
    4 /*
    5  * Copyright (c) 2001 Wasabi Systems, Inc.
    6  * All rights reserved.
    7  *
    8  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
    9  *
   10  * Redistribution and use in source and binary forms, with or without
   11  * modification, are permitted provided that the following conditions
   12  * are met:
   13  * 1. Redistributions of source code must retain the above copyright
   14  *    notice, this list of conditions and the following disclaimer.
   15  * 2. Redistributions in binary form must reproduce the above copyright
   16  *    notice, this list of conditions and the following disclaimer in the
   17  *    documentation and/or other materials provided with the distribution.
   18  * 3. All advertising materials mentioning features or use of this software
   19  *    must display the following acknowledgement:
   20  *      This product includes software developed for the NetBSD Project by
   21  *      Wasabi Systems, Inc.
   22  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
   23  *    or promote products derived from this software without specific prior
   24  *    written permission.
   25  *
   26  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
   27  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
   28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
   29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
   30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   36  * POSSIBILITY OF SUCH DAMAGE.
   37  */
   38 
   39 /*
   40  * Device driver for the AMD PCnet-PCI series of Ethernet
   41  * chips:
   42  *
   43  *      * Am79c970 PCnet-PCI Single-Chip Ethernet Controller for PCI
   44  *        Local Bus
   45  *
   46  *      * Am79c970A PCnet-PCI II Single-Chip Full-Duplex Ethernet Controller
   47  *        for PCI Local Bus
   48  *
   49  *      * Am79c971 PCnet-FAST Single-Chip Full-Duplex 10/100Mbps
   50  *        Ethernet Controller for PCI Local Bus
   51  *
   52  *      * Am79c972 PCnet-FAST+ Enhanced 10/100Mbps PCI Ethernet Controller
   53  *        with OnNow Support
   54  *
   55  *      * Am79c973/Am79c975 PCnet-FAST III Single-Chip 10/100Mbps PCI
   56  *        Ethernet Controller with Integrated PHY
   57  *
   58  * This also supports the virtual PCnet-PCI Ethernet interface found
   59  * in VMware.
   60  *
   61  * TODO:
   62  *
   63  *      * Split this into bus-specific and bus-independent portions.
   64  *        The core could also be used for the ILACC (Am79900) 32-bit
   65  *        Ethernet chip (XXX only if we use an ILACC-compatible SWSTYLE).
   66  */
   67 
   68 #if 0
   69 #include <sys/cdefs.h>
   70 __KERNEL_RCSID(0, "$NetBSD: if_pcn.c,v 1.26 2005/05/07 09:15:44 is Exp $");
   71 #endif
   72 
   73 #include "bpfilter.h"
   74 
   75 #include <sys/param.h>
   76 #include <sys/systm.h>
   77 #include <sys/timeout.h>
   78 #include <sys/mbuf.h>
   79 #include <sys/malloc.h>
   80 #include <sys/kernel.h>
   81 #include <sys/socket.h>
   82 #include <sys/ioctl.h>
   83 #include <sys/errno.h>
   84 #include <sys/device.h>
   85 #include <sys/queue.h>
   86 
   87 #include <net/if.h>
   88 #include <net/if_dl.h>
   89 
   90 #ifdef INET
   91 #include <netinet/in.h>
   92 #include <netinet/in_systm.h>
   93 #include <netinet/in_var.h>
   94 #include <netinet/ip.h>
   95 #include <netinet/if_ether.h>
   96 #endif
   97 
   98 #include <net/if_media.h>
   99 
  100 #if NBPFILTER > 0
  101 #include <net/bpf.h>
  102 #endif
  103 
  104 #include <machine/bus.h>
  105 #include <machine/intr.h>
  106 #include <machine/endian.h>
  107 
  108 #include <dev/mii/mii.h>
  109 #include <dev/mii/miivar.h>
  110 
  111 #include <dev/ic/am79900reg.h>
  112 #include <dev/ic/lancereg.h>
  113 
  114 #include <dev/pci/pcireg.h>
  115 #include <dev/pci/pcivar.h>
  116 #include <dev/pci/pcidevs.h>
  117 
  118 /*
  119  * Register definitions for the AMD PCnet-PCI series of Ethernet
  120  * chips.
  121  *
  122  * These are only the registers that we access directly from PCI
  123  * space.  Everything else (accessed via the RAP + RDP/BDP) is
  124  * defined in <dev/ic/lancereg.h>.
  125  */
  126 
  127 /*
  128  * PCI configuration space.
  129  */
  130 
  131 #define PCN_PCI_CBIO    (PCI_MAPREG_START + 0x00)
  132 #define PCN_PCI_CBMEM   (PCI_MAPREG_START + 0x04)
  133 
  134 /*
  135  * I/O map in Word I/O mode.
  136  */
  137 
  138 #define PCN16_APROM     0x00
  139 #define PCN16_RDP       0x10
  140 #define PCN16_RAP       0x12
  141 #define PCN16_RESET     0x14
  142 #define PCN16_BDP       0x16
  143 
  144 /*
  145  * I/O map in DWord I/O mode.
  146  */
  147 
  148 #define PCN32_APROM     0x00
  149 #define PCN32_RDP       0x10
  150 #define PCN32_RAP       0x14
  151 #define PCN32_RESET     0x18
  152 #define PCN32_BDP       0x1c
  153 
  154 /*
  155  * Transmit descriptor list size.  This is arbitrary, but allocate
  156  * enough descriptors for 128 pending transmissions, and 4 segments
  157  * per packet.  This MUST work out to a power of 2.
  158  *
  159  * NOTE: We can't have any more than 512 Tx descriptors, SO BE CAREFUL!
  160  *
  161  * So we play a little trick here.  We give each packet up to 16
  162  * DMA segments, but only allocate the max of 512 descriptors.  The
   163  * transmit logic can deal with this; we're just hoping to sneak by.
  164  */
  165 #define PCN_NTXSEGS             16
  166 
  167 #define PCN_TXQUEUELEN          128
  168 #define PCN_TXQUEUELEN_MASK     (PCN_TXQUEUELEN - 1)
  169 #define PCN_NTXDESC             512
  170 #define PCN_NTXDESC_MASK        (PCN_NTXDESC - 1)
  171 #define PCN_NEXTTX(x)           (((x) + 1) & PCN_NTXDESC_MASK)
  172 #define PCN_NEXTTXS(x)          (((x) + 1) & PCN_TXQUEUELEN_MASK)
  173 
  174 /* Tx interrupt every N + 1 packets. */
  175 #define PCN_TXINTR_MASK         7
  176 
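The counts above are powers of two so the NEXT macros can advance a ring index with a bitwise AND instead of a modulo, and PCN_TXINTR_MASK picks out every eighth queued packet for a Tx-complete interrupt. A minimal sketch of that arithmetic (illustrative only; the function name is hypothetical and not part of the driver):

static void
pcn_ring_macro_demo(void)
{
	int idx = PCN_NTXDESC - 1;	/* last Tx descriptor, index 511 */

	/* (511 + 1) & 511 == 0: the index wraps back to the start. */
	idx = PCN_NEXTTX(idx);

	/*
	 * pcn_start() later requests a Tx interrupt only when
	 * (sc_txsnext & PCN_TXINTR_MASK) == 0, i.e. for packets 0, 8, 16, ...
	 */
	(void)idx;
}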
  177 /*
  178  * Receive descriptor list size.  We have one Rx buffer per incoming
  179  * packet, so this logic is a little simpler.
  180  */
  181 #define PCN_NRXDESC             128
  182 #define PCN_NRXDESC_MASK        (PCN_NRXDESC - 1)
  183 #define PCN_NEXTRX(x)           (((x) + 1) & PCN_NRXDESC_MASK)
  184 
  185 /*
  186  * Control structures are DMA'd to the PCnet chip.  We allocate them in
  187  * a single clump that maps to a single DMA segment to make several things
  188  * easier.
  189  */
  190 struct pcn_control_data {
  191         /* The transmit descriptors. */
  192         struct letmd pcd_txdescs[PCN_NTXDESC];
  193 
  194         /* The receive descriptors. */
  195         struct lermd pcd_rxdescs[PCN_NRXDESC];
  196 
  197         /* The init block. */
  198         struct leinit pcd_initblock;
  199 };
  200 
  201 #define PCN_CDOFF(x)    offsetof(struct pcn_control_data, x)
  202 #define PCN_CDTXOFF(x)  PCN_CDOFF(pcd_txdescs[(x)])
  203 #define PCN_CDRXOFF(x)  PCN_CDOFF(pcd_rxdescs[(x)])
  204 #define PCN_CDINITOFF   PCN_CDOFF(pcd_initblock)
  205 
  206 /*
  207  * Software state for transmit jobs.
  208  */
  209 struct pcn_txsoft {
  210         struct mbuf *txs_mbuf;          /* head of our mbuf chain */
  211         bus_dmamap_t txs_dmamap;        /* our DMA map */
  212         int txs_firstdesc;              /* first descriptor in packet */
  213         int txs_lastdesc;               /* last descriptor in packet */
  214 };
  215 
  216 /*
  217  * Software state for receive jobs.
  218  */
  219 struct pcn_rxsoft {
  220         struct mbuf *rxs_mbuf;          /* head of our mbuf chain */
  221         bus_dmamap_t rxs_dmamap;        /* our DMA map */
  222 };
  223 
  224 /*
  225  * Description of Rx FIFO watermarks for various revisions.
  226  */
  227 static const char * const pcn_79c970_rcvfw[] = {
  228         "16 bytes",
  229         "64 bytes",
  230         "128 bytes",
  231         NULL,
  232 };
  233 
  234 static const char * const pcn_79c971_rcvfw[] = {
  235         "16 bytes",
  236         "64 bytes",
  237         "112 bytes",
  238         NULL,
  239 };
  240 
  241 /*
  242  * Description of Tx start points for various revisions.
  243  */
  244 static const char * const pcn_79c970_xmtsp[] = {
  245         "8 bytes",
  246         "64 bytes",
  247         "128 bytes",
  248         "248 bytes",
  249 };
  250 
  251 static const char * const pcn_79c971_xmtsp[] = {
  252         "20 bytes",
  253         "64 bytes",
  254         "128 bytes",
  255         "248 bytes",
  256 };
  257 
  258 static const char * const pcn_79c971_xmtsp_sram[] = {
  259         "44 bytes",
  260         "64 bytes",
  261         "128 bytes",
  262         "store-and-forward",
  263 };
  264 
  265 /*
  266  * Description of Tx FIFO watermarks for various revisions.
  267  */
  268 static const char * const pcn_79c970_xmtfw[] = {
  269         "16 bytes",
  270         "64 bytes",
  271         "128 bytes",
  272         NULL,
  273 };
  274 
  275 static const char * const pcn_79c971_xmtfw[] = {
  276         "16 bytes",
  277         "64 bytes",
  278         "108 bytes",
  279         NULL,
  280 };
  281 
  282 /*
  283  * Software state per device.
  284  */
  285 struct pcn_softc {
  286         struct device sc_dev;           /* generic device information */
  287         bus_space_tag_t sc_st;          /* bus space tag */
  288         bus_space_handle_t sc_sh;       /* bus space handle */
  289         bus_dma_tag_t sc_dmat;          /* bus DMA tag */
  290         struct arpcom sc_arpcom;        /* Ethernet common data */
  291         void *sc_sdhook;                /* shutdown hook */
  292 
  293         /* Points to our media routines, etc. */
  294         const struct pcn_variant *sc_variant;
  295 
  296         void *sc_ih;                    /* interrupt cookie */
  297 
  298         struct mii_data sc_mii;         /* MII/media information */
  299 
  300         struct timeout sc_tick_timeout; /* tick timeout */
  301 
  302         bus_dmamap_t sc_cddmamap;       /* control data DMA map */
  303 #define sc_cddma        sc_cddmamap->dm_segs[0].ds_addr
  304 
  305         /* Software state for transmit and receive descriptors. */
  306         struct pcn_txsoft sc_txsoft[PCN_TXQUEUELEN];
  307         struct pcn_rxsoft sc_rxsoft[PCN_NRXDESC];
  308 
  309         /* Control data structures */
  310         struct pcn_control_data *sc_control_data;
  311 #define sc_txdescs      sc_control_data->pcd_txdescs
  312 #define sc_rxdescs      sc_control_data->pcd_rxdescs
  313 #define sc_initblock    sc_control_data->pcd_initblock
  314 
  315         const char * const *sc_rcvfw_desc;      /* Rx FIFO watermark info */
  316         int sc_rcvfw;
  317 
  318         const char * const *sc_xmtsp_desc;      /* Tx start point info */
  319         int sc_xmtsp;
  320 
  321         const char * const *sc_xmtfw_desc;      /* Tx FIFO watermark info */
  322         int sc_xmtfw;
  323 
  324         int sc_flags;                   /* misc. flags; see below */
  325         int sc_swstyle;                 /* the software style in use */
  326 
  327         int sc_txfree;                  /* number of free Tx descriptors */
  328         int sc_txnext;                  /* next ready Tx descriptor */
  329 
  330         int sc_txsfree;                 /* number of free Tx jobs */
  331         int sc_txsnext;                 /* next free Tx job */
  332         int sc_txsdirty;                /* dirty Tx jobs */
  333 
  334         int sc_rxptr;                   /* next ready Rx descriptor/job */
  335 
  336         uint32_t sc_csr5;               /* prototype CSR5 register */
  337         uint32_t sc_mode;               /* prototype MODE register */
  338 };
  339 
  340 /* sc_flags */
  341 #define PCN_F_HAS_MII           0x0001  /* has MII */
  342 
  343 #define PCN_CDTXADDR(sc, x)     ((sc)->sc_cddma + PCN_CDTXOFF((x)))
  344 #define PCN_CDRXADDR(sc, x)     ((sc)->sc_cddma + PCN_CDRXOFF((x)))
  345 #define PCN_CDINITADDR(sc)      ((sc)->sc_cddma + PCN_CDINITOFF)
  346 
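The bus address handed to the chip for a given descriptor (or for the init block) is simply the base DMA address of the control-data clump plus that structure's offset within struct pcn_control_data. A tiny sketch (illustrative only; the function name is hypothetical):

static bus_addr_t
pcn_txdesc_busaddr_demo(struct pcn_softc *sc, int x)
{
	/* sc_cddma + offsetof(struct pcn_control_data, pcd_txdescs[x]) */
	return (PCN_CDTXADDR(sc, x));
}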
  347 #define PCN_CDTXSYNC(sc, x, n, ops)                                     \
  348 do {                                                                    \
  349         int __x, __n;                                                   \
  350                                                                         \
  351         __x = (x);                                                      \
  352         __n = (n);                                                      \
  353                                                                         \
  354         /* If it will wrap around, sync to the end of the ring. */      \
  355         if ((__x + __n) > PCN_NTXDESC) {                                \
  356                 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,       \
  357                     PCN_CDTXOFF(__x), sizeof(struct letmd) *            \
  358                     (PCN_NTXDESC - __x), (ops));                        \
  359                 __n -= (PCN_NTXDESC - __x);                             \
  360                 __x = 0;                                                \
  361         }                                                               \
  362                                                                         \
  363         /* Now sync whatever is left. */                                \
  364         bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,               \
  365             PCN_CDTXOFF(__x), sizeof(struct letmd) * __n, (ops));       \
  366 } while (/*CONSTCOND*/0)
  367 
  368 #define PCN_CDRXSYNC(sc, x, ops)                                        \
  369         bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,               \
  370             PCN_CDRXOFF((x)), sizeof(struct lermd), (ops))
  371 
  372 #define PCN_CDINITSYNC(sc, ops)                                         \
  373         bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,               \
  374             PCN_CDINITOFF, sizeof(struct leinit), (ops))
  375 
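PCN_CDTXSYNC may be asked to sync a run of descriptors that wraps past the end of the ring, which is why it can issue two bus_dmamap_sync() calls. A hedged usage sketch of the wrap case (illustrative only; the function name is hypothetical), in the same style as the call in pcn_start() below:

static void
pcn_txsync_wrap_demo(struct pcn_softc *sc)
{
	/*
	 * Syncing 4 descriptors starting at index 510 covers 510, 511,
	 * 0 and 1: one sync for the tail of the ring, a second for the
	 * wrapped portion at the head.
	 */
	PCN_CDTXSYNC(sc, PCN_NTXDESC - 2, 4,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
}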
  376 #define PCN_INIT_RXDESC(sc, x)                                          \
  377 do {                                                                    \
  378         struct pcn_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];               \
  379         struct lermd *__rmd = &(sc)->sc_rxdescs[(x)];                   \
  380         struct mbuf *__m = __rxs->rxs_mbuf;                             \
  381                                                                         \
  382         /*                                                              \
  383          * Note: We scoot the packet forward 2 bytes in the buffer      \
  384          * so that the payload after the Ethernet header is aligned     \
  385          * to a 4-byte boundary.                                        \
  386          */                                                             \
  387         __m->m_data = __m->m_ext.ext_buf + 2;                           \
  388                                                                         \
  389         if ((sc)->sc_swstyle == LE_B20_SSTYLE_PCNETPCI3) {              \
  390                 __rmd->rmd2 =                                           \
  391                     htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr + 2); \
  392                 __rmd->rmd0 = 0;                                        \
  393         } else {                                                        \
  394                 __rmd->rmd2 = 0;                                        \
  395                 __rmd->rmd0 =                                           \
  396                     htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr + 2); \
  397         }                                                               \
  398         __rmd->rmd1 = htole32(LE_R1_OWN|LE_R1_ONES|                     \
  399             (LE_BCNT(MCLBYTES - 2) & LE_R1_BCNT_MASK));                 \
  400         PCN_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);\
  401 } while(/*CONSTCOND*/0)
  402 
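The 2-byte scoot in PCN_INIT_RXDESC is the usual Ethernet alignment trick: a 14-byte Ethernet header placed at offset 2 ends at offset 16, so the IP header that follows lands on a 4-byte boundary. A compile-time check of that arithmetic (illustrative only, and assuming ETHER_HDR_LEN is visible at this point):

/* 2-byte pad + 14-byte Ethernet header = 16 bytes, a multiple of 4. */
typedef char pcn_rx_align_check[((2 + ETHER_HDR_LEN) % 4 == 0) ? 1 : -1];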
  403 void    pcn_start(struct ifnet *);
  404 void    pcn_watchdog(struct ifnet *);
  405 int     pcn_ioctl(struct ifnet *, u_long, caddr_t);
  406 int     pcn_init(struct ifnet *);
  407 void    pcn_stop(struct ifnet *, int);
  408 
  409 void    pcn_shutdown(void *);
  410 
  411 void    pcn_reset(struct pcn_softc *);
  412 void    pcn_rxdrain(struct pcn_softc *);
  413 int     pcn_add_rxbuf(struct pcn_softc *, int);
  414 void    pcn_tick(void *);
  415 
  416 void    pcn_spnd(struct pcn_softc *);
  417 
  418 void    pcn_set_filter(struct pcn_softc *);
  419 
  420 int     pcn_intr(void *);
  421 void    pcn_txintr(struct pcn_softc *);
  422 int     pcn_rxintr(struct pcn_softc *);
  423 
  424 int     pcn_mii_readreg(struct device *, int, int);
  425 void    pcn_mii_writereg(struct device *, int, int, int);
  426 void    pcn_mii_statchg(struct device *);
  427 
  428 void    pcn_79c970_mediainit(struct pcn_softc *);
  429 int     pcn_79c970_mediachange(struct ifnet *);
  430 void    pcn_79c970_mediastatus(struct ifnet *, struct ifmediareq *);
  431 
  432 void    pcn_79c971_mediainit(struct pcn_softc *);
  433 int     pcn_79c971_mediachange(struct ifnet *);
  434 void    pcn_79c971_mediastatus(struct ifnet *, struct ifmediareq *);
  435 
  436 /*
  437  * Description of a PCnet-PCI variant.  Used to select media access
  438  * method, mostly, and to print a nice description of the chip.
  439  */
  440 static const struct pcn_variant {
  441         const char *pcv_desc;
  442         void (*pcv_mediainit)(struct pcn_softc *);
  443         uint16_t pcv_chipid;
  444 } pcn_variants[] = {
  445         { "Am79c970",
  446           pcn_79c970_mediainit,
  447           PARTID_Am79c970 },
  448 
  449         { "Am79c970A",
  450           pcn_79c970_mediainit,
  451           PARTID_Am79c970A },
  452 
  453         { "Am79c971",
  454           pcn_79c971_mediainit,
  455           PARTID_Am79c971 },
  456 
  457         { "Am79c972",
  458           pcn_79c971_mediainit,
  459           PARTID_Am79c972 },
  460 
  461         { "Am79c973",
  462           pcn_79c971_mediainit,
  463           PARTID_Am79c973 },
  464 
  465         { "Am79c975",
  466           pcn_79c971_mediainit,
  467           PARTID_Am79c975 },
  468 
  469         { "Am79c976",
  470           pcn_79c971_mediainit,
  471           PARTID_Am79c976 },
  472 
  473         { "Am79c978",
  474           pcn_79c971_mediainit,
  475           PARTID_Am79c978 },
  476 
  477         { "Unknown",
  478           pcn_79c971_mediainit,
  479           0 },
  480 };
  481 
  482 int     pcn_copy_small = 0;
  483 
  484 int     pcn_match(struct device *, void *, void *);
  485 void    pcn_attach(struct device *, struct device *, void *);
  486 
  487 struct cfattach pcn_ca = {
  488         sizeof(struct pcn_softc), pcn_match, pcn_attach,
  489 };
  490 
  491 const struct pci_matchid pcn_devices[] = {
  492         { PCI_VENDOR_AMD, PCI_PRODUCT_AMD_PCNET_PCI },
  493         { PCI_VENDOR_AMD, PCI_PRODUCT_AMD_PCHOME_PCI }
  494 };
  495 
  496 struct cfdriver pcn_cd = {
  497         0, "pcn", DV_IFNET
  498 };
  499 
  500 /*
  501  * Routines to read and write the PCnet-PCI CSR/BCR space.
  502  */
  503 
  504 static __inline uint32_t
  505 pcn_csr_read(struct pcn_softc *sc, int reg)
  506 {
  507 
  508         bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_RAP, reg);
  509         return (bus_space_read_4(sc->sc_st, sc->sc_sh, PCN32_RDP));
  510 }
  511 
  512 static __inline void
  513 pcn_csr_write(struct pcn_softc *sc, int reg, uint32_t val)
  514 {
  515 
  516         bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_RAP, reg);
  517         bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_RDP, val);
  518 }
  519 
  520 static __inline uint32_t
  521 pcn_bcr_read(struct pcn_softc *sc, int reg)
  522 {
  523 
  524         bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_RAP, reg);
  525         return (bus_space_read_4(sc->sc_st, sc->sc_sh, PCN32_BDP));
  526 }
  527 
  528 static __inline void
  529 pcn_bcr_write(struct pcn_softc *sc, int reg, uint32_t val)
  530 {
  531 
  532         bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_RAP, reg);
  533         bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_BDP, val);
  534 }
  535 
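These four helpers implement the indirect access scheme noted with the register definitions above: the register number is written to RAP, and the data then moves through RDP (for CSRs) or BDP (for BCRs). A minimal usage sketch, mirroring what pcn_attach() and pcn_intr() do further down (illustrative only; the function name is hypothetical):

static void
pcn_csr_usage_demo(struct pcn_softc *sc)
{
	uint32_t chipid, csr0;

	/* Identify the part, as pcn_attach() does. */
	chipid = pcn_csr_read(sc, LE_CSR88);

	/* Check for and acknowledge a pending interrupt, as pcn_intr() does. */
	csr0 = pcn_csr_read(sc, LE_CSR0);
	if (csr0 & LE_C0_INTR)
		pcn_csr_write(sc, LE_CSR0, csr0 & (LE_C0_INEA|LE_C0_IDON));

	(void)chipid;
}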
  536 static const struct pcn_variant *
  537 pcn_lookup_variant(uint16_t chipid)
  538 {
  539         const struct pcn_variant *pcv;
  540 
  541         for (pcv = pcn_variants; pcv->pcv_chipid != 0; pcv++) {
  542                 if (chipid == pcv->pcv_chipid)
  543                         return (pcv);
  544         }
  545 
  546         /*
  547          * This covers unknown chips, which we simply treat like
  548          * a generic PCnet-FAST.
  549          */
  550         return (pcv);
  551 }
  552 
  553 int
  554 pcn_match(struct device *parent, void *match, void *aux)
  555 {
  556         struct pci_attach_args *pa = aux;
  557 
  558         /*
  559          * IBM makes a PCI variant of this card which shows up as a
   560  * Trident Microsystems 4DWAVE DX (ethernet network, revision 0x25).
   561  * This card is truly a pcn card, so we have a special case match for
  562          * it.
  563          */
  564         if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_TRIDENT &&
  565             PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_TRIDENT_4DWAVE_DX &&
  566             PCI_CLASS(pa->pa_class) == PCI_CLASS_NETWORK)
  567                 return(1);
  568 
  569         return (pci_matchbyid((struct pci_attach_args *)aux, pcn_devices,
  570             sizeof(pcn_devices)/sizeof(pcn_devices[0])));
  571 }
  572 
  573 void
  574 pcn_attach(struct device *parent, struct device *self, void *aux)
  575 {
  576         struct pcn_softc *sc = (struct pcn_softc *) self;
  577         struct pci_attach_args *pa = aux;
  578         struct ifnet *ifp = &sc->sc_arpcom.ac_if;
  579         pci_chipset_tag_t pc = pa->pa_pc;
  580         pci_intr_handle_t ih;
  581         const char *intrstr = NULL;
  582         bus_space_tag_t iot, memt;
  583         bus_space_handle_t ioh, memh;
  584         bus_dma_segment_t seg;
  585         int ioh_valid, memh_valid;
  586         int i, rseg, error;
  587         pcireg_t pmode;
  588         uint32_t chipid, reg;
  589         uint8_t enaddr[ETHER_ADDR_LEN];
  590         int pmreg;
  591 
  592         timeout_set(&sc->sc_tick_timeout, pcn_tick, sc);
  593 
  594         /*
  595          * Map the device.
  596          */
  597         ioh_valid = (pci_mapreg_map(pa, PCN_PCI_CBIO, PCI_MAPREG_TYPE_IO, 0,
  598             &iot, &ioh, NULL, NULL, 0) == 0);
  599         memh_valid = (pci_mapreg_map(pa, PCN_PCI_CBMEM,
  600             PCI_MAPREG_TYPE_MEM|PCI_MAPREG_MEM_TYPE_32BIT, 0,
  601             &memt, &memh, NULL, NULL, 0) == 0);
  602 
  603         if (memh_valid) {
  604                 sc->sc_st = memt;
  605                 sc->sc_sh = memh;
  606         } else if (ioh_valid) {
  607                 sc->sc_st = iot;
  608                 sc->sc_sh = ioh;
  609         } else {
  610                 printf(": unable to map device registers\n");
  611                 return;
  612         }
  613 
  614         sc->sc_dmat = pa->pa_dmat;
  615 
  616         /* Get it out of power save mode, if needed. */
  617         if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
  618                 pmode = pci_conf_read(pc, pa->pa_tag, pmreg + PCI_PMCSR) &
  619                     PCI_PMCSR_STATE_MASK;
  620                 if (pmode == PCI_PMCSR_STATE_D3) {
  621                         /*
  622                          * The card has lost all configuration data in
  623                          * this state, so punt.
  624                          */
  625                         printf(": unable to wake from power state D3\n");
  626                         return;
  627                 }
  628                 if (pmode != PCI_PMCSR_STATE_D0) {
   629                         printf(": waking up from power state D%d",
  630                             pmode);
  631                         pci_conf_write(pc, pa->pa_tag, pmreg + PCI_PMCSR,
  632                             PCI_PMCSR_STATE_D0);
  633                 }
  634         }
  635 
  636         /*
  637          * Reset the chip to a known state.  This also puts the
  638          * chip into 32-bit mode.
  639          */
  640         pcn_reset(sc);
  641 
  642 #if !defined(PCN_NO_PROM)
  643 
  644         /*
  645          * Read the Ethernet address from the EEPROM.
  646          */
  647         for (i = 0; i < ETHER_ADDR_LEN; i++)
  648                 enaddr[i] = bus_space_read_1(sc->sc_st, sc->sc_sh,
  649                     PCN32_APROM + i);
  650 #else
  651         /*
  652          * The PROM is not used; instead we assume that the MAC address
  653          * has been programmed into the device's physical address
   654  * registers by the boot firmware.
  655          */
  656 
  657         for (i=0; i < 3; i++) {
  658                 uint32_t val;
  659                 val = pcn_csr_read(sc, LE_CSR12 + i);
  660                 enaddr[2*i] = val & 0x0ff;
  661                 enaddr[2*i+1] = (val >> 8) & 0x0ff;
  662         }
  663 #endif
  664 
  665         /*
  666          * Now that the device is mapped, attempt to figure out what
  667          * kind of chip we have.  Note that IDL has all 32 bits of
  668          * the chip ID when we're in 32-bit mode.
  669          */
  670         chipid = pcn_csr_read(sc, LE_CSR88);
  671         sc->sc_variant = pcn_lookup_variant(CHIPID_PARTID(chipid));
  672 
  673         /*
  674          * Map and establish our interrupt.
  675          */
  676         if (pci_intr_map(pa, &ih)) {
  677                 printf(": unable to map interrupt\n");
  678                 return;
  679         }
  680         intrstr = pci_intr_string(pc, ih);
  681         sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, pcn_intr, sc,
  682             self->dv_xname);
  683         if (sc->sc_ih == NULL) {
  684                 printf(": unable to establish interrupt");
  685                 if (intrstr != NULL)
  686                         printf(" at %s", intrstr);
  687                 printf("\n");
  688                 return;
  689         }
  690 
  691         /*
  692          * Allocate the control data structures, and create and load the
  693          * DMA map for it.
  694          */
  695         if ((error = bus_dmamem_alloc(sc->sc_dmat,
  696              sizeof(struct pcn_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
  697              0)) != 0) {
  698                 printf(": unable to allocate control data, error = %d\n",
  699                     error);
  700                 return;
  701         }
  702 
  703         if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
  704              sizeof(struct pcn_control_data), (caddr_t *)&sc->sc_control_data,
  705              BUS_DMA_COHERENT)) != 0) {
  706                 printf(": unable to map control data, error = %d\n",
  707                     error);
  708                 goto fail_1;
  709         }
  710 
  711         if ((error = bus_dmamap_create(sc->sc_dmat,
  712              sizeof(struct pcn_control_data), 1,
  713              sizeof(struct pcn_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
  714                 printf(": unable to create control data DMA map, "
  715                     "error = %d\n", error);
  716                 goto fail_2;
  717         }
  718 
  719         if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
  720              sc->sc_control_data, sizeof(struct pcn_control_data), NULL,
  721              0)) != 0) {
  722                 printf(": unable to load control data DMA map, error = %d\n",
  723                     error);
  724                 goto fail_3;
  725         }
  726 
  727         /* Create the transmit buffer DMA maps. */
  728         for (i = 0; i < PCN_TXQUEUELEN; i++) {
  729                 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
  730                      PCN_NTXSEGS, MCLBYTES, 0, 0,
  731                      &sc->sc_txsoft[i].txs_dmamap)) != 0) {
  732                         printf(": unable to create tx DMA map %d, "
  733                             "error = %d\n", i, error);
  734                         goto fail_4;
  735                 }
  736         }
  737 
  738         /* Create the receive buffer DMA maps. */
  739         for (i = 0; i < PCN_NRXDESC; i++) {
  740                 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
  741                      MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
  742                         printf(": unable to create rx DMA map %d, "
  743                             "error = %d\n", i, error);
  744                         goto fail_5;
  745                 }
  746                 sc->sc_rxsoft[i].rxs_mbuf = NULL;
  747         }
  748 
  749         printf(", %s, rev %d: %s, address %s\n", sc->sc_variant->pcv_desc,
  750             CHIPID_VER(chipid), intrstr, ether_sprintf(enaddr));
  751 
  752         /* Initialize our media structures. */
  753         (*sc->sc_variant->pcv_mediainit)(sc);
  754 
  755         /*
  756          * Initialize FIFO watermark info.
  757          */
  758         switch (sc->sc_variant->pcv_chipid) {
  759         case PARTID_Am79c970:
  760         case PARTID_Am79c970A:
  761                 sc->sc_rcvfw_desc = pcn_79c970_rcvfw;
  762                 sc->sc_xmtsp_desc = pcn_79c970_xmtsp;
  763                 sc->sc_xmtfw_desc = pcn_79c970_xmtfw;
  764                 break;
  765 
  766         default:
  767                 sc->sc_rcvfw_desc = pcn_79c971_rcvfw;
  768                 /*
  769                  * Read BCR25 to determine how much SRAM is
   770                  * on the board.  If > 0, then the chip
  771                  * uses different Start Point thresholds.
  772                  *
  773                  * Note BCR25 and BCR26 are loaded from the
  774                  * EEPROM on RST, and unaffected by S_RESET,
  775                  * so we don't really have to worry about
  776                  * them except for this.
  777                  */
  778                 reg = pcn_bcr_read(sc, LE_BCR25) & 0x00ff;
  779                 if (reg != 0)
  780                         sc->sc_xmtsp_desc = pcn_79c971_xmtsp_sram;
  781                 else
  782                         sc->sc_xmtsp_desc = pcn_79c971_xmtsp;
  783                 sc->sc_xmtfw_desc = pcn_79c971_xmtfw;
  784                 break;
  785         }
  786 
  787         /*
  788          * Set up defaults -- see the tables above for what these
  789          * values mean.
  790          *
  791          * XXX How should we tune RCVFW and XMTFW?
  792          */
  793         sc->sc_rcvfw = 1;       /* minimum for full-duplex */
  794         sc->sc_xmtsp = 1;
  795         sc->sc_xmtfw = 0;
  796 
  797         ifp = &sc->sc_arpcom.ac_if;
  798         bcopy(enaddr, sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
  799         bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
  800         ifp->if_softc = sc;
  801         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
  802         ifp->if_ioctl = pcn_ioctl;
  803         ifp->if_start = pcn_start;
  804         ifp->if_watchdog = pcn_watchdog;
  805         IFQ_SET_MAXLEN(&ifp->if_snd, PCN_NTXDESC -1);
  806         IFQ_SET_READY(&ifp->if_snd);
  807 
  808         /* Attach the interface. */
  809         if_attach(ifp);
  810         ether_ifattach(ifp);
  811 
  812         /* Make sure the interface is shutdown during reboot. */
  813         sc->sc_sdhook = shutdownhook_establish(pcn_shutdown, sc);
  814         if (sc->sc_sdhook == NULL)
  815                 printf("%s: WARNING: unable to establish shutdown hook\n",
  816                     sc->sc_dev.dv_xname);
  817         return;
  818 
  819         /*
  820          * Free any resources we've allocated during the failed attach
  821          * attempt.  Do this in reverse order and fall through.
  822          */
  823  fail_5:
  824         for (i = 0; i < PCN_NRXDESC; i++) {
  825                 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
  826                         bus_dmamap_destroy(sc->sc_dmat,
  827                             sc->sc_rxsoft[i].rxs_dmamap);
  828         }
  829  fail_4:
  830         for (i = 0; i < PCN_TXQUEUELEN; i++) {
  831                 if (sc->sc_txsoft[i].txs_dmamap != NULL)
  832                         bus_dmamap_destroy(sc->sc_dmat,
  833                             sc->sc_txsoft[i].txs_dmamap);
  834         }
  835         bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
  836  fail_3:
  837         bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
  838  fail_2:
  839         bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
  840             sizeof(struct pcn_control_data));
  841  fail_1:
  842         bus_dmamem_free(sc->sc_dmat, &seg, rseg);
  843 }
  844 
  845 /*
  846  * pcn_shutdown:
  847  *
  848  *      Make sure the interface is stopped at reboot time.
  849  */
  850 void
  851 pcn_shutdown(void *arg)
  852 {
  853         struct pcn_softc *sc = arg;
  854 
  855         pcn_stop(&sc->sc_arpcom.ac_if, 1);
  856         pcn_reset(sc);
  857 }
  858 
  859 /*
  860  * pcn_start:           [ifnet interface function]
  861  *
  862  *      Start packet transmission on the interface.
  863  */
  864 void
  865 pcn_start(struct ifnet *ifp)
  866 {
  867         struct pcn_softc *sc = ifp->if_softc;
  868         struct mbuf *m0, *m;
  869         struct pcn_txsoft *txs;
  870         bus_dmamap_t dmamap;
  871         int error, nexttx, lasttx = -1, ofree, seg;
  872 
  873         if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
  874                 return;
  875 
  876         /*
  877          * Remember the previous number of free descriptors and
  878          * the first descriptor we'll use.
  879          */
  880         ofree = sc->sc_txfree;
  881 
  882         /*
  883          * Loop through the send queue, setting up transmit descriptors
  884          * until we drain the queue, or use up all available transmit
  885          * descriptors.
  886          */
  887         for (;;) {
  888                 /* Grab a packet off the queue. */
  889                 IFQ_POLL(&ifp->if_snd, m0);
  890                 if (m0 == NULL)
  891                         break;
  892                 m = NULL;
  893 
  894                 /* Get a work queue entry. */
  895                 if (sc->sc_txsfree == 0)
  896                         break;
  897 
  898                 txs = &sc->sc_txsoft[sc->sc_txsnext];
  899                 dmamap = txs->txs_dmamap;
  900 
  901                 /*
  902                  * Load the DMA map.  If this fails, the packet either
   903                  * didn't fit in the allotted number of segments, or we
  904                  * were short on resources.  In this case, we'll copy
  905                  * and try again.
  906                  */
  907                 if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
  908                     BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
  909                         MGETHDR(m, M_DONTWAIT, MT_DATA);
  910                         if (m == NULL)
  911                                 break;
  912                         if (m0->m_pkthdr.len > MHLEN) {
  913                                 MCLGET(m, M_DONTWAIT);
  914                                 if ((m->m_flags & M_EXT) == 0) {
  915                                         m_freem(m);
  916                                         break;
  917                                 }
  918                         }
  919                         m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
  920                         m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
  921                         error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
  922                             m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
  923                         if (error)
  924                                 break;
  925                 }
  926 
  927                 /*
  928                  * Ensure we have enough descriptors free to describe
  929                  * the packet.  Note, we always reserve one descriptor
  930                  * at the end of the ring as a termination point, to
  931                  * prevent wrap-around.
  932                  */
  933                 if (dmamap->dm_nsegs > (sc->sc_txfree - 1)) {
  934                         /*
  935                          * Not enough free descriptors to transmit this
  936                          * packet.  We haven't committed anything yet,
  937                          * so just unload the DMA map, put the packet
  938                          * back on the queue, and punt.  Notify the upper
   939                          * layer that there are no more slots left.
  940                          *
  941                          * XXX We could allocate an mbuf and copy, but
  942                          * XXX is it worth it?
  943                          */
  944                         ifp->if_flags |= IFF_OACTIVE;
  945                         bus_dmamap_unload(sc->sc_dmat, dmamap);
  946                         if (m != NULL)
  947                                 m_freem(m);
  948                         break;
  949                 }
  950 
  951                 IFQ_DEQUEUE(&ifp->if_snd, m0);
  952                 if (m != NULL) {
  953                         m_freem(m0);
  954                         m0 = m;
  955                 }
  956 
  957                 /*
  958                  * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
  959                  */
  960 
  961                 /* Sync the DMA map. */
  962                 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
  963                     BUS_DMASYNC_PREWRITE);
  964 
  965                 /*
  966                  * Initialize the transmit descriptors.
  967                  */
  968                 if (sc->sc_swstyle == LE_B20_SSTYLE_PCNETPCI3) {
  969                         for (nexttx = sc->sc_txnext, seg = 0;
  970                              seg < dmamap->dm_nsegs;
  971                              seg++, nexttx = PCN_NEXTTX(nexttx)) {
  972                                 /*
  973                                  * If this is the first descriptor we're
  974                                  * enqueueing, don't set the OWN bit just
  975                                  * yet.  That could cause a race condition.
  976                                  * We'll do it below.
  977                                  */
  978                                 sc->sc_txdescs[nexttx].tmd0 = 0;
  979                                 sc->sc_txdescs[nexttx].tmd2 =
  980                                     htole32(dmamap->dm_segs[seg].ds_addr);
  981                                 sc->sc_txdescs[nexttx].tmd1 =
  982                                     htole32(LE_T1_ONES |
  983                                     (nexttx == sc->sc_txnext ? 0 : LE_T1_OWN) |
  984                                     (LE_BCNT(dmamap->dm_segs[seg].ds_len) &
  985                                      LE_T1_BCNT_MASK));
  986                                 lasttx = nexttx;
  987                         }
  988                 } else {
  989                         for (nexttx = sc->sc_txnext, seg = 0;
  990                              seg < dmamap->dm_nsegs;
  991                              seg++, nexttx = PCN_NEXTTX(nexttx)) {
  992                                 /*
  993                                  * If this is the first descriptor we're
  994                                  * enqueueing, don't set the OWN bit just
  995                                  * yet.  That could cause a race condition.
  996                                  * We'll do it below.
  997                                  */
  998                                 sc->sc_txdescs[nexttx].tmd0 =
  999                                     htole32(dmamap->dm_segs[seg].ds_addr);
 1000                                 sc->sc_txdescs[nexttx].tmd2 = 0;
 1001                                 sc->sc_txdescs[nexttx].tmd1 =
 1002                                     htole32(LE_T1_ONES |
 1003                                     (nexttx == sc->sc_txnext ? 0 : LE_T1_OWN) |
 1004                                     (LE_BCNT(dmamap->dm_segs[seg].ds_len) &
 1005                                      LE_T1_BCNT_MASK));
 1006                                 lasttx = nexttx;
 1007                         }
 1008                 }
 1009 
 1010                 KASSERT(lasttx != -1);
 1011                 /* Interrupt on the packet, if appropriate. */
 1012                 if ((sc->sc_txsnext & PCN_TXINTR_MASK) == 0)
 1013                         sc->sc_txdescs[lasttx].tmd1 |= htole32(LE_T1_LTINT);
 1014 
 1015                 /* Set `start of packet' and `end of packet' appropriately. */
 1016                 sc->sc_txdescs[lasttx].tmd1 |= htole32(LE_T1_ENP);
 1017                 sc->sc_txdescs[sc->sc_txnext].tmd1 |=
 1018                     htole32(LE_T1_OWN|LE_T1_STP);
 1019 
 1020                 /* Sync the descriptors we're using. */
 1021                 PCN_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
 1022                     BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
 1023 
 1024                 /* Kick the transmitter. */
 1025                 pcn_csr_write(sc, LE_CSR0, LE_C0_INEA|LE_C0_TDMD);
 1026 
 1027                 /*
 1028                  * Store a pointer to the packet so we can free it later,
 1029                  * and remember what txdirty will be once the packet is
 1030                  * done.
 1031                  */
 1032                 txs->txs_mbuf = m0;
 1033                 txs->txs_firstdesc = sc->sc_txnext;
 1034                 txs->txs_lastdesc = lasttx;
 1035 
 1036                 /* Advance the tx pointer. */
 1037                 sc->sc_txfree -= dmamap->dm_nsegs;
 1038                 sc->sc_txnext = nexttx;
 1039 
 1040                 sc->sc_txsfree--;
 1041                 sc->sc_txsnext = PCN_NEXTTXS(sc->sc_txsnext);
 1042 
 1043 #if NBPFILTER > 0
 1044                 /* Pass the packet to any BPF listeners. */
 1045                 if (ifp->if_bpf)
 1046                         bpf_mtap(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
 1047 #endif /* NBPFILTER > 0 */
 1048         }
 1049 
 1050         if (sc->sc_txsfree == 0 || sc->sc_txfree == 0) {
 1051                 /* No more slots left; notify upper layer. */
 1052                 ifp->if_flags |= IFF_OACTIVE;
 1053         }
 1054 
 1055         if (sc->sc_txfree != ofree) {
 1056                 /* Set a watchdog timer in case the chip flakes out. */
 1057                 ifp->if_timer = 5;
 1058         }
 1059 }
 1060 
 1061 /*
 1062  * pcn_watchdog:        [ifnet interface function]
 1063  *
 1064  *      Watchdog timer handler.
 1065  */
 1066 void
 1067 pcn_watchdog(struct ifnet *ifp)
 1068 {
 1069         struct pcn_softc *sc = ifp->if_softc;
 1070 
 1071         /*
 1072          * Since we're not interrupting every packet, sweep
 1073          * up before we report an error.
 1074          */
 1075         pcn_txintr(sc);
 1076 
 1077         if (sc->sc_txfree != PCN_NTXDESC) {
 1078                 printf("%s: device timeout (txfree %d txsfree %d)\n",
 1079                     sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree);
 1080                 ifp->if_oerrors++;
 1081 
 1082                 /* Reset the interface. */
 1083                 (void) pcn_init(ifp);
 1084         }
 1085 
 1086         /* Try to get more packets going. */
 1087         pcn_start(ifp);
 1088 }
 1089 
 1090 /*
 1091  * pcn_ioctl:           [ifnet interface function]
 1092  *
 1093  *      Handle control requests from the operator.
 1094  */
 1095 int
 1096 pcn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
 1097 {
 1098         struct pcn_softc *sc = ifp->if_softc;
 1099         struct ifreq *ifr = (struct ifreq *) data;
 1100         struct ifaddr *ifa = (struct ifaddr *)data;
 1101         int s, error = 0;
 1102 
 1103         s = splnet();
 1104 
 1105         if ((error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data)) > 0) {
 1106                 /* Try to get more packets going. */
 1107                 pcn_start(ifp);
 1108 
 1109                 splx(s);
 1110                 return (error);
 1111         }
 1112 
 1113         switch (cmd) {
 1114         case SIOCSIFADDR:
 1115                 ifp->if_flags |= IFF_UP;
 1116 
 1117                 switch (ifa->ifa_addr->sa_family) {
 1118 #ifdef INET
 1119                 case AF_INET:
 1120                         pcn_init(ifp);
 1121                         arp_ifinit(&sc->sc_arpcom, ifa);
 1122                         break;
 1123 #endif
 1124                 default:
 1125                         pcn_init(ifp);
 1126                         break;
 1127                 }
 1128                 break;
 1129 
 1130         case SIOCSIFMTU:
 1131                 if (ifr->ifr_mtu > ETHERMTU || ifr->ifr_mtu < ETHERMIN)
 1132                         error = EINVAL;
 1133                 else if (ifp->if_mtu != ifr->ifr_mtu)
 1134                         ifp->if_mtu = ifr->ifr_mtu;
 1135                 break;
 1136 
 1137         case SIOCSIFFLAGS:
 1138                 /*
 1139                  * If interface is marked up and not running, then start it.
 1140                  * If it is marked down and running, stop it.
 1141                  * XXX If it's up then re-initialize it. This is so flags
 1142                  * such as IFF_PROMISC are handled.
 1143                  */
 1144                 if (ifp->if_flags & IFF_UP)
 1145                         pcn_init(ifp);
 1146                 else if (ifp->if_flags & IFF_RUNNING)
 1147                         pcn_stop(ifp, 1);
 1148                 break;
 1149 
 1150         case SIOCADDMULTI:
 1151         case SIOCDELMULTI:
 1152                 error = (cmd == SIOCADDMULTI) ?
 1153                     ether_addmulti(ifr, &sc->sc_arpcom) :
 1154                     ether_delmulti(ifr, &sc->sc_arpcom);
 1155 
 1156                 if (error == ENETRESET) {
 1157                         /*
 1158                          * Multicast list has changed; set the hardware
 1159                          * filter accordingly.
 1160                          */
 1161                         if (ifp->if_flags & IFF_RUNNING)
 1162                                 error = pcn_init(ifp);
 1163                         else
 1164                                 error = 0;
 1165                 }
 1166                 break;
 1167 
 1168         case SIOCSIFMEDIA:
 1169         case SIOCGIFMEDIA:
 1170                 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
 1171                 break;
 1172 
 1173         default:
 1174                 error = ENOTTY;
 1175         }
 1176 
 1177         /* Try to get more packets going. */
 1178         pcn_start(ifp);
 1179 
 1180         splx(s);
 1181         return (error);
 1182 }
 1183 
 1184 /*
 1185  * pcn_intr:
 1186  *
 1187  *      Interrupt service routine.
 1188  */
 1189 int
 1190 pcn_intr(void *arg)
 1191 {
 1192         struct pcn_softc *sc = arg;
 1193         struct ifnet *ifp = &sc->sc_arpcom.ac_if;
 1194         uint32_t csr0;
 1195         int wantinit, handled = 0;
 1196 
 1197         for (wantinit = 0; wantinit == 0;) {
 1198                 csr0 = pcn_csr_read(sc, LE_CSR0);
 1199                 if ((csr0 & LE_C0_INTR) == 0)
 1200                         break;
 1201 
 1202                 /* ACK the bits and re-enable interrupts. */
 1203                 pcn_csr_write(sc, LE_CSR0, csr0 &
 1204                     (LE_C0_INEA|LE_C0_BABL|LE_C0_MISS|LE_C0_MERR|LE_C0_RINT|
 1205                      LE_C0_TINT|LE_C0_IDON));
 1206 
 1207                 handled = 1;
 1208 
 1209                 if (csr0 & LE_C0_RINT)
 1210                         wantinit = pcn_rxintr(sc);
 1211 
 1212                 if (csr0 & LE_C0_TINT)
 1213                         pcn_txintr(sc);
 1214 
 1215                 if (csr0 & LE_C0_ERR) {
 1216                         if (csr0 & LE_C0_BABL)
 1217                                 ifp->if_oerrors++;
 1218                         if (csr0 & LE_C0_MISS)
 1219                                 ifp->if_ierrors++;
 1220                         if (csr0 & LE_C0_MERR) {
 1221                                 printf("%s: memory error\n",
 1222                                     sc->sc_dev.dv_xname);
 1223                                 wantinit = 1;
 1224                                 break;
 1225                         }
 1226                 }
 1227 
 1228                 if ((csr0 & LE_C0_RXON) == 0) {
 1229                         printf("%s: receiver disabled\n",
 1230                             sc->sc_dev.dv_xname);
 1231                         ifp->if_ierrors++;
 1232                         wantinit = 1;
 1233                 }
 1234 
 1235                 if ((csr0 & LE_C0_TXON) == 0) {
 1236                         printf("%s: transmitter disabled\n",
 1237                             sc->sc_dev.dv_xname);
 1238                         ifp->if_oerrors++;
 1239                         wantinit = 1;
 1240                 }
 1241         }
 1242 
 1243         if (handled) {
 1244                 if (wantinit)
 1245                         pcn_init(ifp);
 1246 
 1247                 /* Try to get more packets going. */
 1248                 pcn_start(ifp);
 1249         }
 1250 
 1251         return (handled);
 1252 }
 1253 
 1254 /*
 1255  * pcn_spnd:
 1256  *
 1257  *      Suspend the chip.
 1258  */
 1259 void
 1260 pcn_spnd(struct pcn_softc *sc)
 1261 {
 1262         int i;
 1263 
 1264         pcn_csr_write(sc, LE_CSR5, sc->sc_csr5 | LE_C5_SPND);
 1265 
 1266         for (i = 0; i < 10000; i++) {
 1267                 if (pcn_csr_read(sc, LE_CSR5) & LE_C5_SPND)
 1268                         return;
 1269                 delay(5);
 1270         }
 1271 
 1272         printf("%s: WARNING: chip failed to enter suspended state\n",
 1273             sc->sc_dev.dv_xname);
 1274 }
 1275 
 1276 /*
 1277  * pcn_txintr:
 1278  *
 1279  *      Helper; handle transmit interrupts.
 1280  */
 1281 void
 1282 pcn_txintr(struct pcn_softc *sc)
 1283 {
 1284         struct ifnet *ifp = &sc->sc_arpcom.ac_if;
 1285         struct pcn_txsoft *txs;
 1286         uint32_t tmd1, tmd2, tmd;
 1287         int i, j;
 1288 
 1289         ifp->if_flags &= ~IFF_OACTIVE;
 1290 
 1291         /*
 1292          * Go through our Tx list and free mbufs for those
 1293          * frames which have been transmitted.
 1294          */
 1295         for (i = sc->sc_txsdirty; sc->sc_txsfree != PCN_TXQUEUELEN;
 1296              i = PCN_NEXTTXS(i), sc->sc_txsfree++) {
 1297                 txs = &sc->sc_txsoft[i];
 1298 
 1299                 PCN_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
 1300                     BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
 1301 
 1302                 tmd1 = letoh32(sc->sc_txdescs[txs->txs_lastdesc].tmd1);
 1303                 if (tmd1 & LE_T1_OWN)
 1304                         break;
 1305 
 1306                 /*
 1307                  * Slightly annoying -- we have to loop through the
 1308                  * descriptors we've used looking for ERR, since it
 1309                  * can appear on any descriptor in the chain.
 1310                  */
 1311                 for (j = txs->txs_firstdesc;; j = PCN_NEXTTX(j)) {
 1312                         tmd = letoh32(sc->sc_txdescs[j].tmd1);
 1313                         if (tmd & LE_T1_ERR) {
 1314                                 ifp->if_oerrors++;
 1315                                 if (sc->sc_swstyle == LE_B20_SSTYLE_PCNETPCI3)
 1316                                         tmd2 = letoh32(sc->sc_txdescs[j].tmd0);
 1317                                 else
 1318                                         tmd2 = letoh32(sc->sc_txdescs[j].tmd2);
 1319                                 if (tmd2 & LE_T2_UFLO) {
 1320                                         if (sc->sc_xmtsp < LE_C80_XMTSP_MAX) {
 1321                                                 sc->sc_xmtsp++;
 1322                                                 printf("%s: transmit "
 1323                                                     "underrun; new threshold: "
 1324                                                     "%s\n",
 1325                                                     sc->sc_dev.dv_xname,
 1326                                                     sc->sc_xmtsp_desc[
 1327                                                     sc->sc_xmtsp]);
 1328                                                 pcn_spnd(sc);
 1329                                                 pcn_csr_write(sc, LE_CSR80,
 1330                                                     LE_C80_RCVFW(sc->sc_rcvfw) |
 1331                                                     LE_C80_XMTSP(sc->sc_xmtsp) |
 1332                                                     LE_C80_XMTFW(sc->sc_xmtfw));
 1333                                                 pcn_csr_write(sc, LE_CSR5,
 1334                                                     sc->sc_csr5);
 1335                                         } else {
 1336                                                 printf("%s: transmit "
 1337                                                     "underrun\n",
 1338                                                     sc->sc_dev.dv_xname);
 1339                                         }
 1340                                 } else if (tmd2 & LE_T2_BUFF) {
 1341                                         printf("%s: transmit buffer error\n",
 1342                                             sc->sc_dev.dv_xname);
 1343                                 }
 1344                                 if (tmd2 & LE_T2_LCOL)
 1345                                         ifp->if_collisions++;
 1346                                 if (tmd2 & LE_T2_RTRY)
 1347                                         ifp->if_collisions += 16;
 1348                                 goto next_packet;
 1349                         }
 1350                         if (j == txs->txs_lastdesc)
 1351                                 break;
 1352                 }
 1353                 if (tmd1 & LE_T1_ONE)
 1354                         ifp->if_collisions++;
 1355                 else if (tmd & LE_T1_MORE) {
 1356                         /* Real number is unknown. */
 1357                         ifp->if_collisions += 2;
 1358                 }
 1359                 ifp->if_opackets++;
 1360  next_packet:
 1361                 sc->sc_txfree += txs->txs_dmamap->dm_nsegs;
 1362                 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
 1363                     0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
 1364                 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
 1365                 m_freem(txs->txs_mbuf);
 1366                 txs->txs_mbuf = NULL;
 1367         }
 1368 
 1369         /* Update the dirty transmit buffer pointer. */
 1370         sc->sc_txsdirty = i;
 1371 
 1372         /*
 1373          * If there are no more pending transmissions, cancel the watchdog
 1374          * timer.
 1375          */
 1376         if (sc->sc_txsfree == PCN_TXQUEUELEN)
 1377                 ifp->if_timer = 0;
 1378 }
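
/*
 * Illustrative sketch, not referenced by the driver: the underrun
 * recovery in pcn_txintr() above widens the transmit start point one
 * step at a time and reprograms CSR80 so the chip buffers more of a
 * frame before transmission begins.  The hypothetical helper below
 * (the example_* name is not part of this file) isolates how the new
 * CSR80 value is composed from the three FIFO watermark fields.
 */
static uint32_t
example_csr80_after_underrun(struct pcn_softc *sc)
{

        /* Buffer more data before starting transmission next time. */
        if (sc->sc_xmtsp < LE_C80_XMTSP_MAX)
                sc->sc_xmtsp++;

        return (LE_C80_RCVFW(sc->sc_rcvfw) | LE_C80_XMTSP(sc->sc_xmtsp) |
            LE_C80_XMTFW(sc->sc_xmtfw));
}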
 1379 
 1380 /*
 1381  * pcn_rxintr:
 1382  *
 1383  *      Helper; handle receive interrupts.
 1384  */
 1385 int
 1386 pcn_rxintr(struct pcn_softc *sc)
 1387 {
 1388         struct ifnet *ifp = &sc->sc_arpcom.ac_if;
 1389         struct pcn_rxsoft *rxs;
 1390         struct mbuf *m;
 1391         uint32_t rmd1;
 1392         int i, len;
 1393 
 1394         for (i = sc->sc_rxptr;; i = PCN_NEXTRX(i)) {
 1395                 rxs = &sc->sc_rxsoft[i];
 1396 
 1397                 PCN_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
 1398 
 1399                 rmd1 = letoh32(sc->sc_rxdescs[i].rmd1);
 1400 
 1401                 if (rmd1 & LE_R1_OWN)
 1402                         break;
 1403 
 1404                 /*
 1405                  * Check for errors and make sure the packet fits into
 1406                  * a single buffer.  This block is structured so that
 1407                  * the common case (no error) requires only a single
 1408                  * test.
 1409                  */
 1410                 if (__predict_false((rmd1 & (LE_R1_STP|LE_R1_ENP|LE_R1_ERR)) !=
 1411                     (LE_R1_STP|LE_R1_ENP))) {
 1412                         /* Make sure the packet is in a single buffer. */
 1413                         if ((rmd1 & (LE_R1_STP|LE_R1_ENP)) !=
 1414                             (LE_R1_STP|LE_R1_ENP)) {
 1415                                 printf("%s: packet spilled into next buffer\n",
 1416                                     sc->sc_dev.dv_xname);
 1417                                 return (1);     /* pcn_intr() will re-init */
 1418                         }
 1419 
 1420                         /*
 1421                          * If the packet had an error, simply recycle the
 1422                          * buffer.
 1423                          */
 1424                         if (rmd1 & LE_R1_ERR) {
 1425                                 ifp->if_ierrors++;
 1426                                 /*
 1427                                  * If we got an overflow error, chances
 1428                                  * are there will be a CRC error.  In
 1429                                  * this case, just print the overflow
 1430                                  * error, and skip the others.
 1431                                  */
 1432                                 if (rmd1 & LE_R1_OFLO)
 1433                                         printf("%s: overflow error\n",
 1434                                             sc->sc_dev.dv_xname);
 1435                                 else {
 1436 #define PRINTIT(x, str)                                                 \
 1437                                         if (rmd1 & (x))                 \
 1438                                                 printf("%s: %s\n",      \
 1439                                                     sc->sc_dev.dv_xname, str);
 1440                                         PRINTIT(LE_R1_FRAM, "framing error");
 1441                                         PRINTIT(LE_R1_CRC, "CRC error");
 1442                                         PRINTIT(LE_R1_BUFF, "buffer error");
 1443                                 }
 1444 #undef PRINTIT
 1445                                 PCN_INIT_RXDESC(sc, i);
 1446                                 continue;
 1447                         }
 1448                 }
 1449 
 1450                 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
 1451                     rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
 1452 
 1453                 /*
 1454                  * No errors; receive the packet.
 1455                  */
 1456                 if (sc->sc_swstyle == LE_B20_SSTYLE_PCNETPCI3)
 1457                         len = letoh32(sc->sc_rxdescs[i].rmd0) & LE_R1_BCNT_MASK;
 1458                 else
 1459                         len = letoh32(sc->sc_rxdescs[i].rmd2) & LE_R1_BCNT_MASK;
 1460 
 1461                 /*
 1462                  * The LANCE family includes the CRC with every packet;
 1463                  * trim it off here.
 1464                  */
 1465                 len -= ETHER_CRC_LEN;
 1466 
 1467                 /*
 1468                  * If the packet is small enough to fit in a
 1469                  * single header mbuf, allocate one and copy
 1470                  * the data into it.  This greatly reduces
 1471                  * memory consumption when we receive lots
 1472                  * of small packets.
 1473                  *
 1474                  * Otherwise, we add a new buffer to the receive
 1475                  * chain.  If this fails, we drop the packet and
 1476                  * recycle the old buffer.
 1477                  */
 1478                 if (pcn_copy_small != 0 && len <= (MHLEN - 2)) {
 1479                         MGETHDR(m, M_DONTWAIT, MT_DATA);
 1480                         if (m == NULL)
 1481                                 goto dropit;
 1482                         m->m_data += 2; /* keep the IP header aligned */
 1483                         memcpy(mtod(m, caddr_t),
 1484                             mtod(rxs->rxs_mbuf, caddr_t), len);
 1485                         PCN_INIT_RXDESC(sc, i);
 1486                         bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
 1487                             rxs->rxs_dmamap->dm_mapsize,
 1488                             BUS_DMASYNC_PREREAD);
 1489                 } else {
 1490                         m = rxs->rxs_mbuf;
 1491                         if (pcn_add_rxbuf(sc, i) != 0) {
 1492  dropit:
 1493                                 ifp->if_ierrors++;
 1494                                 PCN_INIT_RXDESC(sc, i);
 1495                                 bus_dmamap_sync(sc->sc_dmat,
 1496                                     rxs->rxs_dmamap, 0,
 1497                                     rxs->rxs_dmamap->dm_mapsize,
 1498                                     BUS_DMASYNC_PREREAD);
 1499                                 continue;
 1500                         }
 1501                 }
 1502 
 1503                 m->m_pkthdr.rcvif = ifp;
 1504                 m->m_pkthdr.len = m->m_len = len;
 1505 
 1506 #if NBPFILTER > 0
 1507                 /* Pass this up to any BPF listeners. */
 1508                 if (ifp->if_bpf)
 1509                         bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
 1510 #endif /* NBPFILTER > 0 */
 1511 
 1512                 /* Pass it on. */
 1513                 ether_input_mbuf(ifp, m);
 1514                 ifp->if_ipackets++;
 1515         }
 1516 
 1517         /* Update the receive pointer. */
 1518         sc->sc_rxptr = i;
 1519         return (0);
 1520 }
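
/*
 * Illustrative sketch, not compiled into the driver: pcn_rxintr()
 * above either copies a small frame into a fresh header mbuf (cheap,
 * and the cluster stays on the ring) or passes the current cluster up
 * and loads a new one, recycling the old buffer if that fails.  The
 * hypothetical helper below (example_* is not part of this file)
 * keeps only that decision; the DMA syncs, BPF tap and error counters
 * of the real code are omitted for brevity.
 */
static struct mbuf *
example_rx_claim(struct pcn_softc *sc, int idx, int len)
{
        struct pcn_rxsoft *rxs = &sc->sc_rxsoft[idx];
        struct mbuf *m;

        if (pcn_copy_small != 0 && len <= (MHLEN - 2)) {
                /* Copy path: the receive cluster never leaves the ring. */
                MGETHDR(m, M_DONTWAIT, MT_DATA);
                if (m == NULL)
                        return (NULL);
                m->m_data += 2;         /* keep the IP header aligned */
                memcpy(mtod(m, caddr_t), mtod(rxs->rxs_mbuf, caddr_t), len);
                PCN_INIT_RXDESC(sc, idx);
                return (m);
        }

        /* Swap path: hand the cluster up and load a fresh one. */
        m = rxs->rxs_mbuf;
        if (pcn_add_rxbuf(sc, idx) != 0) {
                /* No replacement available; recycle the old buffer. */
                PCN_INIT_RXDESC(sc, idx);
                return (NULL);
        }
        return (m);
}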
 1521 
 1522 /*
 1523  * pcn_tick:
 1524  *
 1525  *      One second timer, used to tick the MII.
 1526  */
 1527 void
 1528 pcn_tick(void *arg)
 1529 {
 1530         struct pcn_softc *sc = arg;
 1531         int s;
 1532 
 1533         s = splnet();
 1534         mii_tick(&sc->sc_mii);
 1535         splx(s);
 1536 
 1537         timeout_add(&sc->sc_tick_timeout, hz);
 1538 }
 1539 
 1540 /*
 1541  * pcn_reset:
 1542  *
 1543  *      Perform a soft reset on the PCnet-PCI.
 1544  */
 1545 void
 1546 pcn_reset(struct pcn_softc *sc)
 1547 {
 1548 
 1549         /*
 1550          * The PCnet-PCI chip is reset by reading from the
 1551          * RESET register.  Note that while the NE2100 LANCE
 1552          * boards require a write after the read, the PCnet-PCI
 1553          * chips do not require this.
 1554          *
 1555          * Since we don't know if we're in 16-bit or 32-bit
 1556          * mode right now, issue both (it's safe) in the
 1557          * hopes that one will succeed.
 1558          */
 1559         (void) bus_space_read_2(sc->sc_st, sc->sc_sh, PCN16_RESET);
 1560         (void) bus_space_read_4(sc->sc_st, sc->sc_sh, PCN32_RESET);
 1561 
 1562         /* Wait 1ms for it to finish. */
 1563         delay(1000);
 1564 
 1565         /*
 1566          * Select 32-bit I/O mode by issuing a 32-bit write to the
 1567          * RDP.  Since the RAP is 0 after a reset, writing a 0
 1568          * to RDP is safe (since it simply clears CSR0).
 1569          */
 1570         bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_RDP, 0);
 1571 }
 1572 
 1573 /*
 1574  * pcn_init:            [ifnet interface function]
 1575  *
 1576  *      Initialize the interface.  Must be called at splnet().
 1577  */
 1578 int
 1579 pcn_init(struct ifnet *ifp)
 1580 {
 1581         struct pcn_softc *sc = ifp->if_softc;
 1582         struct pcn_rxsoft *rxs;
 1583         uint8_t *enaddr = LLADDR(ifp->if_sadl);
 1584         int i, error = 0;
 1585         uint32_t reg;
 1586 
 1587         /* Cancel any pending I/O. */
 1588         pcn_stop(ifp, 0);
 1589 
 1590         /* Reset the chip to a known state. */
 1591         pcn_reset(sc);
 1592 
 1593         /*
 1594          * Select SSTYLE 2 on the Am79c970, and SSTYLE 3 on everything
 1595          * else.
 1596          *
 1597          * XXX It'd be really nice to use SSTYLE 2 on all the chips,
 1598          * because the structure layout is compatible with ILACC,
 1599          * but the burst mode is only available in SSTYLE 3, and
 1600          * burst mode should provide some performance enhancement.
 1601          */
 1602         if (sc->sc_variant->pcv_chipid == PARTID_Am79c970)
 1603                 sc->sc_swstyle = LE_B20_SSTYLE_PCNETPCI2;
 1604         else
 1605                 sc->sc_swstyle = LE_B20_SSTYLE_PCNETPCI3;
 1606         pcn_bcr_write(sc, LE_BCR20, sc->sc_swstyle);
 1607 
 1608         /* Initialize the transmit descriptor ring. */
 1609         memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
 1610         PCN_CDTXSYNC(sc, 0, PCN_NTXDESC,
 1611             BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
 1612         sc->sc_txfree = PCN_NTXDESC;
 1613         sc->sc_txnext = 0;
 1614 
 1615         /* Initialize the transmit job descriptors. */
 1616         for (i = 0; i < PCN_TXQUEUELEN; i++)
 1617                 sc->sc_txsoft[i].txs_mbuf = NULL;
 1618         sc->sc_txsfree = PCN_TXQUEUELEN;
 1619         sc->sc_txsnext = 0;
 1620         sc->sc_txsdirty = 0;
 1621 
 1622         /*
 1623          * Initialize the receive descriptor and receive job
 1624          * descriptor rings.
 1625          */
 1626         for (i = 0; i < PCN_NRXDESC; i++) {
 1627                 rxs = &sc->sc_rxsoft[i];
 1628                 if (rxs->rxs_mbuf == NULL) {
 1629                         if ((error = pcn_add_rxbuf(sc, i)) != 0) {
 1630                                 printf("%s: unable to allocate or map rx "
 1631                                     "buffer %d, error = %d\n",
 1632                                     sc->sc_dev.dv_xname, i, error);
 1633                                 /*
 1634                                  * XXX Should attempt to run with fewer receive
 1635                                  * XXX buffers instead of just failing.
 1636                                  */
 1637                                 pcn_rxdrain(sc);
 1638                                 goto out;
 1639                         }
 1640                 } else
 1641                         PCN_INIT_RXDESC(sc, i);
 1642         }
 1643         sc->sc_rxptr = 0;
 1644 
 1645         /* Initialize MODE for the initialization block. */
 1646         sc->sc_mode = 0;
 1647         if (ifp->if_flags & IFF_PROMISC)
 1648                 sc->sc_mode |= LE_C15_PROM;
 1649         if ((ifp->if_flags & IFF_BROADCAST) == 0)
 1650                 sc->sc_mode |= LE_C15_DRCVBC;
 1651 
 1652         /*
 1653          * If we have MII, simply select MII in the MODE register,
 1654          * and clear ASEL.  Otherwise, let ASEL stand (for now),
 1655          * and leave PORTSEL alone (it is ignored when ASEL is set).
 1656          */
 1657         if (sc->sc_flags & PCN_F_HAS_MII) {
 1658                 pcn_bcr_write(sc, LE_BCR2,
 1659                     pcn_bcr_read(sc, LE_BCR2) & ~LE_B2_ASEL);
 1660                 sc->sc_mode |= LE_C15_PORTSEL(PORTSEL_MII);
 1661 
 1662                 /*
 1663                  * Disable MII auto-negotiation.  We handle that in
 1664                  * our own MII layer.
 1665                  */
 1666                 pcn_bcr_write(sc, LE_BCR32,
 1667                     pcn_bcr_read(sc, LE_BCR32) | LE_B32_DANAS);
 1668         }
 1669 
 1670         /*
 1671          * Set the Tx and Rx descriptor ring addresses in the init
 1672          * block, and the TLEN and RLEN fields of the init block
 1673          * MODE word.
 1674          */
 1675         sc->sc_initblock.init_rdra = htole32(PCN_CDRXADDR(sc, 0));
 1676         sc->sc_initblock.init_tdra = htole32(PCN_CDTXADDR(sc, 0));
 1677         sc->sc_initblock.init_mode = htole32(sc->sc_mode |
 1678             ((ffs(PCN_NTXDESC) - 1) << 28) |
 1679             ((ffs(PCN_NRXDESC) - 1) << 20));
 1680 
 1681         /* Set the station address in the init block. */
 1682         sc->sc_initblock.init_padr[0] = htole32(enaddr[0] |
 1683             (enaddr[1] << 8) | (enaddr[2] << 16) | (enaddr[3] << 24));
 1684         sc->sc_initblock.init_padr[1] = htole32(enaddr[4] |
 1685             (enaddr[5] << 8));
 1686 
 1687         /* Set the multicast filter in the init block. */
 1688         pcn_set_filter(sc);
 1689 
 1690         /* Initialize CSR3. */
 1691         pcn_csr_write(sc, LE_CSR3, LE_C3_MISSM|LE_C3_IDONM|LE_C3_DXSUFLO);
 1692 
 1693         /* Initialize CSR4. */
 1694         pcn_csr_write(sc, LE_CSR4, LE_C4_DMAPLUS|LE_C4_APAD_XMT|
 1695             LE_C4_MFCOM|LE_C4_RCVCCOM|LE_C4_TXSTRTM);
 1696 
 1697         /* Initialize CSR5. */
 1698         sc->sc_csr5 = LE_C5_LTINTEN|LE_C5_SINTE;
 1699         pcn_csr_write(sc, LE_CSR5, sc->sc_csr5);
 1700 
 1701         /*
 1702          * If we have an Am79c971 or greater, initialize CSR7.
 1703          *
 1704          * XXX Might be nice to use the MII auto-poll interrupt someday.
 1705          */
 1706         switch (sc->sc_variant->pcv_chipid) {
 1707         case PARTID_Am79c970:
 1708         case PARTID_Am79c970A:
 1709                 /* Not available on these chips. */
 1710                 break;
 1711 
 1712         default:
 1713                 pcn_csr_write(sc, LE_CSR7, LE_C7_FASTSPNDE);
 1714                 break;
 1715         }
 1716 
 1717         /*
 1718          * On the Am79c970A and greater, initialize BCR18 to
 1719          * enable burst mode.
 1720          *
 1721          * Also enable the "no underflow" option on the Am79c971 and
 1722          * higher, which prevents the chip from generating transmit
 1723          * underflows, yet still provides decent performance.  Note that
 1724          * if the chip is not connected to external SRAM, we still have
 1725          * to handle underflow errors (the NOUFLO bit is ignored in
 1726          * that case).
 1727          */
 1728         reg = pcn_bcr_read(sc, LE_BCR18);
 1729         switch (sc->sc_variant->pcv_chipid) {
 1730         case PARTID_Am79c970:
 1731                 break;
 1732 
 1733         case PARTID_Am79c970A:
 1734                 reg |= LE_B18_BREADE|LE_B18_BWRITE;
 1735                 break;
 1736 
 1737         default:
 1738                 reg |= LE_B18_BREADE|LE_B18_BWRITE|LE_B18_NOUFLO;
 1739                 break;
 1740         }
 1741         pcn_bcr_write(sc, LE_BCR18, reg);
 1742 
 1743         /*
 1744          * Initialize CSR80 (FIFO thresholds for Tx and Rx).
 1745          */
 1746         pcn_csr_write(sc, LE_CSR80, LE_C80_RCVFW(sc->sc_rcvfw) |
 1747             LE_C80_XMTSP(sc->sc_xmtsp) | LE_C80_XMTFW(sc->sc_xmtfw));
 1748 
 1749         /*
 1750          * Send the init block to the chip, and wait for it
 1751          * to be processed.
 1752          */
 1753         PCN_CDINITSYNC(sc, BUS_DMASYNC_PREWRITE);
 1754         pcn_csr_write(sc, LE_CSR1, PCN_CDINITADDR(sc) & 0xffff);
 1755         pcn_csr_write(sc, LE_CSR2, (PCN_CDINITADDR(sc) >> 16) & 0xffff);
 1756         pcn_csr_write(sc, LE_CSR0, LE_C0_INIT);
 1757         delay(100);
 1758         for (i = 0; i < 10000; i++) {
 1759                 if (pcn_csr_read(sc, LE_CSR0) & LE_C0_IDON)
 1760                         break;
 1761                 delay(10);
 1762         }
 1763         PCN_CDINITSYNC(sc, BUS_DMASYNC_POSTWRITE);
 1764         if (i == 10000) {
 1765                 printf("%s: timeout processing init block\n",
 1766                     sc->sc_dev.dv_xname);
 1767                 error = EIO;
 1768                 goto out;
 1769         }
 1770 
 1771         /* Set the media. */
 1772         (void) (*sc->sc_mii.mii_media.ifm_change)(ifp);
 1773 
 1774         /* Enable interrupts and external activity (and ACK IDON). */
 1775         pcn_csr_write(sc, LE_CSR0, LE_C0_INEA|LE_C0_STRT|LE_C0_IDON);
 1776 
 1777         if (sc->sc_flags & PCN_F_HAS_MII) {
 1778                 /* Start the one second MII clock. */
 1779                 timeout_add(&sc->sc_tick_timeout, hz);
 1780         }
 1781 
 1782         /* ...all done! */
 1783         ifp->if_flags |= IFF_RUNNING;
 1784         ifp->if_flags &= ~IFF_OACTIVE;
 1785 
 1786  out:
 1787         if (error)
 1788                 printf("%s: interface not running\n", sc->sc_dev.dv_xname);
 1789         return (error);
 1790 }
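
/*
 * Illustrative sketch, not referenced by the driver: pcn_init() above
 * packs the ring sizes into the first word of the init block by
 * storing log2(ring size) in the TLEN and RLEN fields.  For a power
 * of two n, ffs(n) - 1 == log2(n), e.g. ffs(128) - 1 == 7.  The
 * hypothetical helper below rebuilds that word from its pieces (the
 * caller would still pass the result through htole32(), as the real
 * code does); the shift counts simply mirror the code above.
 */
static uint32_t
example_initblock_word0(uint32_t mode, int ntxdesc, int nrxdesc)
{

        return (mode |
            ((uint32_t)(ffs(ntxdesc) - 1) << 28) |      /* TLEN: log2(Tx ring) */
            ((uint32_t)(ffs(nrxdesc) - 1) << 20));      /* RLEN: log2(Rx ring) */
}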
 1791 
 1792 /*
 1793  * pcn_rxdrain:
 1794  *
 1795  *      Drain the receive queue.
 1796  */
 1797 void
 1798 pcn_rxdrain(struct pcn_softc *sc)
 1799 {
 1800         struct pcn_rxsoft *rxs;
 1801         int i;
 1802 
 1803         for (i = 0; i < PCN_NRXDESC; i++) {
 1804                 rxs = &sc->sc_rxsoft[i];
 1805                 if (rxs->rxs_mbuf != NULL) {
 1806                         bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
 1807                         m_freem(rxs->rxs_mbuf);
 1808                         rxs->rxs_mbuf = NULL;
 1809                 }
 1810         }
 1811 }
 1812 
 1813 /*
 1814  * pcn_stop:            [ifnet interface function]
 1815  *
 1816  *      Stop transmission on the interface.
 1817  */
 1818 void
 1819 pcn_stop(struct ifnet *ifp, int disable)
 1820 {
 1821         struct pcn_softc *sc = ifp->if_softc;
 1822         struct pcn_txsoft *txs;
 1823         int i;
 1824 
 1825         if (sc->sc_flags & PCN_F_HAS_MII) {
 1826                 /* Stop the one second clock. */
 1827                 timeout_del(&sc->sc_tick_timeout);
 1828 
 1829                 /* Down the MII. */
 1830                 mii_down(&sc->sc_mii);
 1831         }
 1832 
 1833         /* Mark the interface as down and cancel the watchdog timer. */
 1834         ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
 1835         ifp->if_timer = 0;
 1836 
 1837         /* Stop the chip. */
 1838         pcn_csr_write(sc, LE_CSR0, LE_C0_STOP);
 1839 
 1840         /* Release any queued transmit buffers. */
 1841         for (i = 0; i < PCN_TXQUEUELEN; i++) {
 1842                 txs = &sc->sc_txsoft[i];
 1843                 if (txs->txs_mbuf != NULL) {
 1844                         bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
 1845                         m_freem(txs->txs_mbuf);
 1846                         txs->txs_mbuf = NULL;
 1847                 }
 1848         }
 1849 
 1850         if (disable)
 1851                 pcn_rxdrain(sc);
 1852 }
 1853 
 1854 /*
 1855  * pcn_add_rxbuf:
 1856  *
 1857  *      Add a receive buffer to the indicated descriptor.
 1858  */
 1859 int
 1860 pcn_add_rxbuf(struct pcn_softc *sc, int idx)
 1861 {
 1862         struct pcn_rxsoft *rxs = &sc->sc_rxsoft[idx];
 1863         struct mbuf *m;
 1864         int error;
 1865 
 1866         MGETHDR(m, M_DONTWAIT, MT_DATA);
 1867         if (m == NULL)
 1868                 return (ENOBUFS);
 1869 
 1870         MCLGET(m, M_DONTWAIT);
 1871         if ((m->m_flags & M_EXT) == 0) {
 1872                 m_freem(m);
 1873                 return (ENOBUFS);
 1874         }
 1875 
 1876         if (rxs->rxs_mbuf != NULL)
 1877                 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
 1878 
 1879         rxs->rxs_mbuf = m;
 1880 
 1881         error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
 1882             m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
 1883             BUS_DMA_READ|BUS_DMA_NOWAIT);
 1884         if (error) {
 1885                 printf("%s: can't load rx DMA map %d, error = %d\n",
 1886                     sc->sc_dev.dv_xname, idx, error);
 1887                 panic("pcn_add_rxbuf");
 1888         }
 1889 
 1890         bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
 1891             rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
 1892 
 1893         PCN_INIT_RXDESC(sc, idx);
 1894 
 1895         return (0);
 1896 }
 1897 
 1898 /*
 1899  * pcn_set_filter:
 1900  *
 1901  *      Set up the receive filter.
 1902  */
 1903 void
 1904 pcn_set_filter(struct pcn_softc *sc)
 1905 {
 1906         struct arpcom *ac = &sc->sc_arpcom;
 1907         struct ifnet *ifp = &sc->sc_arpcom.ac_if;
 1908         struct ether_multi *enm;
 1909         struct ether_multistep step;
 1910         uint32_t crc;
 1911 
 1912         /*
 1913          * Set up the multicast address filter by passing all multicast
 1914          * addresses through a CRC generator, and then using the high
 1915          * order 6 bits as an index into the 64-bit logical address
 1916          * filter.  The high order bits select the word, while the rest
 1917          * of the bits select the bit within the word.
 1918          */
 1919 
 1920         if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC)
 1921                 goto allmulti;
 1922 
 1923         sc->sc_initblock.init_ladrf[0] =
 1924             sc->sc_initblock.init_ladrf[1] =
 1925             sc->sc_initblock.init_ladrf[2] =
 1926             sc->sc_initblock.init_ladrf[3] = 0;
 1927 
 1928         ETHER_FIRST_MULTI(step, ac, enm);
 1929         while (enm != NULL) {
 1930                 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
 1931                         /*
 1932                          * We must listen to a range of multicast addresses.
 1933                          * For now, just accept all multicasts, rather than
 1934                          * trying to set only those filter bits needed to match
 1935                          * the range.  (At this time, the only use of address
 1936                          * ranges is for IP multicast routing, for which the
 1937                          * range is big enough to require all bits set.)
 1938                          */
 1939                         goto allmulti;
 1940                 }
 1941 
 1942                 crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
 1943 
 1944                 /* Just want the 6 most significant bits. */
 1945                 crc >>= 26;
 1946 
 1947                 /* Set the corresponding bit in the filter. */
 1948                 sc->sc_initblock.init_ladrf[crc >> 4] |=
 1949                     htole16(1 << (crc & 0xf));
 1950 
 1951                 ETHER_NEXT_MULTI(step, enm);
 1952         }
 1953 
 1954         ifp->if_flags &= ~IFF_ALLMULTI;
 1955         return;
 1956 
 1957  allmulti:
 1958         ifp->if_flags |= IFF_ALLMULTI;
 1959         sc->sc_initblock.init_ladrf[0] =
 1960             sc->sc_initblock.init_ladrf[1] =
 1961             sc->sc_initblock.init_ladrf[2] =
 1962             sc->sc_initblock.init_ladrf[3] = 0xffff;
 1963 }
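
/*
 * Illustrative sketch, not referenced by the driver: pcn_set_filter()
 * above hashes each multicast address with ether_crc32_le() and uses
 * the top 6 bits of the CRC to pick one of the 64 logical-address-
 * filter bits: the upper 2 of those bits select one of the four
 * 16-bit words, the lower 4 select the bit within that word.  The
 * hypothetical helper below (example_* is not part of this file) sets
 * the matching bit in a caller-supplied filter.
 */
static void
example_ladrf_set(uint16_t ladrf[4], const uint8_t *ea)
{
        uint32_t crc;

        crc = ether_crc32_le(ea, ETHER_ADDR_LEN);
        crc >>= 26;                     /* keep the 6 high-order bits */
        ladrf[crc >> 4] |= htole16(1 << (crc & 0xf));
}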
 1964 
 1965 /*
 1966  * pcn_79c970_mediainit:
 1967  *
 1968  *      Initialize media for the Am79c970.
 1969  */
 1970 void
 1971 pcn_79c970_mediainit(struct pcn_softc *sc)
 1972 {
 1973         ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, pcn_79c970_mediachange,
 1974             pcn_79c970_mediastatus);
 1975 
 1976         ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_10_5,
 1977             PORTSEL_AUI, NULL);
 1978         if (sc->sc_variant->pcv_chipid == PARTID_Am79c970A)
 1979                 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_10_5|IFM_FDX,
 1980                     PORTSEL_AUI, NULL);
 1981 
 1982         ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_10_T,
 1983             PORTSEL_10T, NULL);
 1984         if (sc->sc_variant->pcv_chipid == PARTID_Am79c970A)
 1985                 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_10_T|IFM_FDX,
 1986                     PORTSEL_10T, NULL);
 1987 
 1988         ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO,
 1989             0, NULL);
 1990         if (sc->sc_variant->pcv_chipid == PARTID_Am79c970A)
 1991                 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO|IFM_FDX,
 1992                     0, NULL);
 1993 
 1994         ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
 1995 }
 1996 
 1997 /*
 1998  * pcn_79c970_mediastatus:      [ifmedia interface function]
 1999  *
 2000  *      Get the current interface media status (Am79c970 version).
 2001  */
 2002 void
 2003 pcn_79c970_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
 2004 {
 2005         struct pcn_softc *sc = ifp->if_softc;
 2006 
 2007         /*
 2008          * The currently selected media is always the active media.
 2009          * Note: We have no way to determine what media the AUTO
 2010          * process picked.
 2011          */
 2012         ifmr->ifm_active = sc->sc_mii.mii_media.ifm_media;
 2013 }
 2014 
 2015 /*
 2016  * pcn_79c970_mediachange:      [ifmedia interface function]
 2017  *
 2018  *      Set hardware to newly-selected media (Am79c970 version).
 2019  */
 2020 int
 2021 pcn_79c970_mediachange(struct ifnet *ifp)
 2022 {
 2023         struct pcn_softc *sc = ifp->if_softc;
 2024         uint32_t reg;
 2025 
 2026         if (IFM_SUBTYPE(sc->sc_mii.mii_media.ifm_media) == IFM_AUTO) {
 2027                 /*
 2028                  * CSR15:PORTSEL doesn't matter.  Just set BCR2:ASEL.
 2029                  */
 2030                 reg = pcn_bcr_read(sc, LE_BCR2);
 2031                 reg |= LE_B2_ASEL;
 2032                 pcn_bcr_write(sc, LE_BCR2, reg);
 2033         } else {
 2034                 /*
 2035                  * Clear BCR2:ASEL and set the new CSR15:PORTSEL value.
 2036                  */
 2037                 reg = pcn_bcr_read(sc, LE_BCR2);
 2038                 reg &= ~LE_B2_ASEL;
 2039                 pcn_bcr_write(sc, LE_BCR2, reg);
 2040 
 2041                 reg = pcn_csr_read(sc, LE_CSR15);
 2042                 reg = (reg & ~LE_C15_PORTSEL(PORTSEL_MASK)) |
 2043                     LE_C15_PORTSEL(sc->sc_mii.mii_media.ifm_cur->ifm_data);
 2044                 pcn_csr_write(sc, LE_CSR15, reg);
 2045         }
 2046 
 2047         if ((sc->sc_mii.mii_media.ifm_media & IFM_FDX) != 0) {
 2048                 reg = LE_B9_FDEN;
 2049                 if (IFM_SUBTYPE(sc->sc_mii.mii_media.ifm_media) == IFM_10_5)
 2050                         reg |= LE_B9_AUIFD;
 2051                 pcn_bcr_write(sc, LE_BCR9, reg);
 2052         } else
 2053                 pcn_bcr_write(sc, LE_BCR9, 0);
 2054 
 2055         return (0);
 2056 }
 2057 
 2058 /*
 2059  * pcn_79c971_mediainit:
 2060  *
 2061  *      Initialize media for the Am79c971.
 2062  */
 2063 void
 2064 pcn_79c971_mediainit(struct pcn_softc *sc)
 2065 {
 2066         struct ifnet *ifp = &sc->sc_arpcom.ac_if;
 2067 
 2068         /* We have MII. */
 2069         sc->sc_flags |= PCN_F_HAS_MII;
 2070 
 2071         /*
 2072          * The built-in 10BASE-T interface is mapped to the MII
 2073          * on the PCNet-FAST.  Unfortunately, there's no EEPROM
 2074          * word that tells us which PHY to use.
 2075          * This driver used to ignore all but the first PHY to
 2076          * answer, but that code was removed to support multiple
 2077          * external PHYs.  Since the default instance will be the
 2078          * first one to answer, no harm is done by letting a possibly
 2079          * unconnected internal PHY show up.
 2080          */
 2081 
 2082         /* Initialize our media structures and probe the MII. */
 2083         sc->sc_mii.mii_ifp = ifp;
 2084         sc->sc_mii.mii_readreg = pcn_mii_readreg;
 2085         sc->sc_mii.mii_writereg = pcn_mii_writereg;
 2086         sc->sc_mii.mii_statchg = pcn_mii_statchg;
 2087         ifmedia_init(&sc->sc_mii.mii_media, 0, pcn_79c971_mediachange,
 2088             pcn_79c971_mediastatus);
 2089 
 2090         mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
 2091             MII_OFFSET_ANY, 0);
 2092         if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
 2093                 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
 2094                 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
 2095         } else
 2096                 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
 2097 }
 2098 
 2099 /*
 2100  * pcn_79c971_mediastatus:      [ifmedia interface function]
 2101  *
 2102  *      Get the current interface media status (Am79c971 version).
 2103  */
 2104 void
 2105 pcn_79c971_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
 2106 {
 2107         struct pcn_softc *sc = ifp->if_softc;
 2108 
 2109         mii_pollstat(&sc->sc_mii);
 2110         ifmr->ifm_status = sc->sc_mii.mii_media_status;
 2111         ifmr->ifm_active = sc->sc_mii.mii_media_active;
 2112 }
 2113 
 2114 /*
 2115  * pcn_79c971_mediachange:      [ifmedia interface function]
 2116  *
 2117  *      Set hardware to newly-selected media (Am79c971 version).
 2118  */
 2119 int
 2120 pcn_79c971_mediachange(struct ifnet *ifp)
 2121 {
 2122         struct pcn_softc *sc = ifp->if_softc;
 2123 
 2124         if (ifp->if_flags & IFF_UP)
 2125                 mii_mediachg(&sc->sc_mii);
 2126         return (0);
 2127 }
 2128 
 2129 /*
 2130  * pcn_mii_readreg:     [mii interface function]
 2131  *
 2132  *      Read a PHY register on the MII.
 2133  */
 2134 int
 2135 pcn_mii_readreg(struct device *self, int phy, int reg)
 2136 {
 2137         struct pcn_softc *sc = (void *) self;
 2138         uint32_t rv;
 2139 
 2140         pcn_bcr_write(sc, LE_BCR33, reg | (phy << PHYAD_SHIFT));
 2141         rv = pcn_bcr_read(sc, LE_BCR34) & LE_B34_MIIMD;
 2142         if (rv == 0xffff)       /* no PHY responded */
 2143                 return (0);
 2144 
 2145         return (rv);
 2146 }
 2147 
 2148 /*
 2149  * pcn_mii_writereg:    [mii interface function]
 2150  *
 2151  *      Write a PHY register on the MII.
 2152  */
 2153 void
 2154 pcn_mii_writereg(struct device *self, int phy, int reg, int val)
 2155 {
 2156         struct pcn_softc *sc = (void *) self;
 2157 
 2158         pcn_bcr_write(sc, LE_BCR33, reg | (phy << PHYAD_SHIFT));
 2159         pcn_bcr_write(sc, LE_BCR34, val);
 2160 }
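
/*
 * Illustrative sketch, not referenced by the driver: MII access above
 * is indirect, in the usual "address register plus data window" style;
 * the PHY and register number go into BCR33 and the data moves through
 * BCR34, which pcn_bcr_read()/pcn_bcr_write() in turn reach through
 * the chip's register address/data ports.  The hypothetical helper
 * below (example_* and its function-pointer accessors are not part of
 * this file) shows the generic shape of one such indirect read.
 */
static uint32_t
example_indirect_read(uint32_t (*rd)(int), void (*wr)(int, uint32_t),
    int addr_reg, int data_reg, uint32_t index)
{

        wr(addr_reg, index);            /* select the target register */
        return (rd(data_reg));          /* then read through the data window */
}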
 2161 
 2162 /*
 2163  * pcn_mii_statchg:     [mii interface function]
 2164  *
 2165  *      Callback from MII layer when media changes.
 2166  */
 2167 void
 2168 pcn_mii_statchg(struct device *self)
 2169 {
 2170         struct pcn_softc *sc = (void *) self;
 2171 
 2172         if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0)
 2173                 pcn_bcr_write(sc, LE_BCR9, LE_B9_FDEN);
 2174         else
 2175                 pcn_bcr_write(sc, LE_BCR9, 0);
 2176 }
