root/dev/ic/aic6915.c


DEFINITIONS

This source file includes the following definitions.
  1. sf_reg_read
  2. sf_reg_write
  3. sf_attach
  4. sf_shutdown
  5. sf_start
  6. sf_watchdog
  7. sf_ioctl
  8. sf_intr
  9. sf_txintr
  10. sf_rxintr
  11. sf_tick
  12. sf_stats_update
  13. sf_reset
  14. sf_macreset
  15. sf_init
  16. sf_rxdrain
  17. sf_stop
  18. sf_read_eeprom
  19. sf_add_rxbuf
  20. sf_set_filter_perfect
  21. sf_set_filter_hash
  22. sf_set_filter
  23. sf_mii_read
  24. sf_mii_write
  25. sf_mii_statchg
  26. sf_mediastatus
  27. sf_mediachange

    1 /*      $OpenBSD: aic6915.c,v 1.3 2006/12/15 15:28:27 martin Exp $      */
    2 /*      $NetBSD: aic6915.c,v 1.15 2005/12/24 20:27:29 perry Exp $       */
    3 
    4 /*-
    5  * Copyright (c) 2001 The NetBSD Foundation, Inc.
    6  * All rights reserved.
    7  *
    8  * This code is derived from software contributed to The NetBSD Foundation
    9  * by Jason R. Thorpe.
   10  *
   11  * Redistribution and use in source and binary forms, with or without
   12  * modification, are permitted provided that the following conditions
   13  * are met:
   14  * 1. Redistributions of source code must retain the above copyright
   15  *    notice, this list of conditions and the following disclaimer.
   16  * 2. Redistributions in binary form must reproduce the above copyright
   17  *    notice, this list of conditions and the following disclaimer in the
   18  *    documentation and/or other materials provided with the distribution.
   19  * 3. All advertising materials mentioning features or use of this software
   20  *    must display the following acknowledgement:
   21  *      This product includes software developed by the NetBSD
   22  *      Foundation, Inc. and its contributors.
   23  * 4. Neither the name of The NetBSD Foundation nor the names of its
   24  *    contributors may be used to endorse or promote products derived
   25  *    from this software without specific prior written permission.
   26  *
   27  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
   28  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
   29  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
   30  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
   31  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   32  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   33  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   34  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   35  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   36  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   37  * POSSIBILITY OF SUCH DAMAGE.
   38  */
   39 
   40 /*
   41  * Device driver for the Adaptec AIC-6915 (``Starfire'')
   42  * 10/100 Ethernet controller.
   43  */
   44 
   45 #include "bpfilter.h"
   46 
   47 #include <sys/param.h>
   48 #include <sys/endian.h>
   49 #include <sys/systm.h>
   50 #include <sys/timeout.h>
   51 #include <sys/mbuf.h>
   52 #include <sys/malloc.h>
   53 #include <sys/kernel.h>
   54 #include <sys/socket.h>
   55 #include <sys/ioctl.h>
   56 #include <sys/errno.h>
   57 #include <sys/device.h>
   58 
   59 #include <uvm/uvm_extern.h>
   60 
   61 #include <net/if.h>
   62 #include <net/if_dl.h>
   63 
   64 #ifdef INET
   65 #include <netinet/in.h>
   66 #include <netinet/in_systm.h>
   67 #include <netinet/in_var.h>
   68 #include <netinet/ip.h>
   69 #include <netinet/if_ether.h>
   70 #endif
   71 
   72 #include <net/if_media.h>
   73 
   74 #if NBPFILTER > 0
   75 #include <net/bpf.h>
   76 #endif
   77 
   78 #include <machine/bus.h>
   79 #include <machine/intr.h>
   80 
   81 #include <dev/mii/miivar.h>
   82 
   83 #include <dev/ic/aic6915.h>
   84 
   85 void    sf_start(struct ifnet *);
   86 void    sf_watchdog(struct ifnet *);
   87 int     sf_ioctl(struct ifnet *, u_long, caddr_t);
   88 int     sf_init(struct ifnet *);
   89 void    sf_stop(struct ifnet *, int);
   90 
   91 void    sf_shutdown(void *);
   92 
   93 void    sf_txintr(struct sf_softc *);
   94 void    sf_rxintr(struct sf_softc *);
   95 void    sf_stats_update(struct sf_softc *);
   96 
   97 void    sf_reset(struct sf_softc *);
   98 void    sf_macreset(struct sf_softc *);
   99 void    sf_rxdrain(struct sf_softc *);
  100 int     sf_add_rxbuf(struct sf_softc *, int);
  101 uint8_t sf_read_eeprom(struct sf_softc *, int);
  102 void    sf_set_filter(struct sf_softc *);
  103 
  104 int     sf_mii_read(struct device *, int, int);
  105 void    sf_mii_write(struct device *, int, int, int);
  106 void    sf_mii_statchg(struct device *);
  107 
  108 void    sf_tick(void *);
  109 
  110 int     sf_mediachange(struct ifnet *);
  111 void    sf_mediastatus(struct ifnet *, struct ifmediareq *);
  112 
  113 uint32_t sf_reg_read(struct sf_softc *, bus_addr_t);
  114 void    sf_reg_write(struct sf_softc *, bus_addr_t , uint32_t);
  115 
  116 void    sf_set_filter_perfect(struct sf_softc *, int , uint8_t *);
  117 void    sf_set_filter_hash(struct sf_softc *, uint8_t *);
  118 
  119 struct cfdriver sf_cd = {
  120         NULL, "sf", DV_IFNET
  121 };
  122 
  123 #define sf_funcreg_read(sc, reg)                                        \
  124         bus_space_read_4((sc)->sc_st, (sc)->sc_sh_func, (reg))
  125 #define sf_funcreg_write(sc, reg, val)                                  \
  126         bus_space_write_4((sc)->sc_st, (sc)->sc_sh_func, (reg), (val))
  127 
  128 uint32_t
  129 sf_reg_read(struct sf_softc *sc, bus_addr_t reg)
  130 {
  131 
  132         if (__predict_false(sc->sc_iomapped)) {
  133                 bus_space_write_4(sc->sc_st, sc->sc_sh, SF_IndirectIoAccess,
  134                     reg);
  135                 return (bus_space_read_4(sc->sc_st, sc->sc_sh,
  136                     SF_IndirectIoDataPort));
  137         }
  138 
  139         return (bus_space_read_4(sc->sc_st, sc->sc_sh, reg));
  140 }
  141 
  142 void
  143 sf_reg_write(struct sf_softc *sc, bus_addr_t reg, uint32_t val)
  144 {
  145 
  146         if (__predict_false(sc->sc_iomapped)) {
  147                 bus_space_write_4(sc->sc_st, sc->sc_sh, SF_IndirectIoAccess,
  148                     reg);
  149                 bus_space_write_4(sc->sc_st, sc->sc_sh, SF_IndirectIoDataPort,
  150                     val);
  151                 return;
  152         }
  153 
  154         bus_space_write_4(sc->sc_st, sc->sc_sh, reg, val);
  155 }
  156 
  157 #define sf_genreg_read(sc, reg)                                         \
  158         sf_reg_read((sc), (reg) + SF_GENREG_OFFSET)
  159 #define sf_genreg_write(sc, reg, val)                                   \
  160         sf_reg_write((sc), (reg) + SF_GENREG_OFFSET, (val))
  161 
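       /*
        * Register access notes:
        *
        * The sf_funcreg_* macros touch the functional register window
        * directly through sc_sh_func.  The sf_genreg_* macros add
        * SF_GENREG_OFFSET and go through sf_reg_read()/sf_reg_write(),
        * which fall back to the chip's indirect access registers
        * (SF_IndirectIoAccess/SF_IndirectIoDataPort) when the device is
        * I/O mapped, presumably because an I/O mapping is too small to
        * expose the full register space directly.
        */
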
  162 /*
  163  * sf_attach:
  164  *
  165  *      Attach a Starfire interface to the system.
  166  */
  167 void
  168 sf_attach(struct sf_softc *sc)
  169 {
  170         struct ifnet *ifp = &sc->sc_arpcom.ac_if;
  171         int i, rseg, error;
  172         bus_dma_segment_t seg;
  173         u_int8_t enaddr[ETHER_ADDR_LEN];
  174 
  175         timeout_set(&sc->sc_mii_timeout, sf_tick, sc);
  176 
  177         /*
  178          * If we're I/O mapped, the functional register handle is
  179          * the same as the base handle.  If we're memory mapped,
  180          * carve off a chunk of the register space for the functional
  181          * registers, to save on arithmetic later.
  182          */
  183         if (sc->sc_iomapped)
  184                 sc->sc_sh_func = sc->sc_sh;
  185         else {
  186                 if ((error = bus_space_subregion(sc->sc_st, sc->sc_sh,
  187                     SF_GENREG_OFFSET, SF_FUNCREG_SIZE, &sc->sc_sh_func)) != 0) {
  188                         printf("%s: unable to sub-region functional "
  189                             "registers, error = %d\n", sc->sc_dev.dv_xname,
  190                             error);
  191                         return;
  192                 }
  193         }
  194 
  195         /*
  196          * Initialize the transmit threshold for this interface.  The
  197          * manual describes the default as 4 * 16 bytes.  We start out
  198          * at 10 * 16 bytes, to avoid a bunch of initial underruns on
  199          * several platforms.
  200          */
  201         sc->sc_txthresh = 10;
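               /*
                * Note: sf_intr() raises sc_txthresh further whenever the
                * chip reports a transmit FIFO underrun (IS_TxDataLowInt).
                */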
  202 
  203         /*
  204          * Allocate the control data structures, and create and load the
  205          * DMA map for it.
  206          */
  207         if ((error = bus_dmamem_alloc(sc->sc_dmat,
  208             sizeof(struct sf_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
  209             BUS_DMA_NOWAIT)) != 0) {
  210                 printf("%s: unable to allocate control data, error = %d\n",
  211                     sc->sc_dev.dv_xname, error);
  212                 goto fail_0;
  213         }
  214 
  215         if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
  216             sizeof(struct sf_control_data), (caddr_t *)&sc->sc_control_data,
  217             BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
  218                 printf("%s: unable to map control data, error = %d\n",
  219                     sc->sc_dev.dv_xname, error);
  220                 goto fail_1;
  221         }
  222 
  223         if ((error = bus_dmamap_create(sc->sc_dmat,
  224             sizeof(struct sf_control_data), 1,
  225             sizeof(struct sf_control_data), 0, BUS_DMA_NOWAIT,
  226             &sc->sc_cddmamap)) != 0) {
  227                 printf("%s: unable to create control data DMA map, "
  228                     "error = %d\n", sc->sc_dev.dv_xname, error);
  229                 goto fail_2;
  230         }
  231 
  232         if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
  233             sc->sc_control_data, sizeof(struct sf_control_data), NULL,
  234             BUS_DMA_NOWAIT)) != 0) {
  235                 printf("%s: unable to load control data DMA map, error = %d\n",
  236                     sc->sc_dev.dv_xname, error);
  237                 goto fail_3;
  238         }
  239 
  240         /*
  241          * Create the transmit buffer DMA maps.
  242          */
  243         for (i = 0; i < SF_NTXDESC; i++) {
  244                 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
  245                     SF_NTXFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
  246                     &sc->sc_txsoft[i].ds_dmamap)) != 0) {
  247                         printf("%s: unable to create tx DMA map %d, "
  248                             "error = %d\n", sc->sc_dev.dv_xname, i, error);
  249                         goto fail_4;
  250                 }
  251         }
  252 
  253         /*
  254          * Create the receive buffer DMA maps.
  255          */
  256         for (i = 0; i < SF_NRXDESC; i++) {
  257                 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
  258                     MCLBYTES, 0, BUS_DMA_NOWAIT,
  259                     &sc->sc_rxsoft[i].ds_dmamap)) != 0) {
  260                         printf("%s: unable to create rx DMA map %d, "
  261                             "error = %d\n", sc->sc_dev.dv_xname, i, error);
  262                         goto fail_5;
  263                 }
  264         }
  265 
  266         /*
  267          * Reset the chip to a known state.
  268          */
  269         sf_reset(sc);
  270 
  271         /*
  272          * Read the Ethernet address from the EEPROM.
  273          */
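               /*
                * The loop reads EEPROM offsets 20 down to 15, so the
                * station address is evidently stored byte-reversed there
                * and ends up in canonical order in enaddr[].
                */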
  274         for (i = 0; i < ETHER_ADDR_LEN; i++)
  275                 enaddr[i] = sf_read_eeprom(sc, (15 + (ETHER_ADDR_LEN - 1)) - i);
  276 
  277         printf(", address %s\n", ether_sprintf(enaddr));
  278 
  279 #ifdef DEBUG
  280         if (sf_funcreg_read(sc, SF_PciDeviceConfig) & PDC_System64)
  281                 printf("%s: 64-bit PCI slot detected\n", sc->sc_dev.dv_xname);
  282 #endif
  283 
  284         /*
  285          * Initialize our media structures and probe the MII.
  286          */
  287         sc->sc_mii.mii_ifp = ifp;
  288         sc->sc_mii.mii_readreg = sf_mii_read;
  289         sc->sc_mii.mii_writereg = sf_mii_write;
  290         sc->sc_mii.mii_statchg = sf_mii_statchg;
  291         ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, sf_mediachange,
  292             sf_mediastatus);
  293         mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
  294             MII_OFFSET_ANY, 0);
  295         if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
  296                 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
  297                 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
  298         } else
  299                 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
  300         bcopy(enaddr, sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
  301         bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
  302         ifp = &sc->sc_arpcom.ac_if;
  303         ifp->if_softc = sc;
  304         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
  305         ifp->if_ioctl = sf_ioctl;
  306         ifp->if_start = sf_start;
  307         ifp->if_watchdog = sf_watchdog;
  308         IFQ_SET_MAXLEN(&ifp->if_snd, SF_NTXDESC_MASK);
  309         IFQ_SET_READY(&ifp->if_snd);
  310 
  311         /*
  312          * Attach the interface.
  313          */
  314         if_attach(ifp);
  315         ether_ifattach(ifp);
  316 
  317         /*
  318          * Make sure the interface is shutdown during reboot.
  319          */
  320         sc->sc_sdhook = shutdownhook_establish(sf_shutdown, sc);
  321         if (sc->sc_sdhook == NULL)
  322                 printf("%s: WARNING: unable to establish shutdown hook\n",
  323                     sc->sc_dev.dv_xname);
  324         return;
  325 
  326         /*
  327          * Free any resources we've allocated during the failed attach
   328          * attempt.  Do this in reverse order and fall through.
  329          */
  330  fail_5:
  331         for (i = 0; i < SF_NRXDESC; i++) {
  332                 if (sc->sc_rxsoft[i].ds_dmamap != NULL)
  333                         bus_dmamap_destroy(sc->sc_dmat,
  334                             sc->sc_rxsoft[i].ds_dmamap);
  335         }
  336  fail_4:
  337         for (i = 0; i < SF_NTXDESC; i++) {
  338                 if (sc->sc_txsoft[i].ds_dmamap != NULL)
  339                         bus_dmamap_destroy(sc->sc_dmat,
  340                             sc->sc_txsoft[i].ds_dmamap);
  341         }
  342         bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
  343  fail_3:
  344         bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
  345  fail_2:
  346         bus_dmamem_unmap(sc->sc_dmat, (caddr_t) sc->sc_control_data,
  347             sizeof(struct sf_control_data));
  348  fail_1:
  349         bus_dmamem_free(sc->sc_dmat, &seg, rseg);
  350  fail_0:
  351         return;
  352 }
  353 
  354 /*
  355  * sf_shutdown:
  356  *
  357  *      Shutdown hook -- make sure the interface is stopped at reboot.
  358  */
  359 void
  360 sf_shutdown(void *arg)
  361 {
  362         struct sf_softc *sc = arg;
  363 
  364         sf_stop(&sc->sc_arpcom.ac_if, 1);
  365 }
  366 
  367 /*
  368  * sf_start:            [ifnet interface function]
  369  *
  370  *      Start packet transmission on the interface.
  371  */
  372 void
  373 sf_start(struct ifnet *ifp)
  374 {
  375         struct sf_softc *sc = ifp->if_softc;
  376         struct mbuf *m0, *m;
  377         struct sf_txdesc0 *txd;
  378         struct sf_descsoft *ds;
  379         bus_dmamap_t dmamap;
  380         int error, producer, last = -1, opending, seg;
  381 
  382         /*
  383          * Remember the previous number of pending transmits.
  384          */
  385         opending = sc->sc_txpending;
  386 
  387         /*
  388          * Find out where we're sitting.
  389          */
  390         producer = SF_TXDINDEX_TO_HOST(
  391             TDQPI_HiPrTxProducerIndex_get(
  392             sf_funcreg_read(sc, SF_TxDescQueueProducerIndex)));
  393 
  394         /*
  395          * Loop through the send queue, setting up transmit descriptors
  396          * until we drain the queue, or use up all available transmit
  397          * descriptors.  Leave a blank one at the end for sanity's sake.
  398          */
  399         while (sc->sc_txpending < (SF_NTXDESC - 1)) {
  400                 /*
  401                  * Grab a packet off the queue.
  402                  */
  403                 IFQ_POLL(&ifp->if_snd, m0);
  404                 if (m0 == NULL)
  405                         break;
  406                 m = NULL;
  407 
  408                 /*
  409                  * Get the transmit descriptor.
  410                  */
  411                 txd = &sc->sc_txdescs[producer];
  412                 ds = &sc->sc_txsoft[producer];
  413                 dmamap = ds->ds_dmamap;
  414 
  415                 /*
  416                  * Load the DMA map.  If this fails, the packet either
  417                  * didn't fit in the allotted number of frags, or we were
  418                  * short on resources.  In this case, we'll copy and try
  419                  * again.
  420                  */
  421                 if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
  422                     BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
  423                         MGETHDR(m, M_DONTWAIT, MT_DATA);
  424                         if (m == NULL) {
  425                                 printf("%s: unable to allocate Tx mbuf\n",
  426                                     sc->sc_dev.dv_xname);
  427                                 break;
  428                         }
  429                         if (m0->m_pkthdr.len > MHLEN) {
  430                                 MCLGET(m, M_DONTWAIT);
  431                                 if ((m->m_flags & M_EXT) == 0) {
  432                                         printf("%s: unable to allocate Tx "
  433                                             "cluster\n", sc->sc_dev.dv_xname);
  434                                         m_freem(m);
  435                                         break;
  436                                 }
  437                         }
  438                         m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
  439                         m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
  440                         error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
  441                             m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
  442                         if (error) {
  443                                 printf("%s: unable to load Tx buffer, "
  444                                     "error = %d\n", sc->sc_dev.dv_xname, error);
  445                                 break;
  446                         }
  447                 }
  448 
  449                 /*
  450                  * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
  451                  */
  452                 IFQ_DEQUEUE(&ifp->if_snd, m0);
  453                 if (m != NULL) {
  454                         m_freem(m0);
  455                         m0 = m;
  456                 }
  457 
  458                 /* Initialize the descriptor. */
  459                 txd->td_word0 =
  460                     htole32(TD_W0_ID | TD_W0_CRCEN | m0->m_pkthdr.len);
  461                 if (producer == (SF_NTXDESC - 1))
  462                         txd->td_word0 |= TD_W0_END;
  463                 txd->td_word1 = htole32(dmamap->dm_nsegs);
  464                 for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
  465                         txd->td_frags[seg].fr_addr =
  466                             htole32(dmamap->dm_segs[seg].ds_addr);
  467                         txd->td_frags[seg].fr_len =
  468                             htole32(dmamap->dm_segs[seg].ds_len);
  469                 }
  470 
  471                 /* Sync the descriptor and the DMA map. */
  472                 SF_CDTXDSYNC(sc, producer, BUS_DMASYNC_PREWRITE);
  473                 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
  474                     BUS_DMASYNC_PREWRITE);
  475 
  476                 /*
  477                  * Store a pointer to the packet so we can free it later.
  478                  */
  479                 ds->ds_mbuf = m0;
  480 
  481                 /* Advance the Tx pointer. */
  482                 sc->sc_txpending++;
  483                 last = producer;
  484                 producer = SF_NEXTTX(producer);
  485 
  486 #if NBPFILTER > 0
  487                 /*
  488                  * Pass the packet to any BPF listeners.
  489                  */
  490                 if (ifp->if_bpf)
  491                         bpf_mtap(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
  492 #endif
  493         }
  494 
  495         if (sc->sc_txpending == (SF_NTXDESC - 1)) {
  496                 /* No more slots left; notify upper layer. */
  497                 ifp->if_flags |= IFF_OACTIVE;
  498         }
  499 
  500         if (sc->sc_txpending != opending) {
  501                 KASSERT(last != -1);
  502                 /*
  503                  * We enqueued packets.  Cause a transmit interrupt to
  504                  * happen on the last packet we enqueued, and give the
  505                  * new descriptors to the chip by writing the new
  506                  * producer index.
  507                  */
  508                 sc->sc_txdescs[last].td_word0 |= TD_W0_INTR;
  509                 SF_CDTXDSYNC(sc, last, BUS_DMASYNC_PREWRITE);
  510 
  511                 sf_funcreg_write(sc, SF_TxDescQueueProducerIndex,
  512                     TDQPI_HiPrTxProducerIndex(SF_TXDINDEX_TO_CHIP(producer)));
  513 
  514                 /* Set a watchdog timer in case the chip flakes out. */
  515                 ifp->if_timer = 5;
  516         }
  517 }
  518 
  519 /*
  520  * sf_watchdog:         [ifnet interface function]
  521  *
  522  *      Watchdog timer handler.
  523  */
  524 void
  525 sf_watchdog(struct ifnet *ifp)
  526 {
  527         struct sf_softc *sc = ifp->if_softc;
  528 
  529         printf("%s: device timeout\n", sc->sc_dev.dv_xname);
  530         ifp->if_oerrors++;
  531 
  532         (void) sf_init(ifp);
  533 
  534         /* Try to get more packets going. */
  535         sf_start(ifp);
  536 }
  537 
  538 /*
  539  * sf_ioctl:            [ifnet interface function]
  540  *
  541  *      Handle control requests from the operator.
  542  */
  543 int
  544 sf_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
  545 {
  546         struct sf_softc *sc = (struct sf_softc *)ifp->if_softc;
  547         struct ifaddr *ifa = (struct ifaddr *)data;
  548         struct ifreq *ifr = (struct ifreq *) data;
  549         int s, error = 0;
  550 
  551         s = splnet();
  552 
  553         if ((error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data)) > 0) {
  554                 splx(s);
  555                 return (error);
  556         }
  557 
  558         switch (cmd) {
  559         case SIOCSIFADDR:
  560                 ifp->if_flags |= IFF_UP;
  561                 if (!(ifp->if_flags & IFF_RUNNING))
  562                         sf_init(ifp);
  563 #ifdef INET
  564                 if (ifa->ifa_addr->sa_family == AF_INET)
  565                         arp_ifinit(&sc->sc_arpcom, ifa);
  566 #endif
  567                 break;
  568 
  569         case SIOCSIFFLAGS:
  570                 if (ifp->if_flags & IFF_UP) {
  571                         if (ifp->if_flags & IFF_RUNNING &&
  572                             ((ifp->if_flags ^ sc->sc_flags) &
  573                              IFF_PROMISC)) {
  574                                 sf_set_filter(sc);
  575                         } else {
  576                                 if (!(ifp->if_flags & IFF_RUNNING))
  577                                         sf_init(ifp);
  578                         }
  579                 } else {
  580                         if (ifp->if_flags & IFF_RUNNING)
  581                                 sf_stop(ifp, 1);
  582                 }
  583                 sc->sc_flags = ifp->if_flags;
  584                 break;
  585 
  586         case SIOCSIFMTU:
  587                 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ifp->if_hardmtu)
  588                         error = EINVAL;
  589                 else if (ifp->if_mtu != ifr->ifr_mtu)
  590                         ifp->if_mtu = ifr->ifr_mtu;
  591                 break;
  592 
  593         case SIOCADDMULTI:
  594         case SIOCDELMULTI:
  595                 ifr = (struct ifreq *)data;
  596                 error = (cmd == SIOCADDMULTI) ?
  597                         ether_addmulti(ifr, &sc->sc_arpcom) :
  598                         ether_delmulti(ifr, &sc->sc_arpcom);
  599 
  600                 if (error == ENETRESET) {
  601                         if (ifp->if_flags & IFF_RUNNING)
  602                                 sf_set_filter(sc);
  603                         error = 0;
  604                 }
  605                 break;
  606 
  607         case SIOCGIFMEDIA:
  608         case SIOCSIFMEDIA:
  609                 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
  610                 break;
  611 
  612         default:
  613                 error = ENOTTY;
  614         }
  615 
  616         /* Try to get more packets going. */
  617         sf_start(ifp);
  618 
  619         splx(s);
  620         return (error);
  621 }
  622 
  623 /*
  624  * sf_intr:
  625  *
  626  *      Interrupt service routine.
  627  */
  628 int
  629 sf_intr(void *arg)
  630 {
  631         struct sf_softc *sc = arg;
  632         uint32_t isr;
  633         int handled = 0, wantinit = 0;
  634 
  635         for (;;) {
  636                 /* Reading clears all interrupts we're interested in. */
  637                 isr = sf_funcreg_read(sc, SF_InterruptStatus);
  638                 if ((isr & IS_PCIPadInt) == 0)
  639                         break;
  640 
  641                 handled = 1;
  642 
  643                 /* Handle receive interrupts. */
  644                 if (isr & IS_RxQ1DoneInt)
  645                         sf_rxintr(sc);
  646 
  647                 /* Handle transmit completion interrupts. */
  648                 if (isr & (IS_TxDmaDoneInt|IS_TxQueueDoneInt))
  649                         sf_txintr(sc);
  650 
  651                 /* Handle abnormal interrupts. */
  652                 if (isr & IS_AbnormalInterrupt) {
  653                         /* Statistics. */
  654                         if (isr & IS_StatisticWrapInt)
  655                                 sf_stats_update(sc);
  656 
  657                         /* DMA errors. */
  658                         if (isr & IS_DmaErrInt) {
  659                                 wantinit = 1;
  660                                 printf("%s: WARNING: DMA error\n",
  661                                     sc->sc_dev.dv_xname);
  662                         }
  663 
  664                         /* Transmit FIFO underruns. */
  665                         if (isr & IS_TxDataLowInt) {
  666                                 if (sc->sc_txthresh < 0xff)
  667                                         sc->sc_txthresh++;
  668 #ifdef DEBUG
  669                                 printf("%s: transmit FIFO underrun, new "
  670                                     "threshold: %d bytes\n",
  671                                     sc->sc_dev.dv_xname,
  672                                     sc->sc_txthresh * 16);
  673 #endif
  674                                 sf_funcreg_write(sc, SF_TransmitFrameCSR,
  675                                     sc->sc_TransmitFrameCSR |
  676                                     TFCSR_TransmitThreshold(sc->sc_txthresh));
  677                                 sf_funcreg_write(sc, SF_TxDescQueueCtrl,
  678                                     sc->sc_TxDescQueueCtrl |
  679                                     TDQC_TxHighPriorityFifoThreshold(
  680                                                         sc->sc_txthresh));
  681                         }
  682                 }
  683         }
  684 
  685         if (handled) {
  686                 /* Reset the interface, if necessary. */
  687                 if (wantinit)
  688                         sf_init(&sc->sc_arpcom.ac_if);
  689 
   690                 /* Try to get more packets going. */
  691                 sf_start(&sc->sc_arpcom.ac_if);
  692         }
  693 
  694         return (handled);
  695 }
  696 
  697 /*
  698  * sf_txintr:
  699  *
  700  *      Helper -- handle transmit completion interrupts.
  701  */
  702 void
  703 sf_txintr(struct sf_softc *sc)
  704 {
  705         struct ifnet *ifp = &sc->sc_arpcom.ac_if;
  706         struct sf_descsoft *ds;
  707         uint32_t cqci, tcd;
  708         int consumer, producer, txidx;
  709 
  710  try_again:
  711         cqci = sf_funcreg_read(sc, SF_CompletionQueueConsumerIndex);
  712 
  713         consumer = CQCI_TxCompletionConsumerIndex_get(cqci);
  714         producer = CQPI_TxCompletionProducerIndex_get(
  715             sf_funcreg_read(sc, SF_CompletionQueueProducerIndex));
  716 
  717         if (consumer == producer)
  718                 return;
  719 
  720         ifp->if_flags &= ~IFF_OACTIVE;
  721 
  722         while (consumer != producer) {
  723                 SF_CDTXCSYNC(sc, consumer, BUS_DMASYNC_POSTREAD);
  724                 tcd = letoh32(sc->sc_txcomp[consumer].tcd_word0);
  725 
  726                 txidx = SF_TCD_INDEX_TO_HOST(TCD_INDEX(tcd));
  727 #ifdef DIAGNOSTIC
  728                 if ((tcd & TCD_PR) == 0)
  729                         printf("%s: Tx queue mismatch, index %d\n",
  730                             sc->sc_dev.dv_xname, txidx);
  731 #endif
  732                 /*
  733                  * NOTE: stats are updated later.  We're just
  734                  * releasing packets that have been DMA'd to
  735                  * the chip.
  736                  */
  737                 ds = &sc->sc_txsoft[txidx];
  738                 SF_CDTXDSYNC(sc, txidx, BUS_DMASYNC_POSTWRITE);
  739                 bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap,
  740                     0, ds->ds_dmamap->dm_mapsize,
  741                     BUS_DMASYNC_POSTWRITE);
  742                 m_freem(ds->ds_mbuf);
  743                 ds->ds_mbuf = NULL;
  744 
  745                 consumer = SF_NEXTTCD(consumer);
  746                 sc->sc_txpending--;
  747         }
  748 
  749         /* XXXJRT -- should be KDASSERT() */
  750         KASSERT(sc->sc_txpending >= 0);
  751 
  752         /* If all packets are done, cancel the watchdog timer. */
  753         if (sc->sc_txpending == 0)
  754                 ifp->if_timer = 0;
  755 
  756         /* Update the consumer index. */
  757         sf_funcreg_write(sc, SF_CompletionQueueConsumerIndex,
  758             (cqci & ~CQCI_TxCompletionConsumerIndex(0x7ff)) |
  759              CQCI_TxCompletionConsumerIndex(consumer));
  760 
  761         /* Double check for new completions. */
  762         goto try_again;
  763 }
  764 
  765 /*
  766  * sf_rxintr:
  767  *
  768  *      Helper -- handle receive interrupts.
  769  */
  770 void
  771 sf_rxintr(struct sf_softc *sc)
  772 {
  773         struct ifnet *ifp = &sc->sc_arpcom.ac_if;
  774         struct sf_descsoft *ds;
  775         struct sf_rcd_full *rcd;
  776         struct mbuf *m;
  777         uint32_t cqci, word0;
  778         int consumer, producer, bufproducer, rxidx, len;
  779 
  780  try_again:
  781         cqci = sf_funcreg_read(sc, SF_CompletionQueueConsumerIndex);
  782 
  783         consumer = CQCI_RxCompletionQ1ConsumerIndex_get(cqci);
  784         producer = CQPI_RxCompletionQ1ProducerIndex_get(
  785             sf_funcreg_read(sc, SF_CompletionQueueProducerIndex));
  786         bufproducer = RXQ1P_RxDescQ1Producer_get(
  787             sf_funcreg_read(sc, SF_RxDescQueue1Ptrs));
  788 
  789         if (consumer == producer)
  790                 return;
  791 
  792         while (consumer != producer) {
  793                 rcd = &sc->sc_rxcomp[consumer];
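                       /*
                        * Make the completion descriptor visible to the CPU,
                        * then hand it straight back to the chip; only
                        * rcd_word0 is examined below.
                        */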
  794                 SF_CDRXCSYNC(sc, consumer,
  795                     BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
  796                 SF_CDRXCSYNC(sc, consumer,
  797                     BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
  798 
  799                 word0 = letoh32(rcd->rcd_word0);
  800                 rxidx = RCD_W0_EndIndex(word0);
  801 
  802                 ds = &sc->sc_rxsoft[rxidx];
  803 
  804                 consumer = SF_NEXTRCD(consumer);
  805                 bufproducer = SF_NEXTRX(bufproducer);
  806 
  807                 if ((word0 & RCD_W0_OK) == 0) {
  808                         SF_INIT_RXDESC(sc, rxidx);
  809                         continue;
  810                 }
  811 
  812                 bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
  813                     ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
  814 
  815                 /*
  816                  * No errors; receive the packet.  Note that we have
  817                  * configured the Starfire to NOT transfer the CRC
  818                  * with the packet.
  819                  */
  820                 len = RCD_W0_Length(word0);
  821 
  822 #ifndef __STRICT_ALIGNMENT
  823                 /*
  824                  * Allocate a new mbuf cluster.  If that fails, we are
  825                  * out of memory, and must drop the packet and recycle
  826                  * the buffer that's already attached to this descriptor.
  827                  */
  828                 m = ds->ds_mbuf;
  829                 if (sf_add_rxbuf(sc, rxidx) != 0) {
  830                         ifp->if_ierrors++;
  831                         SF_INIT_RXDESC(sc, rxidx);
  832                         bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
  833                             ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
  834                         continue;
  835                 }
  836 #else
  837                 /*
  838                  * The Starfire's receive buffer must be 4-byte aligned.
  839                  * But this means that the data after the Ethernet header
  840                  * is misaligned.  We must allocate a new buffer and
  841                  * copy the data, shifted forward 2 bytes.
  842                  */
  843                 MGETHDR(m, M_DONTWAIT, MT_DATA);
  844                 if (m == NULL) {
  845  dropit:
  846                         ifp->if_ierrors++;
  847                         SF_INIT_RXDESC(sc, rxidx);
  848                         bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
  849                             ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
  850                         continue;
  851                 }
  852                 if (len > (MHLEN - 2)) {
  853                         MCLGET(m, M_DONTWAIT);
  854                         if ((m->m_flags & M_EXT) == 0) {
  855                                 m_freem(m);
  856                                 goto dropit;
  857                         }
  858                 }
  859                 m->m_data += 2;
  860 
  861                 /*
   862          * Note that we use a cluster for incoming frames, so the
  863                  * buffer is virtually contiguous.
  864                  */
  865                 memcpy(mtod(m, caddr_t), mtod(ds->ds_mbuf, caddr_t), len);
  866 
  867                 /* Allow the receive descriptor to continue using its mbuf. */
  868                 SF_INIT_RXDESC(sc, rxidx);
  869                 bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
  870                     ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
  871 #endif /* __STRICT_ALIGNMENT */
  872 
  873                 m->m_pkthdr.rcvif = ifp;
  874                 m->m_pkthdr.len = m->m_len = len;
  875 
  876 #if NBPFILTER > 0
  877                 /*
  878                  * Pass this up to any BPF listeners.
  879                  */
  880                 if (ifp->if_bpf)
  881                         bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
  882 #endif /* NBPFILTER > 0 */
  883 
  884                 /* Pass it on. */
  885                 ether_input_mbuf(ifp, m);
  886                 ifp->if_ipackets++;
  887         }
  888 
  889         /* Update the chip's pointers. */
  890         sf_funcreg_write(sc, SF_CompletionQueueConsumerIndex,
  891             (cqci & ~CQCI_RxCompletionQ1ConsumerIndex(0x7ff)) |
  892              CQCI_RxCompletionQ1ConsumerIndex(consumer));
  893         sf_funcreg_write(sc, SF_RxDescQueue1Ptrs,
  894             RXQ1P_RxDescQ1Producer(bufproducer));
  895 
  896         /* Double-check for any new completions. */
  897         goto try_again;
  898 }
  899 
  900 /*
  901  * sf_tick:
  902  *
  903  *      One second timer, used to tick the MII and update stats.
  904  */
  905 void
  906 sf_tick(void *arg)
  907 {
  908         struct sf_softc *sc = arg;
  909         int s;
  910 
  911         s = splnet();
  912         mii_tick(&sc->sc_mii);
  913         sf_stats_update(sc);
  914         splx(s);
  915 
  916         timeout_add(&sc->sc_mii_timeout, hz);
  917 }
  918 
  919 /*
  920  * sf_stats_update:
  921  *
   922  *      Read the statistics counters.
  923  */
  924 void
  925 sf_stats_update(struct sf_softc *sc)
  926 {
  927         struct sf_stats stats;
  928         struct ifnet *ifp = &sc->sc_arpcom.ac_if;
  929         uint32_t *p;
  930         u_int i;
  931 
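               /*
                * The hardware counters appear as consecutive 32-bit words
                * starting at SF_STATS_BASE; read each one and clear it.
                * This assumes struct sf_stats is laid out as a plain array
                * of uint32_t counters in register order.
                */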
  932         p = &stats.TransmitOKFrames;
  933         for (i = 0; i < (sizeof(stats) / sizeof(uint32_t)); i++) {
  934                 *p++ = sf_genreg_read(sc,
  935                     SF_STATS_BASE + (i * sizeof(uint32_t)));
  936                 sf_genreg_write(sc, SF_STATS_BASE + (i * sizeof(uint32_t)), 0);
  937         }
  938 
  939         ifp->if_opackets += stats.TransmitOKFrames;
  940 
  941         ifp->if_collisions += stats.SingleCollisionFrames +
  942             stats.MultipleCollisionFrames;
  943 
  944         ifp->if_oerrors += stats.TransmitAbortDueToExcessiveCollisions +
  945             stats.TransmitAbortDueToExcessingDeferral +
  946             stats.FramesLostDueToInternalTransmitErrors;
  947 
  948         ifp->if_ipackets += stats.ReceiveOKFrames;
  949 
  950         ifp->if_ierrors += stats.ReceiveCRCErrors + stats.AlignmentErrors +
  951             stats.ReceiveFramesTooLong + stats.ReceiveFramesTooShort +
  952             stats.ReceiveFramesJabbersError +
  953             stats.FramesLostDueToInternalReceiveErrors;
  954 }
  955 
  956 /*
  957  * sf_reset:
  958  *
  959  *      Perform a soft reset on the Starfire.
  960  */
  961 void
  962 sf_reset(struct sf_softc *sc)
  963 {
  964         int i;
  965 
  966         sf_funcreg_write(sc, SF_GeneralEthernetCtrl, 0);
  967 
  968         sf_macreset(sc);
  969 
  970         sf_funcreg_write(sc, SF_PciDeviceConfig, PDC_SoftReset);
  971         for (i = 0; i < 1000; i++) {
  972                 delay(10);
  973                 if ((sf_funcreg_read(sc, SF_PciDeviceConfig) &
  974                      PDC_SoftReset) == 0)
  975                         break;
  976         }
  977 
  978         if (i == 1000) {
  979                 printf("%s: reset failed to complete\n", sc->sc_dev.dv_xname);
  980                 sf_funcreg_write(sc, SF_PciDeviceConfig, 0);
  981         }
  982 
  983         delay(1000);
  984 }
  985 
  986 /*
  987  * sf_macreset:
  988  *
  989  *      Reset the MAC portion of the Starfire.
  990  */
  991 void
  992 sf_macreset(struct sf_softc *sc)
  993 {
  994 
  995         sf_genreg_write(sc, SF_MacConfig1, sc->sc_MacConfig1 | MC1_SoftRst);
  996         delay(1000);
  997         sf_genreg_write(sc, SF_MacConfig1, sc->sc_MacConfig1);
  998 }
  999 
 1000 /*
 1001  * sf_init:             [ifnet interface function]
 1002  *
 1003  *      Initialize the interface.  Must be called at splnet().
 1004  */
 1005 int
 1006 sf_init(struct ifnet *ifp)
 1007 {
 1008         struct sf_softc *sc = ifp->if_softc;
 1009         struct sf_descsoft *ds;
 1010         int error = 0;
 1011         u_int i;
 1012 
 1013         /*
 1014          * Cancel any pending I/O.
 1015          */
 1016         sf_stop(ifp, 0);
 1017 
 1018         /*
 1019          * Reset the Starfire to a known state.
 1020          */
 1021         sf_reset(sc);
 1022 
 1023         /* Clear the stat counters. */
 1024         for (i = 0; i < sizeof(struct sf_stats); i += sizeof(uint32_t))
 1025                 sf_genreg_write(sc, SF_STATS_BASE + i, 0);
 1026 
 1027         /*
 1028          * Initialize the transmit descriptor ring.
 1029          */
 1030         memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
 1031         sf_funcreg_write(sc, SF_TxDescQueueHighAddr, 0);
 1032         sf_funcreg_write(sc, SF_HiPrTxDescQueueBaseAddr, SF_CDTXDADDR(sc, 0));
 1033         sf_funcreg_write(sc, SF_LoPrTxDescQueueBaseAddr, 0);
 1034 
 1035         /*
 1036          * Initialize the transmit completion ring.
 1037          */
 1038         for (i = 0; i < SF_NTCD; i++) {
 1039                 sc->sc_txcomp[i].tcd_word0 = TCD_DMA_ID;
 1040                 SF_CDTXCSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
 1041         }
 1042         sf_funcreg_write(sc, SF_CompletionQueueHighAddr, 0);
 1043         sf_funcreg_write(sc, SF_TxCompletionQueueCtrl, SF_CDTXCADDR(sc, 0));
 1044 
 1045         /*
 1046          * Initialize the receive descriptor ring.
 1047          */
 1048         for (i = 0; i < SF_NRXDESC; i++) {
 1049                 ds = &sc->sc_rxsoft[i];
 1050                 if (ds->ds_mbuf == NULL) {
 1051                         if ((error = sf_add_rxbuf(sc, i)) != 0) {
 1052                                 printf("%s: unable to allocate or map rx "
 1053                                     "buffer %d, error = %d\n",
 1054                                     sc->sc_dev.dv_xname, i, error);
 1055                                 /*
 1056                                  * XXX Should attempt to run with fewer receive
 1057                                  * XXX buffers instead of just failing.
 1058                                  */
 1059                                 sf_rxdrain(sc);
 1060                                 goto out;
 1061                         }
 1062                 } else
 1063                         SF_INIT_RXDESC(sc, i);
 1064         }
 1065         sf_funcreg_write(sc, SF_RxDescQueueHighAddress, 0);
 1066         sf_funcreg_write(sc, SF_RxDescQueue1LowAddress, SF_CDRXDADDR(sc, 0));
 1067         sf_funcreg_write(sc, SF_RxDescQueue2LowAddress, 0);
 1068 
 1069         /*
 1070          * Initialize the receive completion ring.
 1071          */
 1072         for (i = 0; i < SF_NRCD; i++) {
 1073                 sc->sc_rxcomp[i].rcd_word0 = RCD_W0_ID;
 1074                 sc->sc_rxcomp[i].rcd_word1 = 0;
 1075                 sc->sc_rxcomp[i].rcd_word2 = 0;
 1076                 sc->sc_rxcomp[i].rcd_timestamp = 0;
 1077                 SF_CDRXCSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
 1078         }
 1079         sf_funcreg_write(sc, SF_RxCompletionQueue1Ctrl, SF_CDRXCADDR(sc, 0) |
 1080             RCQ1C_RxCompletionQ1Type(3));
 1081         sf_funcreg_write(sc, SF_RxCompletionQueue2Ctrl, 0);
 1082 
 1083         /*
 1084          * Initialize the Tx CSR.
 1085          */
 1086         sc->sc_TransmitFrameCSR = 0;
 1087         sf_funcreg_write(sc, SF_TransmitFrameCSR,
 1088             sc->sc_TransmitFrameCSR |
 1089             TFCSR_TransmitThreshold(sc->sc_txthresh));
 1090 
 1091         /*
 1092          * Initialize the Tx descriptor control register.
 1093          */
 1094         sc->sc_TxDescQueueCtrl = TDQC_SkipLength(0) |
 1095             TDQC_TxDmaBurstSize(4) |    /* default */
 1096             TDQC_MinFrameSpacing(3) |   /* 128 bytes */
 1097             TDQC_TxDescType(0);
 1098         sf_funcreg_write(sc, SF_TxDescQueueCtrl,
 1099             sc->sc_TxDescQueueCtrl |
 1100             TDQC_TxHighPriorityFifoThreshold(sc->sc_txthresh));
 1101 
 1102         /*
 1103          * Initialize the Rx descriptor control registers.
 1104          */
 1105         sf_funcreg_write(sc, SF_RxDescQueue1Ctrl,
 1106             RDQ1C_RxQ1BufferLength(MCLBYTES) |
 1107             RDQ1C_RxDescSpacing(0));
 1108         sf_funcreg_write(sc, SF_RxDescQueue2Ctrl, 0);
 1109 
 1110         /*
 1111          * Initialize the Tx descriptor producer indices.
 1112          */
 1113         sf_funcreg_write(sc, SF_TxDescQueueProducerIndex,
 1114             TDQPI_HiPrTxProducerIndex(0) |
 1115             TDQPI_LoPrTxProducerIndex(0));
 1116 
 1117         /*
 1118          * Initialize the Rx descriptor producer indices.
 1119          */
 1120         sf_funcreg_write(sc, SF_RxDescQueue1Ptrs,
 1121             RXQ1P_RxDescQ1Producer(SF_NRXDESC - 1));
 1122         sf_funcreg_write(sc, SF_RxDescQueue2Ptrs,
 1123             RXQ2P_RxDescQ2Producer(0));
 1124 
 1125         /*
 1126          * Initialize the Tx and Rx completion queue consumer indices.
 1127          */
 1128         sf_funcreg_write(sc, SF_CompletionQueueConsumerIndex,
 1129             CQCI_TxCompletionConsumerIndex(0) |
 1130             CQCI_RxCompletionQ1ConsumerIndex(0));
 1131         sf_funcreg_write(sc, SF_RxHiPrCompletionPtrs, 0);
 1132 
 1133         /*
 1134          * Initialize the Rx DMA control register.
 1135          */
 1136         sf_funcreg_write(sc, SF_RxDmaCtrl,
 1137             RDC_RxHighPriorityThreshold(6) |    /* default */
 1138             RDC_RxBurstSize(4));                /* default */
 1139 
 1140         /*
 1141          * Set the receive filter.
 1142          */
 1143         sc->sc_RxAddressFilteringCtl = 0;
 1144         sf_set_filter(sc);
 1145 
 1146         /*
 1147          * Set MacConfig1.  When we set the media, MacConfig1 will
 1148          * actually be written and the MAC part reset.
 1149          */
 1150         sc->sc_MacConfig1 = MC1_PadEn;
 1151 
 1152         /*
 1153          * Set the media.
 1154          */
 1155         mii_mediachg(&sc->sc_mii);
 1156 
 1157         /*
 1158          * Initialize the interrupt register.
 1159          */
 1160         sc->sc_InterruptEn = IS_PCIPadInt | IS_RxQ1DoneInt |
 1161             IS_TxQueueDoneInt | IS_TxDmaDoneInt | IS_DmaErrInt |
 1162             IS_StatisticWrapInt;
 1163         sf_funcreg_write(sc, SF_InterruptEn, sc->sc_InterruptEn);
 1164 
 1165         sf_funcreg_write(sc, SF_PciDeviceConfig, PDC_IntEnable |
 1166             PDC_PCIMstDmaEn | (1 << PDC_FifoThreshold_SHIFT));
 1167 
 1168         /*
 1169          * Start the transmit and receive processes.
 1170          */
 1171         sf_funcreg_write(sc, SF_GeneralEthernetCtrl,
 1172             GEC_TxDmaEn|GEC_RxDmaEn|GEC_TransmitEn|GEC_ReceiveEn);
 1173 
  1174         /* Start the one second clock. */
 1175         timeout_add(&sc->sc_mii_timeout, hz);
 1176 
 1177         /*
 1178          * Note that the interface is now running.
 1179          */
 1180         ifp->if_flags |= IFF_RUNNING;
 1181         ifp->if_flags &= ~IFF_OACTIVE;
 1182 
 1183  out:
 1184         if (error) {
 1185                 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
 1186                 ifp->if_timer = 0;
 1187                 printf("%s: interface not running\n", sc->sc_dev.dv_xname);
 1188         }
 1189         return (error);
 1190 }
 1191 
 1192 /*
 1193  * sf_rxdrain:
 1194  *
 1195  *      Drain the receive queue.
 1196  */
 1197 void
 1198 sf_rxdrain(struct sf_softc *sc)
 1199 {
 1200         struct sf_descsoft *ds;
 1201         int i;
 1202 
 1203         for (i = 0; i < SF_NRXDESC; i++) {
 1204                 ds = &sc->sc_rxsoft[i];
 1205                 if (ds->ds_mbuf != NULL) {
 1206                         bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
 1207                         m_freem(ds->ds_mbuf);
 1208                         ds->ds_mbuf = NULL;
 1209                 }
 1210         }
 1211 }
 1212 
 1213 /*
 1214  * sf_stop:             [ifnet interface function]
 1215  *
 1216  *      Stop transmission on the interface.
 1217  */
 1218 void
 1219 sf_stop(struct ifnet *ifp, int disable)
 1220 {
 1221         struct sf_softc *sc = ifp->if_softc;
 1222         struct sf_descsoft *ds;
 1223         int i;
 1224 
 1225         /* Stop the one second clock. */
 1226         timeout_del(&sc->sc_mii_timeout);
 1227 
 1228         /* Down the MII. */
 1229         mii_down(&sc->sc_mii);
 1230 
 1231         /* Disable interrupts. */
 1232         sf_funcreg_write(sc, SF_InterruptEn, 0);
 1233 
 1234         /* Stop the transmit and receive processes. */
 1235         sf_funcreg_write(sc, SF_GeneralEthernetCtrl, 0);
 1236 
 1237         /*
 1238          * Release any queued transmit buffers.
 1239          */
 1240         for (i = 0; i < SF_NTXDESC; i++) {
 1241                 ds = &sc->sc_txsoft[i];
 1242                 if (ds->ds_mbuf != NULL) {
 1243                         bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
 1244                         m_freem(ds->ds_mbuf);
 1245                         ds->ds_mbuf = NULL;
 1246                 }
 1247         }
 1248 
 1249         if (disable)
 1250                 sf_rxdrain(sc);
 1251 
 1252         /*
 1253          * Mark the interface down and cancel the watchdog timer.
 1254          */
 1255         ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
 1256         ifp->if_timer = 0;
 1257 }
 1258 
 1259 /*
 1260  * sf_read_eeprom:
 1261  *
 1262  *      Read from the Starfire EEPROM.
 1263  */
 1264 uint8_t
 1265 sf_read_eeprom(struct sf_softc *sc, int offset)
 1266 {
 1267         uint32_t reg;
 1268 
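               /*
                * The EEPROM contents are exposed as 32-bit words in the
                * general register space at SF_EEPROM_BASE; fetch the word
                * containing the requested offset and extract the byte.
                */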
 1269         reg = sf_genreg_read(sc, SF_EEPROM_BASE + (offset & ~3));
 1270 
 1271         return ((reg >> (8 * (offset & 3))) & 0xff);
 1272 }
 1273 
 1274 /*
 1275  * sf_add_rxbuf:
 1276  *
 1277  *      Add a receive buffer to the indicated descriptor.
 1278  */
 1279 int
 1280 sf_add_rxbuf(struct sf_softc *sc, int idx)
 1281 {
 1282         struct sf_descsoft *ds = &sc->sc_rxsoft[idx];
 1283         struct mbuf *m;
 1284         int error;
 1285 
 1286         MGETHDR(m, M_DONTWAIT, MT_DATA);
 1287         if (m == NULL)
 1288                 return (ENOBUFS);
 1289 
 1290         MCLGET(m, M_DONTWAIT);
 1291         if ((m->m_flags & M_EXT) == 0) {
 1292                 m_freem(m);
 1293                 return (ENOBUFS);
 1294         }
 1295 
 1296         if (ds->ds_mbuf != NULL)
 1297                 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
 1298 
 1299         ds->ds_mbuf = m;
 1300 
 1301         error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
 1302             m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
 1303             BUS_DMA_READ|BUS_DMA_NOWAIT);
 1304         if (error) {
 1305                 printf("%s: can't load rx DMA map %d, error = %d\n",
 1306                     sc->sc_dev.dv_xname, idx, error);
 1307                 panic("sf_add_rxbuf"); /* XXX */
 1308         }
 1309 
 1310         bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
 1311             ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
 1312 
 1313         SF_INIT_RXDESC(sc, idx);
 1314 
 1315         return (0);
 1316 }
 1317 
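       /*
        * sf_set_filter_perfect:
        *
        *      Write an Ethernet address into the given slot of the chip's
        *      perfect filter table; the address is stored 16 bits per
        *      register, last octet first.
        */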
 1318 void
 1319 sf_set_filter_perfect(struct sf_softc *sc, int slot, uint8_t *enaddr)
 1320 {
 1321         uint32_t reg0, reg1, reg2;
 1322 
 1323         reg0 = enaddr[5] | (enaddr[4] << 8);
 1324         reg1 = enaddr[3] | (enaddr[2] << 8);
 1325         reg2 = enaddr[1] | (enaddr[0] << 8);
 1326 
 1327         sf_genreg_write(sc, SF_PERFECT_BASE + (slot * 0x10) + 0, reg0);
 1328         sf_genreg_write(sc, SF_PERFECT_BASE + (slot * 0x10) + 4, reg1);
 1329         sf_genreg_write(sc, SF_PERFECT_BASE + (slot * 0x10) + 8, reg2);
 1330 }
 1331 
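       /*
        * sf_set_filter_hash:
        *
        *      Set the bit in the multicast hash table corresponding to the
        *      given address.  The upper 9 bits of the big-endian CRC pick
        *      one of 512 bits: the high 5 bits select the hash register,
        *      the low 4 bits the bit within it.
        */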
 1332 void
 1333 sf_set_filter_hash(struct sf_softc *sc, uint8_t *enaddr)
 1334 {
 1335         uint32_t hash, slot, reg;
 1336 
 1337         hash = ether_crc32_be(enaddr, ETHER_ADDR_LEN) >> 23;
 1338         slot = hash >> 4;
 1339 
 1340         reg = sf_genreg_read(sc, SF_HASH_BASE + (slot * 0x10));
 1341         reg |= 1 << (hash & 0xf);
 1342         sf_genreg_write(sc, SF_HASH_BASE + (slot * 0x10), reg);
 1343 }
 1344 
 1345 /*
 1346  * sf_set_filter:
 1347  *
 1348  *      Set the Starfire receive filter.
 1349  */
 1350 void
 1351 sf_set_filter(struct sf_softc *sc)
 1352 {
 1353         struct arpcom *ac = &sc->sc_arpcom;
 1354         struct ifnet *ifp = &sc->sc_arpcom.ac_if;
 1355         struct ether_multi *enm;
 1356         struct ether_multistep step;
 1357         int i;
 1358 
 1359         /* Start by clearing the perfect and hash tables. */
 1360         for (i = 0; i < SF_PERFECT_SIZE; i += sizeof(uint32_t))
 1361                 sf_genreg_write(sc, SF_PERFECT_BASE + i, 0);
 1362 
 1363         for (i = 0; i < SF_HASH_SIZE; i += sizeof(uint32_t))
 1364                 sf_genreg_write(sc, SF_HASH_BASE + i, 0);
 1365 
 1366         /*
 1367          * Clear the perfect and hash mode bits.
 1368          */
 1369         sc->sc_RxAddressFilteringCtl &=
 1370             ~(RAFC_PerfectFilteringMode(3) | RAFC_HashFilteringMode(3));
 1371 
 1372         if (ifp->if_flags & IFF_BROADCAST)
 1373                 sc->sc_RxAddressFilteringCtl |= RAFC_PassBroadcast;
 1374         else
 1375                 sc->sc_RxAddressFilteringCtl &= ~RAFC_PassBroadcast;
 1376 
 1377         if (ifp->if_flags & IFF_PROMISC) {
 1378                 sc->sc_RxAddressFilteringCtl |= RAFC_PromiscuousMode;
 1379                 goto allmulti;
 1380         } else
 1381                 sc->sc_RxAddressFilteringCtl &= ~RAFC_PromiscuousMode;
 1382 
 1383         /*
 1384          * Set normal perfect filtering mode.
 1385          */
 1386         sc->sc_RxAddressFilteringCtl |= RAFC_PerfectFilteringMode(1);
 1387 
 1388         /*
 1389          * First, write the station address to the perfect filter
 1390          * table.
 1391          */
 1392         sf_set_filter_perfect(sc, 0, LLADDR(ifp->if_sadl));
 1393 
 1394         /*
 1395          * Now set the hash bits for each multicast address in our
 1396          * list.
 1397          */
 1398         ETHER_FIRST_MULTI(step, ac, enm);
 1399         if (enm == NULL)
 1400                 goto done;
 1401         while (enm != NULL) {
 1402                 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
 1403                         /*
 1404                          * We must listen to a range of multicast addresses.
 1405                          * For now, just accept all multicasts, rather than
 1406                          * trying to set only those filter bits needed to match
 1407                          * the range.  (At this time, the only use of address
 1408                          * ranges is for IP multicast routing, for which the
 1409                          * range is big enough to require all bits set.)
 1410                          */
 1411                         goto allmulti;
 1412                 }
 1413                 sf_set_filter_hash(sc, enm->enm_addrlo);
 1414                 ETHER_NEXT_MULTI(step, enm);
 1415         }
 1416 
 1417         /*
 1418          * Set "hash only multicast dest, match regardless of VLAN ID".
 1419          */
 1420         sc->sc_RxAddressFilteringCtl |= RAFC_HashFilteringMode(2);
 1421         goto done;
 1422 
 1423  allmulti:
 1424         /*
 1425          * XXX RAFC_PassMulticast is sub-optimal if using VLAN mode.
 1426          */
 1427         sc->sc_RxAddressFilteringCtl |= RAFC_PassMulticast;
 1428         ifp->if_flags |= IFF_ALLMULTI;
 1429 
 1430  done:
 1431         sf_funcreg_write(sc, SF_RxAddressFilteringCtl,
 1432             sc->sc_RxAddressFilteringCtl);
 1433 }
 1434 
 1435 /*
 1436  * sf_mii_read:         [mii interface function]
 1437  *
 1438  *      Read from the MII.
 1439  */
 1440 int
 1441 sf_mii_read(struct device *self, int phy, int reg)
 1442 {
 1443         struct sf_softc *sc = (void *) self;
 1444         uint32_t v;
 1445         int i;
 1446 
 1447         for (i = 0; i < 1000; i++) {
 1448                 v = sf_genreg_read(sc, SF_MII_PHY_REG(phy, reg));
 1449                 if (v & MiiDataValid)
 1450                         break;
 1451                 delay(1);
 1452         }
 1453 
 1454         if ((v & MiiDataValid) == 0)
 1455                 return (0);
 1456 
 1457         if (MiiRegDataPort(v) == 0xffff)
 1458                 return (0);
 1459 
 1460         return (MiiRegDataPort(v));
 1461 }
 1462 
 1463 /*
 1464  * sf_mii_write:        [mii interface function]
 1465  *
 1466  *      Write to the MII.
 1467  */
 1468 void
 1469 sf_mii_write(struct device *self, int phy, int reg, int val)
 1470 {
 1471         struct sf_softc *sc = (void *) self;
 1472         int i;
 1473 
 1474         sf_genreg_write(sc, SF_MII_PHY_REG(phy, reg), val);
 1475 
 1476         for (i = 0; i < 1000; i++) {
 1477                 if ((sf_genreg_read(sc, SF_MII_PHY_REG(phy, reg)) &
 1478                      MiiBusy) == 0)
 1479                         return;
 1480                 delay(1);
 1481         }
 1482 
 1483         printf("%s: MII write timed out\n", sc->sc_dev.dv_xname);
 1484 }
 1485 
 1486 /*
 1487  * sf_mii_statchg:      [mii interface function]
 1488  *
 1489  *      Callback from the PHY when the media changes.
 1490  */
 1491 void
 1492 sf_mii_statchg(struct device *self)
 1493 {
 1494         struct sf_softc *sc = (void *) self;
 1495         uint32_t ipg;
 1496 
 1497         if (sc->sc_mii.mii_media_active & IFM_FDX) {
 1498                 sc->sc_MacConfig1 |= MC1_FullDuplex;
 1499                 ipg = 0x15;
 1500         } else {
 1501                 sc->sc_MacConfig1 &= ~MC1_FullDuplex;
 1502                 ipg = 0x11;
 1503         }
 1504 
 1505         sf_genreg_write(sc, SF_MacConfig1, sc->sc_MacConfig1);
 1506         sf_macreset(sc);
 1507 
 1508         sf_genreg_write(sc, SF_BkToBkIPG, ipg);
 1509 }
 1510 
 1511 /*
 1512  * sf_mediastatus:      [ifmedia interface function]
 1513  *
 1514  *      Callback from ifmedia to request current media status.
 1515  */
 1516 void
 1517 sf_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
 1518 {
 1519         struct sf_softc *sc = ifp->if_softc;
 1520 
 1521         mii_pollstat(&sc->sc_mii);
 1522         ifmr->ifm_status = sc->sc_mii.mii_media_status;
 1523         ifmr->ifm_active = sc->sc_mii.mii_media_active;
 1524 }
 1525 
 1526 /*
 1527  * sf_mediachange:      [ifmedia interface function]
 1528  *
 1529  *      Callback from ifmedia to request new media setting.
 1530  */
 1531 int
 1532 sf_mediachange(struct ifnet *ifp)
 1533 {
 1534         struct sf_softc *sc = ifp->if_softc;
 1535 
 1536         if (ifp->if_flags & IFF_UP)
 1537                 mii_mediachg(&sc->sc_mii);
 1538         return (0);
 1539 }
