root/dev/pci/if_txp.c

/* [<][>][^][v][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. txp_probe
  2. txp_attachhook
  3. txp_attach
  4. txp_chip_init
  5. txp_reset_adapter
  6. txp_download_fw
  7. txp_download_fw_wait
  8. txp_download_fw_section
  9. txp_intr
  10. txp_rx_reclaim
  11. txp_rxbuf_reclaim
  12. txp_tx_reclaim
  13. txp_shutdown
  14. txp_alloc_rings
  15. txp_dma_malloc
  16. txp_dma_free
  17. txp_ioctl
  18. txp_init
  19. txp_tick
  20. txp_start
  21. txp_command
  22. txp_command2
  23. txp_response
  24. txp_rsp_fixup
  25. txp_cmd_desc_numfree
  26. txp_stop
  27. txp_watchdog
  28. txp_ifmedia_upd
  29. txp_ifmedia_sts
  30. txp_show_descriptor
  31. txp_set_filter
  32. txp_capabilities

    1 /*      $OpenBSD: if_txp.c,v 1.84 2007/04/11 14:41:15 claudio Exp $     */
    2 
    3 /*
    4  * Copyright (c) 2001
    5  *      Jason L. Wright <jason@thought.net>, Theo de Raadt, and
    6  *      Aaron Campbell <aaron@monkey.org>.  All rights reserved.
    7  *
    8  * Redistribution and use in source and binary forms, with or without
    9  * modification, are permitted provided that the following conditions
   10  * are met:
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  *
   17  * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
   18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
   19  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR THE VOICES IN THEIR HEADS
   21  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   22  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   23  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   24  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   25  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   26  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
   27  * THE POSSIBILITY OF SUCH DAMAGE.
   28  */
   29 
   30 /*
   31  * Driver for 3c990 (Typhoon) Ethernet ASIC
   32  */
   33 
   34 #include "bpfilter.h"
   35 #include "vlan.h"
   36 
   37 #include <sys/param.h>
   38 #include <sys/systm.h>
   39 #include <sys/sockio.h>
   40 #include <sys/mbuf.h>
   41 #include <sys/malloc.h>
   42 #include <sys/kernel.h>
   43 #include <sys/socket.h>
   44 #include <sys/device.h>
   45 #include <sys/timeout.h>
   46 
   47 #include <net/if.h>
   48 #include <net/if_dl.h>
   49 #include <net/if_types.h>
   50 
   51 #ifdef INET
   52 #include <netinet/in.h>
   53 #include <netinet/in_systm.h>
   54 #include <netinet/in_var.h>
   55 #include <netinet/ip.h>
   56 #include <netinet/if_ether.h>
   57 #endif
   58 
   59 #include <net/if_media.h>
   60 
   61 #if NBPFILTER > 0
   62 #include <net/bpf.h>
   63 #endif
   64 
   65 #if NVLAN > 0
   66 #include <net/if_types.h>
   67 #include <net/if_vlan_var.h>
   68 #endif
   69 
   70 #include <machine/bus.h>
   71 
   72 #include <dev/mii/mii.h>
   73 #include <dev/mii/miivar.h>
   74 #include <dev/pci/pcireg.h>
   75 #include <dev/pci/pcivar.h>
   76 #include <dev/pci/pcidevs.h>
   77 
   78 #include <dev/pci/if_txpreg.h>
   79 
   80 /*
   81  * These currently break the 3c990 firmware, hopefully will be resolved
   82  * at some point.
   83  */
   84 #undef  TRY_TX_UDP_CSUM
   85 #undef  TRY_TX_TCP_CSUM
   86 
   87 int txp_probe(struct device *, void *, void *);
   88 void txp_attach(struct device *, struct device *, void *);
   89 void txp_attachhook(void *vsc);
   90 int txp_intr(void *);
   91 void txp_tick(void *);
   92 void txp_shutdown(void *);
   93 int txp_ioctl(struct ifnet *, u_long, caddr_t);
   94 void txp_start(struct ifnet *);
   95 void txp_stop(struct txp_softc *);
   96 void txp_init(struct txp_softc *);
   97 void txp_watchdog(struct ifnet *);
   98 
   99 int txp_chip_init(struct txp_softc *);
  100 int txp_reset_adapter(struct txp_softc *);
  101 int txp_download_fw(struct txp_softc *);
  102 int txp_download_fw_wait(struct txp_softc *);
  103 int txp_download_fw_section(struct txp_softc *,
  104     struct txp_fw_section_header *, int, u_char *, size_t);
  105 int txp_alloc_rings(struct txp_softc *);
  106 void txp_dma_free(struct txp_softc *, struct txp_dma_alloc *);
  107 int txp_dma_malloc(struct txp_softc *, bus_size_t, struct txp_dma_alloc *, int);
  108 void txp_set_filter(struct txp_softc *);
  109 
  110 int txp_cmd_desc_numfree(struct txp_softc *);
  111 int txp_command(struct txp_softc *, u_int16_t, u_int16_t, u_int32_t,
  112     u_int32_t, u_int16_t *, u_int32_t *, u_int32_t *, int);
  113 int txp_command2(struct txp_softc *, u_int16_t, u_int16_t,
  114     u_int32_t, u_int32_t, struct txp_ext_desc *, u_int8_t,
  115     struct txp_rsp_desc **, int);
  116 int txp_response(struct txp_softc *, u_int32_t, u_int16_t, u_int16_t,
  117     struct txp_rsp_desc **);
  118 void txp_rsp_fixup(struct txp_softc *, struct txp_rsp_desc *,
  119     struct txp_rsp_desc *);
  120 void txp_capabilities(struct txp_softc *);
  121 
  122 void txp_ifmedia_sts(struct ifnet *, struct ifmediareq *);
  123 int txp_ifmedia_upd(struct ifnet *);
  124 void txp_show_descriptor(void *);
  125 void txp_tx_reclaim(struct txp_softc *, struct txp_tx_ring *,
  126     struct txp_dma_alloc *);
  127 void txp_rxbuf_reclaim(struct txp_softc *);
  128 void txp_rx_reclaim(struct txp_softc *, struct txp_rx_ring *,
  129     struct txp_dma_alloc *);
  130 
/* Autoconf glue: softc size and match/attach entry points for txp(4). */
struct cfattach txp_ca = {
	sizeof(struct txp_softc), txp_probe, txp_attach,
};
  134 
/* Driver definition: "txp" units are network interface devices. */
struct cfdriver txp_cd = {
	0, "txp", DV_IFNET
};
  138 
/* PCI vendor/product pairs for all supported 3Com Typhoon variants. */
const struct pci_matchid txp_devices[] = {
	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3CR990 },
	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3CR990TX },
	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3CR990TX95 },
	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3CR990TX97 },
	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3CR990SVR95 },
	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3CR990SVR97 },
	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C990BTXM },
	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C990BSVR },
	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3CR990FX },
};
  150 
  151 int
  152 txp_probe(parent, match, aux)
  153         struct device *parent;
  154         void *match, *aux;
  155 {
  156         return (pci_matchbyid((struct pci_attach_args *)aux, txp_devices,
  157             sizeof(txp_devices)/sizeof(txp_devices[0])));
  158 }
  159 
/*
 * Deferred second-stage attach.  Runs at mountroot time (or directly
 * from txp_attach() when root is already mounted) so that the runtime
 * firmware can be pulled in with loadfirmware(9).  Initializes the
 * chip, downloads the firmware, allocates the DMA rings, reads the
 * station address and attaches the network interface.
 */
void
txp_attachhook(void *vsc)
{
	struct txp_softc *sc = vsc;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	u_int16_t p1;
	u_int32_t p2;
	int s;

	s = splnet();
	printf("%s: ", sc->sc_dev.dv_xname);

	if (txp_chip_init(sc)) {
		printf("failed chip init\n");
		splx(s);
		return;
	}

	if (txp_download_fw(sc)) {
		splx(s);
		return;
	}

	if (txp_alloc_rings(sc)) {
		splx(s);
		return;
	}

	if (txp_command(sc, TXP_CMD_MAX_PKT_SIZE_WRITE, TXP_MAX_PKTLEN, 0, 0,
	    NULL, NULL, NULL, 1)) {
		splx(s);
		return;
	}

	/* The station address comes back in the two response words p1/p2. */
	if (txp_command(sc, TXP_CMD_STATION_ADDRESS_READ, 0, 0, 0,
	    &p1, &p2, NULL, 1)) {
		splx(s);
		return;
	}

	txp_set_filter(sc);

	/*
	 * Force the response words to little-endian layout so the byte
	 * indexing below extracts the address identically on any host.
	 */
	p1 = htole16(p1);
	sc->sc_arpcom.ac_enaddr[0] = ((u_int8_t *)&p1)[1];
	sc->sc_arpcom.ac_enaddr[1] = ((u_int8_t *)&p1)[0];
	p2 = htole32(p2);
	sc->sc_arpcom.ac_enaddr[2] = ((u_int8_t *)&p2)[3];
	sc->sc_arpcom.ac_enaddr[3] = ((u_int8_t *)&p2)[2];
	sc->sc_arpcom.ac_enaddr[4] = ((u_int8_t *)&p2)[1];
	sc->sc_arpcom.ac_enaddr[5] = ((u_int8_t *)&p2)[0];

	printf("address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));
	/* Firmware is up; commands may now complete via interrupt. */
	sc->sc_cold = 0;

	/* Advertise the fixed 10/100 media types plus autoselect. */
	ifmedia_init(&sc->sc_ifmedia, 0, txp_ifmedia_upd, txp_ifmedia_sts);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_100_TX, 0, NULL);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_100_TX|IFM_HDX, 0, NULL);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);

	sc->sc_xcvr = TXP_XCVR_AUTO;
	txp_command(sc, TXP_CMD_XCVR_SELECT, TXP_XCVR_AUTO, 0, 0,
	    NULL, NULL, NULL, 0);
	ifmedia_set(&sc->sc_ifmedia, IFM_ETHER|IFM_AUTO);

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = txp_ioctl;
	ifp->if_start = txp_start;
	ifp->if_watchdog = txp_watchdog;
	ifp->if_baudrate = 10000000;
	IFQ_SET_MAXLEN(&ifp->if_snd, TX_ENTRIES);
	IFQ_SET_READY(&ifp->if_snd);
	ifp->if_capabilities = 0;
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	/* Probe for checksum-offload/VLAN support and set capabilities. */
	txp_capabilities(sc);

	timeout_set(&sc->sc_tick, txp_tick, sc);

	/*
	 * Attach us everywhere
	 */
	if_attach(ifp);
	ether_ifattach(ifp);

	shutdownhook_establish(txp_shutdown, sc);
	splx(s);
}
  252 
/*
 * First-stage autoconf attach: map the chip's memory space, hook up
 * the PCI interrupt, then schedule txp_attachhook() for the point at
 * which the firmware file can be loaded from the filesystem.
 */
void
txp_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct txp_softc *sc = (struct txp_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	bus_size_t iosize;

	/* Poll for command completion until attach has finished. */
	sc->sc_cold = 1;

	if (pci_mapreg_map(pa, TXP_PCI_LOMEM, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_bt, &sc->sc_bh, NULL, &iosize, 0)) {
		printf(": can't map mem space %d\n", 0);
		return;
	}

	sc->sc_dmat = pa->pa_dmat;

	/*
	 * Allocate our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		printf(": couldn't map interrupt\n");
		return;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, txp_intr, sc,
	    self->dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s\n", intrstr);

	/* Firmware load needs a filesystem; defer until root is mounted. */
	if (rootvp == NULL)
		mountroothook_establish(txp_attachhook, sc);
	else
		txp_attachhook(sc);

}
  301 
/*
 * Bring the chip to a known quiet state: mask and acknowledge all
 * interrupt sources, soft-reset the adapter, then mask and ack again
 * since the reset clears the interrupt configuration.  Returns 0 on
 * success, -1 if the reset never completed.
 */
int
txp_chip_init(sc)
	struct txp_softc *sc;
{
	/* disable interrupts */
	WRITE_REG(sc, TXP_IER, 0);
	WRITE_REG(sc, TXP_IMR,
	    TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |
	    TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
	    TXP_INT_LATCH);

	/* ack all interrupts */
	WRITE_REG(sc, TXP_ISR, TXP_INT_RESERVED | TXP_INT_LATCH |
	    TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 |
	    TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |
	    TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
	    TXP_INT_A2H_3 | TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0);

	if (txp_reset_adapter(sc))
		return (-1);

	/* disable interrupts (again; the reset wiped the setup above) */
	WRITE_REG(sc, TXP_IER, 0);
	WRITE_REG(sc, TXP_IMR,
	    TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |
	    TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
	    TXP_INT_LATCH);

	/* ack all interrupts */
	WRITE_REG(sc, TXP_ISR, TXP_INT_RESERVED | TXP_INT_LATCH |
	    TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 |
	    TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |
	    TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
	    TXP_INT_A2H_3 | TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0);

	return (0);
}
  339 
  340 int
  341 txp_reset_adapter(sc)
  342         struct txp_softc *sc;
  343 {
  344         u_int32_t r;
  345         int i;
  346 
  347         WRITE_REG(sc, TXP_SRR, TXP_SRR_ALL);
  348         DELAY(1000);
  349         WRITE_REG(sc, TXP_SRR, 0);
  350 
  351         /* Should wait max 6 seconds */
  352         for (i = 0; i < 6000; i++) {
  353                 r = READ_REG(sc, TXP_A2H_0);
  354                 if (r == STAT_WAITING_FOR_HOST_REQUEST)
  355                         break;
  356                 DELAY(1000);
  357         }
  358 
  359         if (r != STAT_WAITING_FOR_HOST_REQUEST) {
  360                 printf("%s: reset hung\n", TXP_DEVNAME(sc));
  361                 return (-1);
  362         }
  363 
  364         return (0);
  365 }
  366 
/*
 * Download the runtime firmware image (file "3c990", fetched with
 * loadfirmware(9)) into the adapter: handshake with the boot firmware,
 * push each image section via txp_download_fw_section(), then wait for
 * the runtime image to signal it has booted.  Returns 0 on success;
 * on failure the firmware buffer is freed and nonzero is returned.
 */
int
txp_download_fw(sc)
	struct txp_softc *sc;
{
	struct txp_fw_file_header *fileheader;
	struct txp_fw_section_header *secthead;
	u_int32_t r, i, ier, imr;
	size_t buflen;
	int sect, err;
	u_char *buf;

	/* Temporarily enable the A2H_0 doorbell used for the handshake. */
	ier = READ_REG(sc, TXP_IER);
	WRITE_REG(sc, TXP_IER, ier | TXP_INT_A2H_0);

	imr = READ_REG(sc, TXP_IMR);
	WRITE_REG(sc, TXP_IMR, imr | TXP_INT_A2H_0);

	/* Boot firmware must be idle and waiting for a host request. */
	for (i = 0; i < 10000; i++) {
		r = READ_REG(sc, TXP_A2H_0);
		if (r == STAT_WAITING_FOR_HOST_REQUEST)
			break;
		DELAY(50);
	}
	if (r != STAT_WAITING_FOR_HOST_REQUEST) {
		printf("not waiting for host request\n");
		return (-1);
	}

	/* Ack the status */
	WRITE_REG(sc, TXP_ISR, TXP_INT_A2H_0);

	err = loadfirmware("3c990", &buf, &buflen);
	if (err) {
		printf("failed loadfirmware of file 3c990: errno %d\n",
		    err);
		return (err);
	}

	/* Sanity-check the image by its "TYPHOON" magic string. */
	fileheader = (struct txp_fw_file_header *)buf;
	if (bcmp("TYPHOON", fileheader->magicid, sizeof(fileheader->magicid))) {
		printf("firmware invalid magic\n");
		goto fail;
	}

	/* Tell boot firmware to get ready for image */
	WRITE_REG(sc, TXP_H2A_1, letoh32(fileheader->addr));
	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_RUNTIME_IMAGE);

	if (txp_download_fw_wait(sc)) {
		printf("fw wait failed, initial\n");
		goto fail;
	}

	/* Section headers follow the file header back to back. */
	secthead = (struct txp_fw_section_header *)(buf +
	    sizeof(struct txp_fw_file_header));

	for (sect = 0; sect < letoh32(fileheader->nsections); sect++) {
		if (txp_download_fw_section(sc, secthead, sect, buf, buflen))
			goto fail;
		/* Next header sits right after this section's payload. */
		secthead = (struct txp_fw_section_header *)
		    (((u_int8_t *)secthead) + letoh32(secthead->nbytes) +
			sizeof(*secthead));
	}

	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_DOWNLOAD_COMPLETE);

	/* Wait (up to ~0.5s) for the runtime image to come up. */
	for (i = 0; i < 10000; i++) {
		r = READ_REG(sc, TXP_A2H_0);
		if (r == STAT_WAITING_FOR_BOOT)
			break;
		DELAY(50);
	}
	if (r != STAT_WAITING_FOR_BOOT) {
		printf("not waiting for boot\n");
		goto fail;
	}

	/* Restore the caller's interrupt enable/mask state. */
	WRITE_REG(sc, TXP_IER, ier);
	WRITE_REG(sc, TXP_IMR, imr);

	free(buf, M_DEVBUF);
	printf("loaded firmware, ");
	return (0);
fail:
	free(buf, M_DEVBUF);
	return (-1);
}
  454 
  455 int
  456 txp_download_fw_wait(sc)
  457         struct txp_softc *sc;
  458 {
  459         u_int32_t i, r;
  460 
  461         for (i = 0; i < 10000; i++) {
  462                 r = READ_REG(sc, TXP_ISR);
  463                 if (r & TXP_INT_A2H_0)
  464                         break;
  465                 DELAY(50);
  466         }
  467 
  468         if (!(r & TXP_INT_A2H_0)) {
  469                 printf("fw wait failed comm0\n");
  470                 return (-1);
  471         }
  472 
  473         WRITE_REG(sc, TXP_ISR, TXP_INT_A2H_0);
  474 
  475         r = READ_REG(sc, TXP_A2H_0);
  476         if (r != STAT_WAITING_FOR_SEGMENT) {
  477                 printf("fw not waiting for segment\n");
  478                 return (-1);
  479         }
  480         return (0);
  481 }
  482 
/*
 * Download a single firmware section: bounds-check it against the
 * image buffer, copy it into a DMA-able buffer, verify its Internet
 * checksum, then hand its physical address to the boot firmware and
 * wait for the transfer to be acknowledged.  Returns 0 on success,
 * -1 on any failure.
 */
int
txp_download_fw_section(sc, sect, sectnum, buf, buflen)
	struct txp_softc *sc;
	struct txp_fw_section_header *sect;
	int sectnum;
	u_char *buf;
	size_t buflen;
{
	struct txp_dma_alloc dma;
	int rseg, err = 0;
	struct mbuf m;
	u_int16_t csum;

	/* Skip zero length sections */
	if (sect->nbytes == 0)
		return (0);

	/*
	 * Make sure we aren't past the end of the image.
	 * NOTE(review): rseg is the offset of the section *header*; these
	 * checks do not add sizeof(*sect) for the header itself, and the
	 * second test uses >= where == buflen would arguably be a valid
	 * end-of-image — confirm against the firmware file layout.
	 */
	rseg = ((u_int8_t *)sect) - ((u_int8_t *)buf);
	if (rseg >= buflen) {
		printf("fw invalid section address, section %d\n", sectnum);
		return (-1);
	}

	/* Make sure this section doesn't go past the end */
	rseg += letoh32(sect->nbytes);
	if (rseg >= buflen) {
		printf("fw truncated section %d\n", sectnum);
		return (-1);
	}

	/* map a buffer, copy segment to it, get physaddr */
	if (txp_dma_malloc(sc, letoh32(sect->nbytes), &dma, 0)) {
		printf("fw dma malloc failed, section %d\n", sectnum);
		return (-1);
	}

	/* Section payload starts immediately after its header. */
	bcopy(((u_int8_t *)sect) + sizeof(*sect), dma.dma_vaddr,
	    letoh32(sect->nbytes));

	/*
	 * dummy up mbuf and verify section checksum
	 * NOTE(review): csum is in host order while sect->cksum comes from
	 * a little-endian image (it is letoh16()'d for the register write
	 * below but not here) — verify this compare on big-endian hosts.
	 */
	m.m_type = MT_DATA;
	m.m_next = m.m_nextpkt = NULL;
	m.m_len = letoh32(sect->nbytes);
	m.m_data = dma.dma_vaddr;
	m.m_flags = 0;
	csum = in_cksum(&m, letoh32(sect->nbytes));
	if (csum != sect->cksum) {
		printf("fw section %d, bad cksum (expected 0x%x got 0x%x)\n",
		    sectnum, sect->cksum, csum);
		err = -1;
		goto bail;
	}

	/* Flush the copy to memory before the chip DMAs it in. */
	bus_dmamap_sync(sc->sc_dmat, dma.dma_map, 0,
	    dma.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	/* Describe the segment (length, cksum, load addr, 64-bit paddr). */
	WRITE_REG(sc, TXP_H2A_1, letoh32(sect->nbytes));
	WRITE_REG(sc, TXP_H2A_2, letoh16(sect->cksum));
	WRITE_REG(sc, TXP_H2A_3, letoh32(sect->addr));
	WRITE_REG(sc, TXP_H2A_4, dma.dma_paddr >> 32);
	WRITE_REG(sc, TXP_H2A_5, dma.dma_paddr & 0xffffffff);
	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_SEGMENT_AVAILABLE);

	if (txp_download_fw_wait(sc)) {
		printf("%s: fw wait failed, section %d\n",
		    sc->sc_dev.dv_xname, sectnum);
		err = -1;
	}

	bus_dmamap_sync(sc->sc_dmat, dma.dma_map, 0,
	    dma.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);

bail:
	txp_dma_free(sc, &dma);

	return (err);
}
  563 
/*
 * Interrupt handler.  Masks the chip, then repeatedly acks and services
 * all pending work (rx rings, rx buffer refill, tx reclaim) until the
 * ISR reads back clear, unmasks, and kicks the transmit path.  Returns
 * nonzero if the interrupt was ours.
 */
int
txp_intr(vsc)
	void *vsc;
{
	struct txp_softc *sc = vsc;
	struct txp_hostvar *hv = sc->sc_hostvar;
	u_int32_t isr;
	int claimed = 0;

	/* mask all interrupts */
	WRITE_REG(sc, TXP_IMR, TXP_INT_RESERVED | TXP_INT_SELF |
	    TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 |
	    TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0 |
	    TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
	    TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |  TXP_INT_LATCH);

	/* Pick up the chip's updates to the host variable block. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_host_dma.dma_map, 0,
	    sizeof(struct txp_hostvar), BUS_DMASYNC_POSTWRITE|BUS_DMASYNC_POSTREAD);

	isr = READ_REG(sc, TXP_ISR);
	while (isr) {
		claimed = 1;
		/* Writing the pending bits back acks them. */
		WRITE_REG(sc, TXP_ISR, isr);

		/* Service rx rings whose read offset lags the write offset. */
		if ((*sc->sc_rxhir.r_roff) != (*sc->sc_rxhir.r_woff))
			txp_rx_reclaim(sc, &sc->sc_rxhir, &sc->sc_rxhiring_dma);
		if ((*sc->sc_rxlor.r_roff) != (*sc->sc_rxlor.r_woff))
			txp_rx_reclaim(sc, &sc->sc_rxlor, &sc->sc_rxloring_dma);

		/* Refill the free-buffer ring once the chip catches up. */
		if (hv->hv_rx_buf_write_idx == hv->hv_rx_buf_read_idx)
			txp_rxbuf_reclaim(sc);

		/* Reclaim finished transmit descriptors on both tx rings. */
		if (sc->sc_txhir.r_cnt && (sc->sc_txhir.r_cons !=
		    TXP_OFFSET2IDX(letoh32(*(sc->sc_txhir.r_off)))))
			txp_tx_reclaim(sc, &sc->sc_txhir, &sc->sc_txhiring_dma);

		if (sc->sc_txlor.r_cnt && (sc->sc_txlor.r_cons !=
		    TXP_OFFSET2IDX(letoh32(*(sc->sc_txlor.r_off)))))
			txp_tx_reclaim(sc, &sc->sc_txlor, &sc->sc_txloring_dma);

		isr = READ_REG(sc, TXP_ISR);
	}

	/*
	 * NOTE(review): this second sync uses POST* ops like the first;
	 * a PREREAD|PREWRITE looks intended before handing the block
	 * back to the chip — confirm against bus_dma(9) usage.
	 */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_host_dma.dma_map, 0,
	    sizeof(struct txp_hostvar), BUS_DMASYNC_POSTWRITE|BUS_DMASYNC_POSTREAD);

	/* unmask all interrupts except the unused A2H_3 doorbell */
	WRITE_REG(sc, TXP_IMR, TXP_INT_A2H_3);

	txp_start(&sc->sc_arpcom.ac_if);

	return (claimed);
}
  617 
/*
 * Drain completed receive descriptors from ring r (offsets are kept in
 * the host variable block), handing each good packet to the network
 * stack.  Works around two firmware quirks: stripped VLAN tags are
 * re-inserted, and on strict-alignment machines the payload is copied
 * into a fresh mbuf offset by 2 to align the IP header.
 */
void
txp_rx_reclaim(sc, r, dma)
	struct txp_softc *sc;
	struct txp_rx_ring *r;
	struct txp_dma_alloc *dma;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct txp_rx_desc *rxd;
	struct mbuf *m;
	struct txp_swdesc *sd;
	u_int32_t roff, woff;
	int sumflags = 0, idx;

	/* Ring offsets are byte offsets; convert read side to an index. */
	roff = letoh32(*r->r_roff);
	woff = letoh32(*r->r_woff);
	idx = roff / sizeof(struct txp_rx_desc);
	rxd = r->r_desc + idx;

	while (roff != woff) {

		bus_dmamap_sync(sc->sc_dmat, dma->dma_map,
		    idx * sizeof(struct txp_rx_desc), sizeof(struct txp_rx_desc),
		    BUS_DMASYNC_POSTREAD);

		if (rxd->rx_flags & RX_FLAGS_ERROR) {
			printf("%s: error 0x%x\n", sc->sc_dev.dv_xname,
			    letoh32(rxd->rx_stat));
			ifp->if_ierrors++;
			goto next;
		}

		/* retrieve stashed pointer (see txp_rxbuf_reclaim()) */
		bcopy((u_long *)&rxd->rx_vaddrlo, &sd, sizeof(sd));

		/* Buffer is done with DMA; tear down its map and take the mbuf. */
		bus_dmamap_sync(sc->sc_dmat, sd->sd_map, 0,
		    sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, sd->sd_map);
		bus_dmamap_destroy(sc->sc_dmat, sd->sd_map);
		m = sd->sd_mbuf;
		free(sd, M_DEVBUF);
		m->m_pkthdr.len = m->m_len = letoh16(rxd->rx_len);

#if NVLAN > 0
		/*
		 * XXX Another firmware bug: the vlan encapsulation
		 * is always removed, even when we tell the card not
		 * to do that.  Restore the vlan encapsulation below.
		 */
		if (rxd->rx_stat & htole32(RX_STAT_VLAN)) {
			struct ether_vlan_header vh;

			if (m->m_pkthdr.len < ETHER_HDR_LEN) {
				m_freem(m);
				goto next;
			}
			m_copydata(m, 0, ETHER_HDR_LEN, (caddr_t)&vh);
			vh.evl_proto = vh.evl_encap_proto;
			vh.evl_tag = rxd->rx_vlan >> 16;
			vh.evl_encap_proto = htons(ETHERTYPE_VLAN);
			m_adj(m, ETHER_HDR_LEN);
			M_PREPEND(m, sizeof(vh), M_DONTWAIT);
			if (m == NULL)
				goto next;
			m_copyback(m, 0, sizeof(vh), &vh);
		}
#endif

#ifdef __STRICT_ALIGNMENT
		{
			/*
			 * XXX Nice chip, except it won't accept "off by 2"
			 * buffers, so we're force to copy.  Supposedly
			 * this will be fixed in a newer firmware rev
			 * and this will be temporary.
			 */
			struct mbuf *mnew;

			MGETHDR(mnew, M_DONTWAIT, MT_DATA);
			if (mnew == NULL) {
				m_freem(m);
				goto next;
			}
			if (m->m_len > (MHLEN - 2)) {
				MCLGET(mnew, M_DONTWAIT);
				if (!(mnew->m_flags & M_EXT)) {
					m_freem(mnew);
					m_freem(m);
					goto next;
				}
			}
			mnew->m_pkthdr.rcvif = ifp;
			mnew->m_pkthdr.len = mnew->m_len = m->m_len;
			/* offset by 2 so the IP header lands 4-byte aligned */
			mnew->m_data += 2;
			bcopy(m->m_data, mnew->m_data, m->m_len);
			m_freem(m);
			m = mnew;
		}
#endif

#if NBPFILTER > 0
		/*
		 * Handle BPF listeners. Let the BPF user see the packet.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif

		/* Translate the chip's checksum verdicts into mbuf flags. */
		if (rxd->rx_stat & htole32(RX_STAT_IPCKSUMBAD))
			sumflags |= M_IPV4_CSUM_IN_BAD;
		else if (rxd->rx_stat & htole32(RX_STAT_IPCKSUMGOOD))
			sumflags |= M_IPV4_CSUM_IN_OK;

		if (rxd->rx_stat & htole32(RX_STAT_TCPCKSUMBAD))
			sumflags |= M_TCP_CSUM_IN_BAD;
		else if (rxd->rx_stat & htole32(RX_STAT_TCPCKSUMGOOD))
			sumflags |= M_TCP_CSUM_IN_OK;

		if (rxd->rx_stat & htole32(RX_STAT_UDPCKSUMBAD))
			sumflags |= M_UDP_CSUM_IN_BAD;
		else if (rxd->rx_stat & htole32(RX_STAT_UDPCKSUMGOOD))
			sumflags |= M_UDP_CSUM_IN_OK;

		m->m_pkthdr.csum_flags = sumflags;

		ether_input_mbuf(ifp, m);

next:
		bus_dmamap_sync(sc->sc_dmat, dma->dma_map,
		    idx * sizeof(struct txp_rx_desc), sizeof(struct txp_rx_desc),
		    BUS_DMASYNC_PREREAD);

		/* Advance, wrapping at the end of the descriptor ring. */
		roff += sizeof(struct txp_rx_desc);
		if (roff == (RX_ENTRIES * sizeof(struct txp_rx_desc))) {
			idx = 0;
			roff = 0;
			rxd = r->r_desc;
		} else {
			idx++;
			rxd++;
		}
		/* Re-read the write offset; the chip may have produced more. */
		woff = letoh32(*r->r_woff);
	}

	*r->r_roff = htole32(woff);
}
  763 
/*
 * Replenish the receive free-buffer ring: for each empty slot between
 * the chip's read index and our write index, allocate a software
 * descriptor, a cluster mbuf and a DMA map, load the buffer, and
 * publish its physical address to the chip.  Stops early (without
 * error) if any allocation fails; the ring is simply left short.
 */
void
txp_rxbuf_reclaim(sc)
	struct txp_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct txp_hostvar *hv = sc->sc_hostvar;
	struct txp_rxbuf_desc *rbd;
	struct txp_swdesc *sd;
	u_int32_t i, end;

	end = TXP_OFFSET2IDX(letoh32(hv->hv_rx_buf_read_idx));
	i = TXP_OFFSET2IDX(letoh32(hv->hv_rx_buf_write_idx));

	/* Start at the slot after the last one we published. */
	if (++i == RXBUF_ENTRIES)
		i = 0;

	rbd = sc->sc_rxbufs + i;

	while (i != end) {
		sd = (struct txp_swdesc *)malloc(sizeof(struct txp_swdesc),
		    M_DEVBUF, M_NOWAIT);
		if (sd == NULL)
			break;

		MGETHDR(sd->sd_mbuf, M_DONTWAIT, MT_DATA);
		if (sd->sd_mbuf == NULL)
			goto err_sd;

		MCLGET(sd->sd_mbuf, M_DONTWAIT);
		if ((sd->sd_mbuf->m_flags & M_EXT) == 0)
			goto err_mbuf;
		/* reserve some space for a possible VLAN header */
		sd->sd_mbuf->m_data += 8;
		sd->sd_mbuf->m_pkthdr.len = sd->sd_mbuf->m_len = MCLBYTES - 8;
		sd->sd_mbuf->m_pkthdr.rcvif = ifp;
		if (bus_dmamap_create(sc->sc_dmat, TXP_MAX_PKTLEN, 1,
		    TXP_MAX_PKTLEN, 0, BUS_DMA_NOWAIT, &sd->sd_map))
			goto err_mbuf;
		if (bus_dmamap_load_mbuf(sc->sc_dmat, sd->sd_map, sd->sd_mbuf,
		    BUS_DMA_NOWAIT)) {
			bus_dmamap_destroy(sc->sc_dmat, sd->sd_map);
			goto err_mbuf;
		}

		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxbufring_dma.dma_map,
		    i * sizeof(struct txp_rxbuf_desc),
		    sizeof(struct txp_rxbuf_desc), BUS_DMASYNC_POSTWRITE);

		/* stash away pointer (recovered in txp_rx_reclaim()) */
		bcopy(&sd, (u_long *)&rbd->rb_vaddrlo, sizeof(sd));

		/* Publish the buffer's 64-bit physical address to the chip. */
		rbd->rb_paddrlo = ((u_int64_t)sd->sd_map->dm_segs[0].ds_addr)
		    & 0xffffffff;
		rbd->rb_paddrhi = ((u_int64_t)sd->sd_map->dm_segs[0].ds_addr)
		    >> 32;

		bus_dmamap_sync(sc->sc_dmat, sd->sd_map, 0,
		    sd->sd_map->dm_mapsize, BUS_DMASYNC_PREREAD);

		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxbufring_dma.dma_map,
		    i * sizeof(struct txp_rxbuf_desc),
		    sizeof(struct txp_rxbuf_desc), BUS_DMASYNC_PREWRITE);

		/* Advance the write index visible to the chip. */
		hv->hv_rx_buf_write_idx = htole32(TXP_IDX2OFFSET(i));

		if (++i == RXBUF_ENTRIES) {
			i = 0;
			rbd = sc->sc_rxbufs;
		} else
			rbd++;
	}
	return;

err_mbuf:
	m_freem(sd->sd_mbuf);
err_sd:
	free(sd, M_DEVBUF);
}
  842 
  843 /*
  844  * Reclaim mbufs and entries from a transmit ring.
  845  */
  846 void
  847 txp_tx_reclaim(sc, r, dma)
  848         struct txp_softc *sc;
  849         struct txp_tx_ring *r;
  850         struct txp_dma_alloc *dma;
  851 {
  852         struct ifnet *ifp = &sc->sc_arpcom.ac_if;
  853         u_int32_t idx = TXP_OFFSET2IDX(letoh32(*(r->r_off)));
  854         u_int32_t cons = r->r_cons, cnt = r->r_cnt;
  855         struct txp_tx_desc *txd = r->r_desc + cons;
  856         struct txp_swdesc *sd = sc->sc_txd + cons;
  857         struct mbuf *m;
  858 
  859         while (cons != idx) {
  860                 if (cnt == 0)
  861                         break;
  862 
  863                 bus_dmamap_sync(sc->sc_dmat, dma->dma_map,
  864                     cons * sizeof(struct txp_tx_desc),
  865                     sizeof(struct txp_tx_desc),
  866                     BUS_DMASYNC_POSTWRITE);
  867 
  868                 if ((txd->tx_flags & TX_FLAGS_TYPE_M) ==
  869                     TX_FLAGS_TYPE_DATA) {
  870                         bus_dmamap_sync(sc->sc_dmat, sd->sd_map, 0,
  871                             sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
  872                         bus_dmamap_unload(sc->sc_dmat, sd->sd_map);
  873                         m = sd->sd_mbuf;
  874                         if (m != NULL) {
  875                                 m_freem(m);
  876                                 txd->tx_addrlo = 0;
  877                                 txd->tx_addrhi = 0;
  878                                 ifp->if_opackets++;
  879                         }
  880                 }
  881                 ifp->if_flags &= ~IFF_OACTIVE;
  882 
  883                 if (++cons == TX_ENTRIES) {
  884                         txd = r->r_desc;
  885                         cons = 0;
  886                         sd = sc->sc_txd;
  887                 } else {
  888                         txd++;
  889                         sd++;
  890                 }
  891 
  892                 cnt--;
  893         }
  894 
  895         r->r_cons = cons;
  896         r->r_cnt = cnt;
  897         if (cnt == 0)
  898                 ifp->if_timer = 0;
  899 }
  900 
  901 void
  902 txp_shutdown(vsc)
  903         void *vsc;
  904 {
  905         struct txp_softc *sc = (struct txp_softc *)vsc;
  906 
  907         /* mask all interrupts */
  908         WRITE_REG(sc, TXP_IMR,
  909             TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |
  910             TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
  911             TXP_INT_LATCH);
  912 
  913         txp_command(sc, TXP_CMD_TX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 0);
  914         txp_command(sc, TXP_CMD_RX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 0);
  915         txp_command(sc, TXP_CMD_HALT, 0, 0, 0, NULL, NULL, NULL, 0);
  916 }
  917 
  918 int
  919 txp_alloc_rings(sc)
  920         struct txp_softc *sc;
  921 {
  922         struct ifnet *ifp = &sc->sc_arpcom.ac_if;
  923         struct txp_boot_record *boot;
  924         struct txp_swdesc *sd;
  925         u_int32_t r;
  926         int i, j;
  927 
  928         /* boot record */
  929         if (txp_dma_malloc(sc, sizeof(struct txp_boot_record), &sc->sc_boot_dma,
  930             BUS_DMA_COHERENT)) {
  931                 printf("can't allocate boot record\n");
  932                 return (-1);
  933         }
  934         boot = (struct txp_boot_record *)sc->sc_boot_dma.dma_vaddr;
  935         bzero(boot, sizeof(*boot));
  936         sc->sc_boot = boot;
  937 
  938         /* host variables */
  939         if (txp_dma_malloc(sc, sizeof(struct txp_hostvar), &sc->sc_host_dma,
  940             BUS_DMA_COHERENT)) {
  941                 printf("can't allocate host ring\n");
  942                 goto bail_boot;
  943         }
  944         bzero(sc->sc_host_dma.dma_vaddr, sizeof(struct txp_hostvar));
  945         boot->br_hostvar_lo = htole32(sc->sc_host_dma.dma_paddr & 0xffffffff);
  946         boot->br_hostvar_hi = htole32(sc->sc_host_dma.dma_paddr >> 32);
  947         sc->sc_hostvar = (struct txp_hostvar *)sc->sc_host_dma.dma_vaddr;
  948 
  949         /* high priority tx ring */
  950         if (txp_dma_malloc(sc, sizeof(struct txp_tx_desc) * TX_ENTRIES,
  951             &sc->sc_txhiring_dma, BUS_DMA_COHERENT)) {
  952                 printf("can't allocate high tx ring\n");
  953                 goto bail_host;
  954         }
  955         bzero(sc->sc_txhiring_dma.dma_vaddr, sizeof(struct txp_tx_desc) * TX_ENTRIES);
  956         boot->br_txhipri_lo = htole32(sc->sc_txhiring_dma.dma_paddr & 0xffffffff);
  957         boot->br_txhipri_hi = htole32(sc->sc_txhiring_dma.dma_paddr >> 32);
  958         boot->br_txhipri_siz = htole32(TX_ENTRIES * sizeof(struct txp_tx_desc));
  959         sc->sc_txhir.r_reg = TXP_H2A_1;
  960         sc->sc_txhir.r_desc = (struct txp_tx_desc *)sc->sc_txhiring_dma.dma_vaddr;
  961         sc->sc_txhir.r_cons = sc->sc_txhir.r_prod = sc->sc_txhir.r_cnt = 0;
  962         sc->sc_txhir.r_off = &sc->sc_hostvar->hv_tx_hi_desc_read_idx;
  963         for (i = 0; i < TX_ENTRIES; i++) {
  964                 if (bus_dmamap_create(sc->sc_dmat, TXP_MAX_PKTLEN,
  965                     TX_ENTRIES - 4, TXP_MAX_SEGLEN, 0,
  966                     BUS_DMA_NOWAIT, &sc->sc_txd[i].sd_map) != 0) {
  967                         for (j = 0; j < i; j++) {
  968                                 bus_dmamap_destroy(sc->sc_dmat,
  969                                     sc->sc_txd[j].sd_map);
  970                                 sc->sc_txd[j].sd_map = NULL;
  971                         }
  972                         goto bail_txhiring;
  973                 }
  974         }
  975 
  976         /* low priority tx ring */
  977         if (txp_dma_malloc(sc, sizeof(struct txp_tx_desc) * TX_ENTRIES,
  978             &sc->sc_txloring_dma, BUS_DMA_COHERENT)) {
  979                 printf("can't allocate low tx ring\n");
  980                 goto bail_txhiring;
  981         }
  982         bzero(sc->sc_txloring_dma.dma_vaddr, sizeof(struct txp_tx_desc) * TX_ENTRIES);
  983         boot->br_txlopri_lo = htole32(sc->sc_txloring_dma.dma_paddr & 0xffffffff);
  984         boot->br_txlopri_hi = htole32(sc->sc_txloring_dma.dma_paddr >> 32);
  985         boot->br_txlopri_siz = htole32(TX_ENTRIES * sizeof(struct txp_tx_desc));
  986         sc->sc_txlor.r_reg = TXP_H2A_3;
  987         sc->sc_txlor.r_desc = (struct txp_tx_desc *)sc->sc_txloring_dma.dma_vaddr;
  988         sc->sc_txlor.r_cons = sc->sc_txlor.r_prod = sc->sc_txlor.r_cnt = 0;
  989         sc->sc_txlor.r_off = &sc->sc_hostvar->hv_tx_lo_desc_read_idx;
  990 
  991         /* high priority rx ring */
  992         if (txp_dma_malloc(sc, sizeof(struct txp_rx_desc) * RX_ENTRIES,
  993             &sc->sc_rxhiring_dma, BUS_DMA_COHERENT)) {
  994                 printf("can't allocate high rx ring\n");
  995                 goto bail_txloring;
  996         }
  997         bzero(sc->sc_rxhiring_dma.dma_vaddr, sizeof(struct txp_rx_desc) * RX_ENTRIES);
  998         boot->br_rxhipri_lo = htole32(sc->sc_rxhiring_dma.dma_paddr & 0xffffffff);
  999         boot->br_rxhipri_hi = htole32(sc->sc_rxhiring_dma.dma_paddr >> 32);
 1000         boot->br_rxhipri_siz = htole32(RX_ENTRIES * sizeof(struct txp_rx_desc));
 1001         sc->sc_rxhir.r_desc =
 1002             (struct txp_rx_desc *)sc->sc_rxhiring_dma.dma_vaddr;
 1003         sc->sc_rxhir.r_roff = &sc->sc_hostvar->hv_rx_hi_read_idx;
 1004         sc->sc_rxhir.r_woff = &sc->sc_hostvar->hv_rx_hi_write_idx;
 1005         bus_dmamap_sync(sc->sc_dmat, sc->sc_rxhiring_dma.dma_map,
 1006             0, sc->sc_rxhiring_dma.dma_map->dm_mapsize, BUS_DMASYNC_PREREAD);
 1007 
 1008         /* low priority ring */
 1009         if (txp_dma_malloc(sc, sizeof(struct txp_rx_desc) * RX_ENTRIES,
 1010             &sc->sc_rxloring_dma, BUS_DMA_COHERENT)) {
 1011                 printf("can't allocate low rx ring\n");
 1012                 goto bail_rxhiring;
 1013         }
 1014         bzero(sc->sc_rxloring_dma.dma_vaddr, sizeof(struct txp_rx_desc) * RX_ENTRIES);
 1015         boot->br_rxlopri_lo = htole32(sc->sc_rxloring_dma.dma_paddr & 0xffffffff);
 1016         boot->br_rxlopri_hi = htole32(sc->sc_rxloring_dma.dma_paddr >> 32);
 1017         boot->br_rxlopri_siz = htole32(RX_ENTRIES * sizeof(struct txp_rx_desc));
 1018         sc->sc_rxlor.r_desc =
 1019             (struct txp_rx_desc *)sc->sc_rxloring_dma.dma_vaddr;
 1020         sc->sc_rxlor.r_roff = &sc->sc_hostvar->hv_rx_lo_read_idx;
 1021         sc->sc_rxlor.r_woff = &sc->sc_hostvar->hv_rx_lo_write_idx;
 1022         bus_dmamap_sync(sc->sc_dmat, sc->sc_rxloring_dma.dma_map,
 1023             0, sc->sc_rxloring_dma.dma_map->dm_mapsize, BUS_DMASYNC_PREREAD);
 1024 
 1025         /* command ring */
 1026         if (txp_dma_malloc(sc, sizeof(struct txp_cmd_desc) * CMD_ENTRIES,
 1027             &sc->sc_cmdring_dma, BUS_DMA_COHERENT)) {
 1028                 printf("can't allocate command ring\n");
 1029                 goto bail_rxloring;
 1030         }
 1031         bzero(sc->sc_cmdring_dma.dma_vaddr, sizeof(struct txp_cmd_desc) * CMD_ENTRIES);
 1032         boot->br_cmd_lo = htole32(sc->sc_cmdring_dma.dma_paddr & 0xffffffff);
 1033         boot->br_cmd_hi = htole32(sc->sc_cmdring_dma.dma_paddr >> 32);
 1034         boot->br_cmd_siz = htole32(CMD_ENTRIES * sizeof(struct txp_cmd_desc));
 1035         sc->sc_cmdring.base = (struct txp_cmd_desc *)sc->sc_cmdring_dma.dma_vaddr;
 1036         sc->sc_cmdring.size = CMD_ENTRIES * sizeof(struct txp_cmd_desc);
 1037         sc->sc_cmdring.lastwrite = 0;
 1038 
 1039         /* response ring */
 1040         if (txp_dma_malloc(sc, sizeof(struct txp_rsp_desc) * RSP_ENTRIES,
 1041             &sc->sc_rspring_dma, BUS_DMA_COHERENT)) {
 1042                 printf("can't allocate response ring\n");
 1043                 goto bail_cmdring;
 1044         }
 1045         bzero(sc->sc_rspring_dma.dma_vaddr, sizeof(struct txp_rsp_desc) * RSP_ENTRIES);
 1046         boot->br_resp_lo = htole32(sc->sc_rspring_dma.dma_paddr & 0xffffffff);
 1047         boot->br_resp_hi = htole32(sc->sc_rspring_dma.dma_paddr >> 32);
 1048         boot->br_resp_siz = htole32(CMD_ENTRIES * sizeof(struct txp_rsp_desc));
 1049         sc->sc_rspring.base = (struct txp_rsp_desc *)sc->sc_rspring_dma.dma_vaddr;
 1050         sc->sc_rspring.size = RSP_ENTRIES * sizeof(struct txp_rsp_desc);
 1051         sc->sc_rspring.lastwrite = 0;
 1052 
 1053         /* receive buffer ring */
 1054         if (txp_dma_malloc(sc, sizeof(struct txp_rxbuf_desc) * RXBUF_ENTRIES,
 1055             &sc->sc_rxbufring_dma, BUS_DMA_COHERENT)) {
 1056                 printf("can't allocate rx buffer ring\n");
 1057                 goto bail_rspring;
 1058         }
 1059         bzero(sc->sc_rxbufring_dma.dma_vaddr, sizeof(struct txp_rxbuf_desc) * RXBUF_ENTRIES);
 1060         boot->br_rxbuf_lo = htole32(sc->sc_rxbufring_dma.dma_paddr & 0xffffffff);
 1061         boot->br_rxbuf_hi = htole32(sc->sc_rxbufring_dma.dma_paddr >> 32);
 1062         boot->br_rxbuf_siz = htole32(RXBUF_ENTRIES * sizeof(struct txp_rxbuf_desc));
 1063         sc->sc_rxbufs = (struct txp_rxbuf_desc *)sc->sc_rxbufring_dma.dma_vaddr;
 1064         for (i = 0; i < RXBUF_ENTRIES; i++) {
 1065                 sd = (struct txp_swdesc *)malloc(sizeof(struct txp_swdesc),
 1066                     M_DEVBUF, M_NOWAIT);
 1067 
 1068                 /* stash away pointer */
 1069                 bcopy(&sd, (u_long *)&sc->sc_rxbufs[i].rb_vaddrlo, sizeof(sd));
 1070 
 1071                 if (sd == NULL)
 1072                         break;
 1073 
 1074                 MGETHDR(sd->sd_mbuf, M_DONTWAIT, MT_DATA);
 1075                 if (sd->sd_mbuf == NULL) {
 1076                         goto bail_rxbufring;
 1077                 }
 1078 
 1079                 MCLGET(sd->sd_mbuf, M_DONTWAIT);
 1080                 if ((sd->sd_mbuf->m_flags & M_EXT) == 0) {
 1081                         goto bail_rxbufring;
 1082                 }
 1083                 /* reserve some space for a possible VLAN header */
 1084                 sd->sd_mbuf->m_data += 8;
 1085                 sd->sd_mbuf->m_pkthdr.len = sd->sd_mbuf->m_len = MCLBYTES - 8;
 1086                 sd->sd_mbuf->m_pkthdr.rcvif = ifp;
 1087                 if (bus_dmamap_create(sc->sc_dmat, TXP_MAX_PKTLEN, 1,
 1088                     TXP_MAX_PKTLEN, 0, BUS_DMA_NOWAIT, &sd->sd_map)) {
 1089                         goto bail_rxbufring;
 1090                 }
 1091                 if (bus_dmamap_load_mbuf(sc->sc_dmat, sd->sd_map, sd->sd_mbuf,
 1092                     BUS_DMA_NOWAIT)) {
 1093                         bus_dmamap_destroy(sc->sc_dmat, sd->sd_map);
 1094                         goto bail_rxbufring;
 1095                 }
 1096                 bus_dmamap_sync(sc->sc_dmat, sd->sd_map, 0,
 1097                     sd->sd_map->dm_mapsize, BUS_DMASYNC_PREREAD);
 1098 
 1099                 sc->sc_rxbufs[i].rb_paddrlo =
 1100                     ((u_int64_t)sd->sd_map->dm_segs[0].ds_addr) & 0xffffffff;
 1101                 sc->sc_rxbufs[i].rb_paddrhi =
 1102                     ((u_int64_t)sd->sd_map->dm_segs[0].ds_addr) >> 32;
 1103         }
 1104         bus_dmamap_sync(sc->sc_dmat, sc->sc_rxbufring_dma.dma_map,
 1105             0, sc->sc_rxbufring_dma.dma_map->dm_mapsize,
 1106             BUS_DMASYNC_PREWRITE);
 1107         sc->sc_hostvar->hv_rx_buf_write_idx = htole32((RXBUF_ENTRIES - 1) *
 1108             sizeof(struct txp_rxbuf_desc));
 1109 
 1110         /* zero dma */
 1111         if (txp_dma_malloc(sc, sizeof(u_int32_t), &sc->sc_zero_dma,
 1112             BUS_DMA_COHERENT)) {
 1113                 printf("can't allocate response ring\n");
 1114                 goto bail_rxbufring;
 1115         }
 1116         bzero(sc->sc_zero_dma.dma_vaddr, sizeof(u_int32_t));
 1117         boot->br_zero_lo = htole32(sc->sc_zero_dma.dma_paddr & 0xffffffff);
 1118         boot->br_zero_hi = htole32(sc->sc_zero_dma.dma_paddr >> 32);
 1119 
 1120         /* See if it's waiting for boot, and try to boot it */
 1121         for (i = 0; i < 10000; i++) {
 1122                 r = READ_REG(sc, TXP_A2H_0);
 1123                 if (r == STAT_WAITING_FOR_BOOT)
 1124                         break;
 1125                 DELAY(50);
 1126         }
 1127         if (r != STAT_WAITING_FOR_BOOT) {
 1128                 printf("not waiting for boot\n");
 1129                 goto bail;
 1130         }
 1131         WRITE_REG(sc, TXP_H2A_2, sc->sc_boot_dma.dma_paddr >> 32);
 1132         WRITE_REG(sc, TXP_H2A_1, sc->sc_boot_dma.dma_paddr & 0xffffffff);
 1133         WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_REGISTER_BOOT_RECORD);
 1134 
 1135         /* See if it booted */
 1136         for (i = 0; i < 10000; i++) {
 1137                 r = READ_REG(sc, TXP_A2H_0);
 1138                 if (r == STAT_RUNNING)
 1139                         break;
 1140                 DELAY(50);
 1141         }
 1142         if (r != STAT_RUNNING) {
 1143                 printf("fw not running\n");
 1144                 goto bail;
 1145         }
 1146 
 1147         /* Clear TX and CMD ring write registers */
 1148         WRITE_REG(sc, TXP_H2A_1, TXP_BOOTCMD_NULL);
 1149         WRITE_REG(sc, TXP_H2A_2, TXP_BOOTCMD_NULL);
 1150         WRITE_REG(sc, TXP_H2A_3, TXP_BOOTCMD_NULL);
 1151         WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_NULL);
 1152 
 1153         return (0);
 1154 
 1155 bail:
 1156         txp_dma_free(sc, &sc->sc_zero_dma);
 1157 bail_rxbufring:
 1158         for (i = 0; i < RXBUF_ENTRIES; i++) {
 1159                 bcopy((u_long *)&sc->sc_rxbufs[i].rb_vaddrlo, &sd, sizeof(sd));
 1160                 if (sd)
 1161                         free(sd, M_DEVBUF);
 1162         }
 1163         txp_dma_free(sc, &sc->sc_rxbufring_dma);
 1164 bail_rspring:
 1165         txp_dma_free(sc, &sc->sc_rspring_dma);
 1166 bail_cmdring:
 1167         txp_dma_free(sc, &sc->sc_cmdring_dma);
 1168 bail_rxloring:
 1169         txp_dma_free(sc, &sc->sc_rxloring_dma);
 1170 bail_rxhiring:
 1171         txp_dma_free(sc, &sc->sc_rxhiring_dma);
 1172 bail_txloring:
 1173         txp_dma_free(sc, &sc->sc_txloring_dma);
 1174 bail_txhiring:
 1175         txp_dma_free(sc, &sc->sc_txhiring_dma);
 1176 bail_host:
 1177         txp_dma_free(sc, &sc->sc_host_dma);
 1178 bail_boot:
 1179         txp_dma_free(sc, &sc->sc_boot_dma);
 1180         return (-1);
 1181 }
 1182 
 1183 int
 1184 txp_dma_malloc(sc, size, dma, mapflags)
 1185         struct txp_softc *sc;
 1186         bus_size_t size;
 1187         struct txp_dma_alloc *dma;
 1188         int mapflags;
 1189 {
 1190         int r;
 1191 
 1192         if ((r = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0,
 1193             &dma->dma_seg, 1, &dma->dma_nseg, 0)) != 0)
 1194                 goto fail_0;
 1195 
 1196         if ((r = bus_dmamem_map(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg,
 1197             size, &dma->dma_vaddr, mapflags | BUS_DMA_NOWAIT)) != 0)
 1198                 goto fail_1;
 1199 
 1200         if ((r = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
 1201             BUS_DMA_NOWAIT, &dma->dma_map)) != 0)
 1202                 goto fail_2;
 1203 
 1204         if ((r = bus_dmamap_load(sc->sc_dmat, dma->dma_map, dma->dma_vaddr,
 1205             size, NULL, BUS_DMA_NOWAIT)) != 0)
 1206                 goto fail_3;
 1207 
 1208         dma->dma_paddr = dma->dma_map->dm_segs[0].ds_addr;
 1209         return (0);
 1210 
 1211 fail_3:
 1212         bus_dmamap_destroy(sc->sc_dmat, dma->dma_map);
 1213 fail_2:
 1214         bus_dmamem_unmap(sc->sc_dmat, dma->dma_vaddr, size);
 1215 fail_1:
 1216         bus_dmamem_free(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg);
 1217 fail_0:
 1218         return (r);
 1219 }
 1220 
 1221 void
 1222 txp_dma_free(sc, dma)
 1223         struct txp_softc *sc;
 1224         struct txp_dma_alloc *dma;
 1225 {
 1226         bus_dmamap_unload(sc->sc_dmat, dma->dma_map);
 1227         bus_dmamem_unmap(sc->sc_dmat, dma->dma_vaddr, dma->dma_map->dm_mapsize);
 1228         bus_dmamem_free(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg);
 1229         bus_dmamap_destroy(sc->sc_dmat, dma->dma_map);
 1230 }
 1231 
 1232 int
 1233 txp_ioctl(ifp, command, data)
 1234         struct ifnet *ifp;
 1235         u_long command;
 1236         caddr_t data;
 1237 {
 1238         struct txp_softc *sc = ifp->if_softc;
 1239         struct ifreq *ifr = (struct ifreq *)data;
 1240         struct ifaddr *ifa = (struct ifaddr *)data;
 1241         int s, error = 0;
 1242 
 1243         s = splnet();
 1244 
 1245         if ((error = ether_ioctl(ifp, &sc->sc_arpcom, command, data)) > 0) {
 1246                 splx(s);
 1247                 return error;
 1248         }
 1249 
 1250         switch(command) {
 1251         case SIOCSIFADDR:
 1252                 ifp->if_flags |= IFF_UP;
 1253                 switch (ifa->ifa_addr->sa_family) {
 1254 #ifdef INET
 1255                 case AF_INET:
 1256                         txp_init(sc);
 1257                         arp_ifinit(&sc->sc_arpcom, ifa);
 1258                         break;
 1259 #endif /* INET */
 1260                 default:
 1261                         txp_init(sc);
 1262                         break;
 1263                 }
 1264                 break;
 1265         case SIOCSIFFLAGS:
 1266                 if (ifp->if_flags & IFF_UP) {
 1267                         txp_init(sc);
 1268                 } else {
 1269                         if (ifp->if_flags & IFF_RUNNING)
 1270                                 txp_stop(sc);
 1271                 }
 1272                 break;
 1273         case SIOCADDMULTI:
 1274         case SIOCDELMULTI:
 1275                 error = (command == SIOCADDMULTI) ?
 1276                     ether_addmulti(ifr, &sc->sc_arpcom) :
 1277                     ether_delmulti(ifr, &sc->sc_arpcom);
 1278 
 1279                 if (error == ENETRESET) {
 1280                         /*
 1281                          * Multicast list has changed; set the hardware
 1282                          * filter accordingly.
 1283                          */
 1284                         if (ifp->if_flags & IFF_RUNNING)
 1285                                 txp_set_filter(sc);
 1286                         error = 0;
 1287                 }
 1288                 break;
 1289         case SIOCGIFMEDIA:
 1290         case SIOCSIFMEDIA:
 1291                 error = ifmedia_ioctl(ifp, ifr, &sc->sc_ifmedia, command);
 1292                 break;
 1293         default:
 1294                 error = ENOTTY;
 1295                 break;
 1296         }
 1297 
 1298         splx(s);
 1299 
 1300         return(error);
 1301 }
 1302 
 1303 void
 1304 txp_init(sc)
 1305         struct txp_softc *sc;
 1306 {
 1307         struct ifnet *ifp = &sc->sc_arpcom.ac_if;
 1308         int s;
 1309 
 1310         txp_stop(sc);
 1311 
 1312         s = splnet();
 1313 
 1314         txp_set_filter(sc);
 1315 
 1316         txp_command(sc, TXP_CMD_TX_ENABLE, 0, 0, 0, NULL, NULL, NULL, 1);
 1317         txp_command(sc, TXP_CMD_RX_ENABLE, 0, 0, 0, NULL, NULL, NULL, 1);
 1318 
 1319         WRITE_REG(sc, TXP_IER, TXP_INT_RESERVED | TXP_INT_SELF |
 1320             TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 |
 1321             TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0 |
 1322             TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
 1323             TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |  TXP_INT_LATCH);
 1324         WRITE_REG(sc, TXP_IMR, TXP_INT_A2H_3);
 1325 
 1326         ifp->if_flags |= IFF_RUNNING;
 1327         ifp->if_flags &= ~IFF_OACTIVE;
 1328         ifp->if_timer = 0;
 1329 
 1330         if (!timeout_pending(&sc->sc_tick))
 1331                 timeout_add(&sc->sc_tick, hz);
 1332 
 1333         splx(s);
 1334 }
 1335 
 1336 void
 1337 txp_tick(vsc)
 1338         void *vsc;
 1339 {
 1340         struct txp_softc *sc = vsc;
 1341         struct ifnet *ifp = &sc->sc_arpcom.ac_if;
 1342         struct txp_rsp_desc *rsp = NULL;
 1343         struct txp_ext_desc *ext;
 1344         int s;
 1345 
 1346         s = splnet();
 1347         txp_rxbuf_reclaim(sc);
 1348 
 1349         if (txp_command2(sc, TXP_CMD_READ_STATISTICS, 0, 0, 0, NULL, 0,
 1350             &rsp, 1))
 1351                 goto out;
 1352         if (rsp->rsp_numdesc != 6)
 1353                 goto out;
 1354         if (txp_command(sc, TXP_CMD_CLEAR_STATISTICS, 0, 0, 0,
 1355             NULL, NULL, NULL, 1))
 1356                 goto out;
 1357         ext = (struct txp_ext_desc *)(rsp + 1);
 1358 
 1359         ifp->if_ierrors += ext[3].ext_2 + ext[3].ext_3 + ext[3].ext_4 +
 1360             ext[4].ext_1 + ext[4].ext_4;
 1361         ifp->if_oerrors += ext[0].ext_1 + ext[1].ext_1 + ext[1].ext_4 +
 1362             ext[2].ext_1;
 1363         ifp->if_collisions += ext[0].ext_2 + ext[0].ext_3 + ext[1].ext_2 +
 1364             ext[1].ext_3;
 1365         ifp->if_opackets += rsp->rsp_par2;
 1366         ifp->if_ipackets += ext[2].ext_3;
 1367 
 1368 out:
 1369         if (rsp != NULL)
 1370                 free(rsp, M_DEVBUF);
 1371 
 1372         splx(s);
 1373         timeout_add(&sc->sc_tick, hz);
 1374 }
 1375 
/*
 * Transmit start routine.
 *
 * Drains the interface send queue into the high priority tx ring: for
 * each packet, a DATA descriptor followed by one FRAG descriptor per
 * DMA segment.  If the mbuf chain can't be mapped directly it is
 * copied into a fresh single mbuf/cluster first.  On ring exhaustion
 * the producer state is rolled back and IFF_OACTIVE is set.
 */
void
txp_start(ifp)
        struct ifnet *ifp;
{
        struct txp_softc *sc = ifp->if_softc;
        struct txp_tx_ring *r = &sc->sc_txhir;
        struct txp_tx_desc *txd;
        int txdidx;
        struct txp_frag_desc *fxd;
        struct mbuf *m, *mnew;
        struct txp_swdesc *sd;
        u_int32_t firstprod, firstcnt, prod, cnt, i;
#if NVLAN > 0
        struct ifvlan           *ifv;
#endif

        if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
                return;

        prod = r->r_prod;
        cnt = r->r_cnt;

        while (1) {
                /* Peek only; the packet is dequeued once it's committed. */
                IFQ_POLL(&ifp->if_snd, m);
                if (m == NULL)
                        break;
                mnew = NULL;

                /* remember ring state for rollback on failure */
                firstprod = prod;
                firstcnt = cnt;

                sd = sc->sc_txd + prod;
                sd->sd_mbuf = m;

                if (bus_dmamap_load_mbuf(sc->sc_dmat, sd->sd_map, m,
                    BUS_DMA_NOWAIT)) {
                        /*
                         * Chain too fragmented to map: flatten it into a
                         * single mbuf (cluster if needed) and retry.
                         */
                        MGETHDR(mnew, M_DONTWAIT, MT_DATA);
                        if (mnew == NULL)
                                goto oactive1;
                        if (m->m_pkthdr.len > MHLEN) {
                                MCLGET(mnew, M_DONTWAIT);
                                if ((mnew->m_flags & M_EXT) == 0) {
                                        m_freem(mnew);
                                        goto oactive1;
                                }
                        }
                        m_copydata(m, 0, m->m_pkthdr.len, mtod(mnew, caddr_t));
                        mnew->m_pkthdr.len = mnew->m_len = m->m_pkthdr.len;
                        IFQ_DEQUEUE(&ifp->if_snd, m);
                        m_freem(m);
                        m = mnew;
                        /*
                         * NOTE(review): sd->sd_mbuf still points at the
                         * mbuf freed just above, not at mnew — looks like
                         * a stale pointer that txp_tx_reclaim() may later
                         * free; confirm against later revisions.  Also, if
                         * this second load fails, mnew leaks and the
                         * already-dequeued packet is silently dropped.
                         */
                        if (bus_dmamap_load_mbuf(sc->sc_dmat, sd->sd_map, m,
                            BUS_DMA_NOWAIT))
                                goto oactive1;
                }

                /* need room for the data descriptor plus slack */
                if ((TX_ENTRIES - cnt) < 4)
                        goto oactive;

                txd = r->r_desc + prod;
                txdidx = prod;
                txd->tx_flags = TX_FLAGS_TYPE_DATA;
                txd->tx_numdesc = 0;
                txd->tx_addrlo = 0;
                txd->tx_addrhi = 0;
                txd->tx_totlen = m->m_pkthdr.len;
                txd->tx_pflags = 0;
                txd->tx_numdesc = sd->sd_map->dm_nsegs;

                if (++prod == TX_ENTRIES)
                        prod = 0;

                if (++cnt >= (TX_ENTRIES - 4))
                        goto oactive;

#if NVLAN > 0
                /* M_PROTO1 marks a vlan(4)-encapsulated packet */
                if ((m->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) &&
                    m->m_pkthdr.rcvif != NULL) {
                        ifv = m->m_pkthdr.rcvif->if_softc;
                        txd->tx_pflags = TX_PFLAGS_VLAN |
                            (htons(ifv->ifv_tag) << TX_PFLAGS_VLANTAG_S);
                }
#endif

                if (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
                        txd->tx_pflags |= TX_PFLAGS_IPCKSUM;
#ifdef TRY_TX_TCP_CSUM
                if (m->m_pkthdr.csum_flags & M_TCPV4_CSUM_OUT)
                        txd->tx_pflags |= TX_PFLAGS_TCPCKSUM;
#endif
#ifdef TRY_TX_UDP_CSUM
                if (m->m_pkthdr.csum_flags & M_UDPV4_CSUM_OUT)
                        txd->tx_pflags |= TX_PFLAGS_UDPCKSUM;
#endif

                bus_dmamap_sync(sc->sc_dmat, sd->sd_map, 0,
                    sd->sd_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

                /* emit one fragment descriptor per DMA segment */
                fxd = (struct txp_frag_desc *)(r->r_desc + prod);
                for (i = 0; i < sd->sd_map->dm_nsegs; i++) {
                        if (++cnt >= (TX_ENTRIES - 4)) {
                                /* ring full mid-packet: undo the sync too */
                                bus_dmamap_sync(sc->sc_dmat, sd->sd_map,
                                    0, sd->sd_map->dm_mapsize,
                                    BUS_DMASYNC_POSTWRITE);
                                goto oactive;
                        }

                        fxd->frag_flags = FRAG_FLAGS_TYPE_FRAG |
                            FRAG_FLAGS_VALID;
                        fxd->frag_rsvd1 = 0;
                        fxd->frag_len = sd->sd_map->dm_segs[i].ds_len;
                        fxd->frag_addrlo =
                            ((u_int64_t)sd->sd_map->dm_segs[i].ds_addr) &
                            0xffffffff;
                        fxd->frag_addrhi =
                            ((u_int64_t)sd->sd_map->dm_segs[i].ds_addr) >>
                            32;
                        fxd->frag_rsvd2 = 0;

                        bus_dmamap_sync(sc->sc_dmat,
                            sc->sc_txhiring_dma.dma_map,
                            prod * sizeof(struct txp_frag_desc),
                            sizeof(struct txp_frag_desc), BUS_DMASYNC_PREWRITE);

                        if (++prod == TX_ENTRIES) {
                                fxd = (struct txp_frag_desc *)r->r_desc;
                                prod = 0;
                        } else
                                fxd++;

                }

                /*
                 * if mnew isn't NULL, we already dequeued and copied
                 * the packet.
                 */
                if (mnew == NULL)
                        IFQ_DEQUEUE(&ifp->if_snd, m);

                /* arm the watchdog for this transmission */
                ifp->if_timer = 5;

#if NBPFILTER > 0
                if (ifp->if_bpf)
                        bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

                /* mark VALID last, then flush the descriptor to the NIC */
                txd->tx_flags |= TX_FLAGS_VALID;
                bus_dmamap_sync(sc->sc_dmat, sc->sc_txhiring_dma.dma_map,
                    txdidx * sizeof(struct txp_tx_desc),
                    sizeof(struct txp_tx_desc), BUS_DMASYNC_PREWRITE);

#if 0
                {
                        struct mbuf *mx;
                        int i;

                        printf("txd: flags 0x%x ndesc %d totlen %d pflags 0x%x\n",
                            txd->tx_flags, txd->tx_numdesc, txd->tx_totlen,
                            txd->tx_pflags);
                        for (mx = m; mx != NULL; mx = mx->m_next) {
                                for (i = 0; i < mx->m_len; i++) {
                                        printf(":%02x",
                                            (u_int8_t)m->m_data[i]);
                                }
                        }
                        printf("\n");
                }
#endif

                /* ring the doorbell with the new producer offset */
                WRITE_REG(sc, r->r_reg, TXP_IDX2OFFSET(prod));
        }

        r->r_prod = prod;
        r->r_cnt = cnt;
        return;

oactive:
        bus_dmamap_unload(sc->sc_dmat, sd->sd_map);
oactive1:
        /* roll back to the state before this packet and stall the queue */
        ifp->if_flags |= IFF_OACTIVE;
        r->r_prod = firstprod;
        r->r_cnt = firstcnt;
}
 1559 
 1560 /*
 1561  * Handle simple commands sent to the typhoon
 1562  */
 1563 int
 1564 txp_command(sc, id, in1, in2, in3, out1, out2, out3, wait)
 1565         struct txp_softc *sc;
 1566         u_int16_t id, in1, *out1;
 1567         u_int32_t in2, in3, *out2, *out3;
 1568         int wait;
 1569 {
 1570         struct txp_rsp_desc *rsp = NULL;
 1571 
 1572         if (txp_command2(sc, id, in1, in2, in3, NULL, 0, &rsp, wait))
 1573                 return (-1);
 1574 
 1575         if (!wait)
 1576                 return (0);
 1577 
 1578         if (out1 != NULL)
 1579                 *out1 = letoh16(rsp->rsp_par1);
 1580         if (out2 != NULL)
 1581                 *out2 = letoh32(rsp->rsp_par2);
 1582         if (out3 != NULL)
 1583                 *out3 = letoh32(rsp->rsp_par3);
 1584         free(rsp, M_DEVBUF);
 1585         return (0);
 1586 }
 1587 
 1588 int
 1589 txp_command2(sc, id, in1, in2, in3, in_extp, in_extn, rspp, wait)
 1590         struct txp_softc *sc;
 1591         u_int16_t id, in1;
 1592         u_int32_t in2, in3;
 1593         struct txp_ext_desc *in_extp;
 1594         u_int8_t in_extn;
 1595         struct txp_rsp_desc **rspp;
 1596         int wait;
 1597 {
 1598         struct txp_hostvar *hv = sc->sc_hostvar;
 1599         struct txp_cmd_desc *cmd;
 1600         struct txp_ext_desc *ext;
 1601         u_int32_t idx, i;
 1602         u_int16_t seq;
 1603 
 1604         if (txp_cmd_desc_numfree(sc) < (in_extn + 1)) {
 1605                 printf("%s: no free cmd descriptors\n", TXP_DEVNAME(sc));
 1606                 return (-1);
 1607         }
 1608 
 1609         idx = sc->sc_cmdring.lastwrite;
 1610         cmd = (struct txp_cmd_desc *)(((u_int8_t *)sc->sc_cmdring.base) + idx);
 1611         bzero(cmd, sizeof(*cmd));
 1612 
 1613         cmd->cmd_numdesc = in_extn;
 1614         seq = sc->sc_seq++;
 1615         cmd->cmd_seq = htole16(seq);
 1616         cmd->cmd_id = htole16(id);
 1617         cmd->cmd_par1 = htole16(in1);
 1618         cmd->cmd_par2 = htole32(in2);
 1619         cmd->cmd_par3 = htole32(in3);
 1620         cmd->cmd_flags = CMD_FLAGS_TYPE_CMD |
 1621             (wait ? CMD_FLAGS_RESP : 0) | CMD_FLAGS_VALID;
 1622 
 1623         idx += sizeof(struct txp_cmd_desc);
 1624         if (idx == sc->sc_cmdring.size)
 1625                 idx = 0;
 1626 
 1627         for (i = 0; i < in_extn; i++) {
 1628                 ext = (struct txp_ext_desc *)(((u_int8_t *)sc->sc_cmdring.base) + idx);
 1629                 bcopy(in_extp, ext, sizeof(struct txp_ext_desc));
 1630                 in_extp++;
 1631                 idx += sizeof(struct txp_cmd_desc);
 1632                 if (idx == sc->sc_cmdring.size)
 1633                         idx = 0;
 1634         }
 1635 
 1636         sc->sc_cmdring.lastwrite = idx;
 1637 
 1638         WRITE_REG(sc, TXP_H2A_2, sc->sc_cmdring.lastwrite);
 1639         bus_dmamap_sync(sc->sc_dmat, sc->sc_host_dma.dma_map, 0,
 1640             sizeof(struct txp_hostvar), BUS_DMASYNC_PREREAD);
 1641 
 1642         if (!wait)
 1643                 return (0);
 1644 
 1645         for (i = 0; i < 10000; i++) {
 1646                 bus_dmamap_sync(sc->sc_dmat, sc->sc_host_dma.dma_map, 0,
 1647                     sizeof(struct txp_hostvar), BUS_DMASYNC_POSTREAD);
 1648                 idx = letoh32(hv->hv_resp_read_idx);
 1649                 if (idx != letoh32(hv->hv_resp_write_idx)) {
 1650                         *rspp = NULL;
 1651                         if (txp_response(sc, idx, id, seq, rspp))
 1652                                 return (-1);
 1653                         if (*rspp != NULL)
 1654                                 break;
 1655                 }
 1656                 bus_dmamap_sync(sc->sc_dmat, sc->sc_host_dma.dma_map, 0,
 1657                     sizeof(struct txp_hostvar), BUS_DMASYNC_PREREAD);
 1658                 DELAY(50);
 1659         }
 1660         if (i == 1000 || (*rspp) == NULL) {
 1661                 printf("%s: 0x%x command failed\n", TXP_DEVNAME(sc), id);
 1662                 return (-1);
 1663         }
 1664 
 1665         return (0);
 1666 }
 1667 
/*
 * Scan the response ring from ridx up to the firmware's write index,
 * looking for the response matching (id, seq).  The match is copied
 * into a freshly malloc'd buffer returned through *rspp (caller frees);
 * any other responses encountered along the way are consumed in place.
 * Returns 0 on success (including "no match yet"), -1 on allocation
 * failure.
 */
int
txp_response(sc, ridx, id, seq, rspp)
	struct txp_softc *sc;
	u_int32_t ridx;
	u_int16_t id;
	u_int16_t seq;
	struct txp_rsp_desc **rspp;
{
	struct txp_hostvar *hv = sc->sc_hostvar;
	struct txp_rsp_desc *rsp;

	while (ridx != letoh32(hv->hv_resp_write_idx)) {
		rsp = (struct txp_rsp_desc *)(((u_int8_t *)sc->sc_rspring.base) + ridx);

		if (id == letoh16(rsp->rsp_id) && letoh16(rsp->rsp_seq) == seq) {
			/* Room for the response plus its extension descs. */
			*rspp = (struct txp_rsp_desc *)malloc(
			    sizeof(struct txp_rsp_desc) * (rsp->rsp_numdesc + 1),
			    M_DEVBUF, M_NOWAIT);
			if ((*rspp) == NULL)
				return (-1);
			txp_rsp_fixup(sc, rsp, *rspp);
			return (0);
		}

		if (rsp->rsp_flags & RSP_FLAGS_ERROR) {
			printf("%s: response error: id 0x%x\n",
			    TXP_DEVNAME(sc), letoh16(rsp->rsp_id));
			/* Consume the descriptor(s) without copying them. */
			txp_rsp_fixup(sc, rsp, NULL);
			ridx = letoh32(hv->hv_resp_read_idx);
			continue;
		}

		/* Unsolicited responses: nothing to do beyond reporting. */
		switch (letoh16(rsp->rsp_id)) {
		case TXP_CMD_CYCLE_STATISTICS:
		case TXP_CMD_MEDIA_STATUS_READ:
			break;
		case TXP_CMD_HELLO_RESPONSE:
			printf("%s: hello\n", TXP_DEVNAME(sc));
			break;
		default:
			printf("%s: unknown id(0x%x)\n", TXP_DEVNAME(sc),
			    letoh16(rsp->rsp_id));
		}

		txp_rsp_fixup(sc, rsp, NULL);
		ridx = letoh32(hv->hv_resp_read_idx);
		/*
		 * NOTE(review): letoh32() where htole32() is presumably
		 * meant; both are the same byte-swap (or identity), so the
		 * stored value is unchanged either way — confirm intent.
		 * txp_rsp_fixup() has already advanced hv_resp_read_idx,
		 * so this write is redundant but harmless.
		 */
		hv->hv_resp_read_idx = letoh32(ridx);
	}

	return (0);
}
 1719 
/*
 * Copy a response and its rsp_numdesc extension descriptors out of the
 * (wrapping) response ring into dst, or merely consume them if dst is
 * NULL, advancing the host's read index past them as we go.
 */
void
txp_rsp_fixup(sc, rsp, dst)
	struct txp_softc *sc;
	struct txp_rsp_desc *rsp, *dst;
{
	struct txp_rsp_desc *src = rsp;
	struct txp_hostvar *hv = sc->sc_hostvar;
	u_int32_t i, ridx;

	ridx = letoh32(hv->hv_resp_read_idx);

	for (i = 0; i < rsp->rsp_numdesc + 1; i++) {
		if (dst != NULL)
			bcopy(src, dst++, sizeof(struct txp_rsp_desc));
		ridx += sizeof(struct txp_rsp_desc);
		if (ridx == sc->sc_rspring.size) {
			/* End of ring: wrap back to the start. */
			src = sc->sc_rspring.base;
			ridx = 0;
		} else
			src++;
		/* Publish progress after each descriptor consumed. */
		sc->sc_rspring.lastwrite = ridx;
		hv->hv_resp_read_idx = htole32(ridx);
	}

	/* Redundant with the last loop iteration, but harmless. */
	hv->hv_resp_read_idx = htole32(ridx);
}
 1746 
 1747 int
 1748 txp_cmd_desc_numfree(sc)
 1749         struct txp_softc *sc;
 1750 {
 1751         struct txp_hostvar *hv = sc->sc_hostvar;
 1752         struct txp_boot_record *br = sc->sc_boot;
 1753         u_int32_t widx, ridx, nfree;
 1754 
 1755         widx = sc->sc_cmdring.lastwrite;
 1756         ridx = letoh32(hv->hv_cmd_read_idx);
 1757 
 1758         if (widx == ridx) {
 1759                 /* Ring is completely free */
 1760                 nfree = letoh32(br->br_cmd_siz) - sizeof(struct txp_cmd_desc);
 1761         } else {
 1762                 if (widx > ridx)
 1763                         nfree = letoh32(br->br_cmd_siz) -
 1764                             (widx - ridx + sizeof(struct txp_cmd_desc));
 1765                 else
 1766                         nfree = ridx - widx - sizeof(struct txp_cmd_desc);
 1767         }
 1768 
 1769         return (nfree / sizeof(struct txp_cmd_desc));
 1770 }
 1771 
 1772 void
 1773 txp_stop(sc)
 1774         struct txp_softc *sc;
 1775 {
 1776         txp_command(sc, TXP_CMD_TX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 1);
 1777         txp_command(sc, TXP_CMD_RX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 1);
 1778 
 1779         if (timeout_pending(&sc->sc_tick))
 1780                 timeout_del(&sc->sc_tick);
 1781 }
 1782 
/*
 * Watchdog timer hook.  Intentionally empty: the transmit path arms
 * ifp->if_timer, but no recovery action is implemented when it fires.
 */
void
txp_watchdog(ifp)
	struct ifnet *ifp;
{
}
 1788 
 1789 int
 1790 txp_ifmedia_upd(ifp)
 1791         struct ifnet *ifp;
 1792 {
 1793         struct txp_softc *sc = ifp->if_softc;
 1794         struct ifmedia *ifm = &sc->sc_ifmedia;
 1795         u_int16_t new_xcvr;
 1796 
 1797         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
 1798                 return (EINVAL);
 1799 
 1800         if (IFM_SUBTYPE(ifm->ifm_media) == IFM_10_T) {
 1801                 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
 1802                         new_xcvr = TXP_XCVR_10_FDX;
 1803                 else
 1804                         new_xcvr = TXP_XCVR_10_HDX;
 1805         } else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX) {
 1806                 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
 1807                         new_xcvr = TXP_XCVR_100_FDX;
 1808                 else
 1809                         new_xcvr = TXP_XCVR_100_HDX;
 1810         } else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) {
 1811                 new_xcvr = TXP_XCVR_AUTO;
 1812         } else
 1813                 return (EINVAL);
 1814 
 1815         /* nothing to do */
 1816         if (sc->sc_xcvr == new_xcvr)
 1817                 return (0);
 1818 
 1819         txp_command(sc, TXP_CMD_XCVR_SELECT, new_xcvr, 0, 0,
 1820             NULL, NULL, NULL, 0);
 1821         sc->sc_xcvr = new_xcvr;
 1822 
 1823         return (0);
 1824 }
 1825 
/*
 * Report current media status by reading the PHY registers through
 * the firmware's PHY management interface.
 */
void
txp_ifmedia_sts(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	struct txp_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->sc_ifmedia;
	u_int16_t bmsr, bmcr, anlpar;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/*
	 * BMSR is read twice on purpose — presumably because the MII
	 * link-status bit is latched, so the first read returns stale
	 * state and the second the current one (standard MII practice).
	 */
	if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMSR, 0,
	    &bmsr, NULL, NULL, 1))
		goto bail;
	if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMSR, 0,
	    &bmsr, NULL, NULL, 1))
		goto bail;

	if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMCR, 0,
	    &bmcr, NULL, NULL, 1))
		goto bail;

	if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_ANLPAR, 0,
	    &anlpar, NULL, NULL, 1))
		goto bail;

	if (bmsr & BMSR_LINK)
		ifmr->ifm_status |= IFM_ACTIVE;

	/* PHY electrically isolated: report no usable media. */
	if (bmcr & BMCR_ISO) {
		ifmr->ifm_active |= IFM_NONE;
		ifmr->ifm_status = 0;
		return;
	}

	if (bmcr & BMCR_LOOP)
		ifmr->ifm_active |= IFM_LOOP;

	if (bmcr & BMCR_AUTOEN) {
		/* Autonegotiation not yet complete: no media to report. */
		if ((bmsr & BMSR_ACOMP) == 0) {
			ifmr->ifm_active |= IFM_NONE;
			return;
		}

		/* Decode link partner abilities, fastest first. */
		if (anlpar & ANLPAR_T4)
			ifmr->ifm_active |= IFM_100_T4;
		else if (anlpar & ANLPAR_TX_FD)
			ifmr->ifm_active |= IFM_100_TX|IFM_FDX;
		else if (anlpar & ANLPAR_TX)
			ifmr->ifm_active |= IFM_100_TX;
		else if (anlpar & ANLPAR_10_FD)
			ifmr->ifm_active |= IFM_10_T|IFM_FDX;
		else if (anlpar & ANLPAR_10)
			ifmr->ifm_active |= IFM_10_T;
		else
			ifmr->ifm_active |= IFM_NONE;
	} else
		/* Fixed media: report what was manually selected. */
		ifmr->ifm_active = ifm->ifm_cur->ifm_media;
	return;

bail:
	/* A PHY read failed: mark the status invalid. */
	ifmr->ifm_active |= IFM_NONE;
	ifmr->ifm_status &= ~IFM_AVALID;
}
 1891 
/*
 * Debug helper: decode and print a typhoon descriptor according to
 * the type bits in its flags field.
 */
void
txp_show_descriptor(d)
	void *d;
{
	/* All descriptor types share the flags byte, so alias them all. */
	struct txp_cmd_desc *cmd = d;
	struct txp_rsp_desc *rsp = d;
	struct txp_tx_desc *txd = d;
	struct txp_frag_desc *frgd = d;

	switch (cmd->cmd_flags & CMD_FLAGS_TYPE_M) {
	case CMD_FLAGS_TYPE_CMD:
		/* command descriptor */
		printf("[cmd flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n",
		    cmd->cmd_flags, cmd->cmd_numdesc, letoh16(cmd->cmd_id),
		    letoh16(cmd->cmd_seq), letoh16(cmd->cmd_par1),
		    letoh32(cmd->cmd_par2), letoh32(cmd->cmd_par3));
		break;
	case CMD_FLAGS_TYPE_RESP:
		/* response descriptor */
		printf("[rsp flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n",
		    rsp->rsp_flags, rsp->rsp_numdesc, letoh16(rsp->rsp_id),
		    letoh16(rsp->rsp_seq), letoh16(rsp->rsp_par1),
		    letoh32(rsp->rsp_par2), letoh32(rsp->rsp_par3));
		break;
	case CMD_FLAGS_TYPE_DATA:
		/* data header (assuming tx for now) */
		printf("[data flags 0x%x num %d totlen %d addr 0x%x/0x%x pflags 0x%x]",
		    txd->tx_flags, txd->tx_numdesc, txd->tx_totlen,
		    txd->tx_addrlo, txd->tx_addrhi, txd->tx_pflags);
		break;
	case CMD_FLAGS_TYPE_FRAG:
		/* fragment descriptor */
		printf("[frag flags 0x%x rsvd1 0x%x len %d addr 0x%x/0x%x rsvd2 0x%x]",
		    frgd->frag_flags, frgd->frag_rsvd1, frgd->frag_len,
		    frgd->frag_addrlo, frgd->frag_addrhi, frgd->frag_rsvd2);
		break;
	default:
		/* Unrecognized type bits: dump it as if it were a command. */
		printf("[unknown(%x) flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n",
		    cmd->cmd_flags & CMD_FLAGS_TYPE_M,
		    cmd->cmd_flags, cmd->cmd_numdesc, letoh16(cmd->cmd_id),
		    letoh16(cmd->cmd_seq), letoh16(cmd->cmd_par1),
		    letoh32(cmd->cmd_par2), letoh32(cmd->cmd_par3));
		break;
	}
}
 1937 
/*
 * Program the chip's receive filter to match the interface state:
 * promiscuous, broadcast, all-multicast, or a 64-bit multicast hash
 * built from the interface's multicast address list.
 */
void
txp_set_filter(sc)
	struct txp_softc *sc;
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	u_int32_t hashbit, hash[2];
	u_int16_t filter;
	int mcnt = 0;
	struct ether_multi *enm;
	struct ether_multistep step;

	if (ifp->if_flags & IFF_PROMISC) {
		filter = TXP_RXFILT_PROMISC;
		goto setit;
	}

again:
	filter = TXP_RXFILT_DIRECT;

	if (ifp->if_flags & IFF_BROADCAST)
		filter |= TXP_RXFILT_BROADCAST;

	if (ifp->if_flags & IFF_ALLMULTI)
		filter |= TXP_RXFILT_ALLMULTI;
	else {
		hash[0] = hash[1] = 0;

		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
				/*
				 * We must listen to a range of multicast
				 * addresses.  For now, just accept all
				 * multicasts, rather than trying to set only
				 * those filter bits needed to match the range.
				 * (At this time, the only use of address
				 * ranges is for IP multicast routing, for
				 * which the range is big enough to require
				 * all bits set.)
				 */
				ifp->if_flags |= IFF_ALLMULTI;
				/* Restart; the ALLMULTI branch now applies. */
				goto again;
			}

			mcnt++;
			/* CRC32 of the address selects one of 64 hash bits. */
			hashbit = (u_int16_t)(ether_crc32_be(enm->enm_addrlo,
			    ETHER_ADDR_LEN) & (64 - 1));
			hash[hashbit / 32] |= (1 << hashbit % 32);
			ETHER_NEXT_MULTI(step, enm);
		}

		if (mcnt > 0) {
			filter |= TXP_RXFILT_HASHMULTI;
			/* Push the two 32-bit hash words to the chip. */
			txp_command(sc, TXP_CMD_MCAST_HASH_MASK_WRITE,
			    2, hash[0], hash[1], NULL, NULL, NULL, 0);
		}
	}

setit:
	txp_command(sc, TXP_CMD_RX_FILTER_WRITE, filter, 0, 0,
	    NULL, NULL, NULL, 1);
}
 2001 
/*
 * Query the firmware's offload capabilities, record the subset we will
 * use in sc_tx_capability/sc_rx_capability, advertise the matching
 * interface capabilities, and write the chosen set back to the chip.
 */
void
txp_capabilities(sc)
	struct txp_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct txp_rsp_desc *rsp = NULL;
	struct txp_ext_desc *ext;

	if (txp_command2(sc, TXP_CMD_OFFLOAD_READ, 0, 0, 0, NULL, 0, &rsp, 1))
		goto out;

	/* Expect exactly one extension descriptor with the offload masks. */
	if (rsp->rsp_numdesc != 1)
		goto out;
	ext = (struct txp_ext_desc *)(rsp + 1);

	/*
	 * NOTE(review): ext_1/ext_2 are used without letoh32() while
	 * rsp_par2/rsp_par3 go through none either here — presumably
	 * the OFFLOAD_* bit tests are endian-safe in practice; confirm
	 * against the firmware interface spec.
	 */
	sc->sc_tx_capability = ext->ext_1 & OFFLOAD_MASK;
	sc->sc_rx_capability = ext->ext_2 & OFFLOAD_MASK;

	ifp->if_capabilities |= IFCAP_VLAN_MTU;

#if NVLAN > 0
	/* Enable hardware VLAN tagging when both directions support it. */
	if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_VLAN) {
		sc->sc_tx_capability |= OFFLOAD_VLAN;
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
	}
#endif

#if 0
	/* not ready yet */
	if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_IPSEC) {
		sc->sc_tx_capability |= OFFLOAD_IPSEC;
		sc->sc_rx_capability |= OFFLOAD_IPSEC;
		ifp->if_capabilities |= IFCAP_IPSEC;
	}
#endif

	if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_IPCKSUM) {
		sc->sc_tx_capability |= OFFLOAD_IPCKSUM;
		sc->sc_rx_capability |= OFFLOAD_IPCKSUM;
		ifp->if_capabilities |= IFCAP_CSUM_IPv4;
	}

	/* TCP/UDP checksum offload on transmit is opt-in at build time. */
	if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_TCPCKSUM) {
		sc->sc_rx_capability |= OFFLOAD_TCPCKSUM;
#ifdef TRY_TX_TCP_CSUM
		sc->sc_tx_capability |= OFFLOAD_TCPCKSUM;
		ifp->if_capabilities |= IFCAP_CSUM_TCPv4;
#endif
	}

	if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_UDPCKSUM) {
		sc->sc_rx_capability |= OFFLOAD_UDPCKSUM;
#ifdef TRY_TX_UDP_CSUM
		sc->sc_tx_capability |= OFFLOAD_UDPCKSUM;
		ifp->if_capabilities |= IFCAP_CSUM_UDPv4;
#endif
	}

	if (txp_command(sc, TXP_CMD_OFFLOAD_WRITE, 0,
	    sc->sc_tx_capability, sc->sc_rx_capability, NULL, NULL, NULL, 1))
		goto out;

out:
	if (rsp != NULL)
		free(rsp, M_DEVBUF);
}

/* [<][>][^][v][top][bottom][index][help] */