dev/ic/pgt.c

DEFINITIONS

This source file includes the following definitions.
  1. pgt_write_memory_barrier
  2. pgt_read_4
  3. pgt_write_4
  4. pgt_write_4_flush
  5. pgt_debug_events
  6. pgt_queue_frags_pending
  7. pgt_reinit_rx_desc_frag
  8. pgt_load_tx_desc_frag
  9. pgt_unload_tx_desc_frag
  10. pgt_load_firmware
  11. pgt_cleanup_queue
  12. pgt_reset
  13. pgt_stop
  14. pgt_attach
  15. pgt_detach
  16. pgt_reboot
  17. pgt_init_intr
  18. pgt_update_intr
  19. pgt_ieee80211_encap
  20. pgt_input_frames
  21. pgt_wakeup_intr
  22. pgt_sleep_intr
  23. pgt_empty_traps
  24. pgt_per_device_kthread
  25. pgt_async_reset
  26. pgt_async_update
  27. pgt_intr
  28. pgt_txdone
  29. pgt_rxdone
  30. pgt_trap_received
  31. pgt_mgmtrx_completion
  32. pgt_datarx_completion
  33. pgt_oid_get
  34. pgt_oid_retrieve
  35. pgt_oid_set
  36. pgt_state_dump
  37. pgt_mgmt_request
  38. pgt_desc_transmit
  39. pgt_maybe_trigger
  40. pgt_ieee80211_node_alloc
  41. pgt_ieee80211_newassoc
  42. pgt_ieee80211_node_free
  43. pgt_ieee80211_node_copy
  44. pgt_ieee80211_send_mgmt
  45. pgt_net_attach
  46. pgt_media_change
  47. pgt_media_status
  48. pgt_start
  49. pgt_ioctl
  50. pgt_obj_bss2scanres
  51. node_mark_active_ap
  52. node_mark_active_adhoc
  53. pgt_watchdog
  54. pgt_init
  55. pgt_update_hw_from_sw
  56. pgt_hostap_handle_mlme
  57. pgt_update_sw_from_hw
  58. pgt_newstate
  59. pgt_drain_tx_queue
  60. pgt_dma_alloc
  61. pgt_dma_alloc_queue
  62. pgt_dma_free
  63. pgt_dma_free_queue
  64. pgt_shutdown
  65. pgt_power

    1 /*      $OpenBSD: pgt.c,v 1.43 2007/07/18 18:10:31 damien Exp $  */
    2 
    3 /*
    4  * Copyright (c) 2006 Claudio Jeker <claudio@openbsd.org>
    5  * Copyright (c) 2006 Marcus Glocker <mglocker@openbsd.org>
    6  *
    7  * Permission to use, copy, modify, and distribute this software for any
    8  * purpose with or without fee is hereby granted, provided that the above
    9  * copyright notice and this permission notice appear in all copies.
   10  *
   11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
   12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
   13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
   14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
   15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
   18  */
   19 
   20 /*
   21  * Copyright (c) 2004 Fujitsu Laboratories of America, Inc.
   22  * Copyright (c) 2004 Brian Fundakowski Feldman
   23  * All rights reserved.
   24  *
   25  * Redistribution and use in source and binary forms, with or without
   26  * modification, are permitted provided that the following conditions
   27  * are met:
   28  * 1. Redistributions of source code must retain the above copyright
   29  *    notice, this list of conditions and the following disclaimer.
   30  * 2. Redistributions in binary form must reproduce the above copyright
   31  *    notice, this list of conditions and the following disclaimer in the
   32  *    documentation and/or other materials provided with the distribution.
   33  *
   34  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   35  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   36  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   37  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   38  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   39  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   40  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   41  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   42  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   43  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   44  * SUCH DAMAGE.
   45  */
   46 
   47 #include <sys/cdefs.h>
   48 #include "bpfilter.h"
   49 
   50 #include <sys/param.h>
   51 #include <sys/systm.h>
   52 #include <sys/kernel.h>
   53 #include <sys/malloc.h>
   54 #include <sys/socket.h>
   55 #include <sys/mbuf.h>
   56 #include <sys/endian.h>
   57 #include <sys/sockio.h>
   58 #include <sys/sysctl.h>
   59 #include <sys/kthread.h>
   60 #include <sys/time.h>
   61 #include <sys/ioctl.h>
   62 #include <sys/device.h>
   63 
   64 #include <machine/bus.h>
   65 #include <machine/endian.h>
   66 #include <machine/intr.h>
   67 
   68 #include <net/if.h>
   69 #include <net/if_arp.h>
   70 #include <net/if_dl.h>
   71 #include <net/if_llc.h>
   72 #include <net/if_media.h>
   73 #include <net/if_types.h>
   74 
   75 #if NBPFILTER > 0
   76 #include <net/bpf.h>
   77 #endif
   78 
   79 #ifdef INET
   80 #include <netinet/in.h>
   81 #include <netinet/in_systm.h>
   82 #include <netinet/in_var.h>
   83 #include <netinet/if_ether.h>
   84 #include <netinet/ip.h>
   85 #endif
   86 
   87 #include <net80211/ieee80211_var.h>
   88 #include <net80211/ieee80211_radiotap.h>
   89 
   90 #include <dev/ic/pgtreg.h>
   91 #include <dev/ic/pgtvar.h>
   92 
   93 #include <dev/ic/if_wireg.h>
   94 #include <dev/ic/if_wi_ieee.h>
   95 #include <dev/ic/if_wivar.h>
   96 
   97 #ifdef PGT_DEBUG
   98 #define DPRINTF(x)      do { printf x; } while (0)
   99 #else
  100 #define DPRINTF(x)
  101 #endif
  102 
  103 #define SETOID(oid, var, size) {                                        \
  104         if (pgt_oid_set(sc, oid, var, size) != 0)                       \
  105                 break;                                                  \
  106 }
  107 
  108 /*
  109  * This is a driver for the Intersil Prism family of 802.11g network cards,
  110  * based upon version 1.2 of the Linux driver and firmware found at
  111  * http://www.prism54.org/.
  112  */
  113 
  114 #define SCAN_TIMEOUT                    5       /* 5 seconds */
  115 
  116 struct cfdriver pgt_cd = {
  117         NULL, "pgt", DV_IFNET
  118 };
  119 
  120 void     pgt_media_status(struct ifnet *ifp, struct ifmediareq *imr);
  121 int      pgt_media_change(struct ifnet *ifp);
  122 void     pgt_write_memory_barrier(struct pgt_softc *);
  123 uint32_t pgt_read_4(struct pgt_softc *, uint16_t);
  124 void     pgt_write_4(struct pgt_softc *, uint16_t, uint32_t);
  125 void     pgt_write_4_flush(struct pgt_softc *, uint16_t, uint32_t);
  126 void     pgt_debug_events(struct pgt_softc *, const char *);
  127 uint32_t pgt_queue_frags_pending(struct pgt_softc *, enum pgt_queue);
  128 void     pgt_reinit_rx_desc_frag(struct pgt_softc *, struct pgt_desc *);
  129 int      pgt_load_tx_desc_frag(struct pgt_softc *, enum pgt_queue,
  130              struct pgt_desc *);
  131 void     pgt_unload_tx_desc_frag(struct pgt_softc *, struct pgt_desc *);
  132 int      pgt_load_firmware(struct pgt_softc *);
  133 void     pgt_cleanup_queue(struct pgt_softc *, enum pgt_queue,
  134              struct pgt_frag []);
  135 int      pgt_reset(struct pgt_softc *);
  136 void     pgt_stop(struct pgt_softc *, unsigned int);
  137 void     pgt_reboot(struct pgt_softc *);
  138 void     pgt_init_intr(struct pgt_softc *);
  139 void     pgt_update_intr(struct pgt_softc *, int);
  140 struct mbuf
  141         *pgt_ieee80211_encap(struct pgt_softc *, struct ether_header *,
  142              struct mbuf *, struct ieee80211_node **);
  143 void     pgt_input_frames(struct pgt_softc *, struct mbuf *);
  144 void     pgt_wakeup_intr(struct pgt_softc *);
  145 void     pgt_sleep_intr(struct pgt_softc *);
  146 void     pgt_empty_traps(struct pgt_softc_kthread *);
  147 void     pgt_per_device_kthread(void *);
  148 void     pgt_async_reset(struct pgt_softc *);
  149 void     pgt_async_update(struct pgt_softc *);
  150 void     pgt_txdone(struct pgt_softc *, enum pgt_queue);
  151 void     pgt_rxdone(struct pgt_softc *, enum pgt_queue);
  152 void     pgt_trap_received(struct pgt_softc *, uint32_t, void *, size_t);
  153 void     pgt_mgmtrx_completion(struct pgt_softc *, struct pgt_mgmt_desc *);
  154 struct mbuf
  155         *pgt_datarx_completion(struct pgt_softc *, enum pgt_queue);
  156 int      pgt_oid_get(struct pgt_softc *, enum pgt_oid, void *, size_t);
  157 int      pgt_oid_retrieve(struct pgt_softc *, enum pgt_oid, void *, size_t);
  158 int      pgt_oid_set(struct pgt_softc *, enum pgt_oid, const void *, size_t);
  159 void     pgt_state_dump(struct pgt_softc *);
  160 int      pgt_mgmt_request(struct pgt_softc *, struct pgt_mgmt_desc *);
  161 void     pgt_desc_transmit(struct pgt_softc *, enum pgt_queue,
  162              struct pgt_desc *, uint16_t, int);
  163 void     pgt_maybe_trigger(struct pgt_softc *, enum pgt_queue);
  164 struct ieee80211_node
  165         *pgt_ieee80211_node_alloc(struct ieee80211com *);
  166 void     pgt_ieee80211_newassoc(struct ieee80211com *,
  167              struct ieee80211_node *, int);
  168 void     pgt_ieee80211_node_free(struct ieee80211com *,
  169             struct ieee80211_node *);
  170 void     pgt_ieee80211_node_copy(struct ieee80211com *,
  171              struct ieee80211_node *,
  172              const struct ieee80211_node *);
  173 int      pgt_ieee80211_send_mgmt(struct ieee80211com *,
  174              struct ieee80211_node *, int, int);
  175 int      pgt_net_attach(struct pgt_softc *);
  176 void     pgt_start(struct ifnet *);
  177 int      pgt_ioctl(struct ifnet *, u_long, caddr_t);
  178 void     pgt_obj_bss2scanres(struct pgt_softc *,
  179              struct pgt_obj_bss *, struct wi_scan_res *, uint32_t);
  180 void     node_mark_active_ap(void *, struct ieee80211_node *);
  181 void     node_mark_active_adhoc(void *, struct ieee80211_node *);
  182 void     pgt_watchdog(struct ifnet *);
  183 int      pgt_init(struct ifnet *);
  184 void     pgt_update_hw_from_sw(struct pgt_softc *, int, int);
  185 void     pgt_hostap_handle_mlme(struct pgt_softc *, uint32_t,
  186              struct pgt_obj_mlme *);
  187 void     pgt_update_sw_from_hw(struct pgt_softc *,
  188              struct pgt_async_trap *, struct mbuf *);
  189 int      pgt_newstate(struct ieee80211com *, enum ieee80211_state, int);
  190 int      pgt_drain_tx_queue(struct pgt_softc *, enum pgt_queue);
  191 int      pgt_dma_alloc(struct pgt_softc *);
  192 int      pgt_dma_alloc_queue(struct pgt_softc *sc, enum pgt_queue pq);
  193 void     pgt_dma_free(struct pgt_softc *);
  194 void     pgt_dma_free_queue(struct pgt_softc *sc, enum pgt_queue pq);
  195 void     pgt_shutdown(void *);
  196 void     pgt_power(int, void *);
  197 
  198 void
  199 pgt_write_memory_barrier(struct pgt_softc *sc)
  200 {
  201         bus_space_barrier(sc->sc_iotag, sc->sc_iohandle, 0, 0,
  202             BUS_SPACE_BARRIER_WRITE);
  203 }
  204 
  205 u_int32_t
  206 pgt_read_4(struct pgt_softc *sc, uint16_t offset)
  207 {
  208         return (bus_space_read_4(sc->sc_iotag, sc->sc_iohandle, offset));
  209 }
  210 
  211 void
  212 pgt_write_4(struct pgt_softc *sc, uint16_t offset, uint32_t value)
  213 {
  214         bus_space_write_4(sc->sc_iotag, sc->sc_iohandle, offset, value);
  215 }
  216 
  217 /*
  218  * Write out 4 bytes and cause a PCI flush by reading back in on a
  219  * harmless register.
  220  */
  221 void
  222 pgt_write_4_flush(struct pgt_softc *sc, uint16_t offset, uint32_t value)
  223 {
  224         bus_space_write_4(sc->sc_iotag, sc->sc_iohandle, offset, value);
  225         (void)bus_space_read_4(sc->sc_iotag, sc->sc_iohandle, PGT_REG_INT_EN);
  226 }
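
/*
 * A note on the flush above: PCI bridges are allowed to post (buffer)
 * memory writes, so the write alone is not guaranteed to have reached
 * the device yet.  Reading back any side-effect-free register on the
 * same device forces the posted write out; PGT_REG_INT_EN is simply a
 * harmless register to read.  Callers that need the write to have
 * landed before delaying or changing state use this helper, e.g.:
 *
 *        pgt_write_4_flush(sc, PGT_REG_DEV_INT, PGT_DEV_INT_UPDATE);
 *        DELAY(PGT_WRITEIO_DELAY);
 */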
  227 
  228 /*
  229  * Print the state of events in the queues from an interrupt or a trigger.
  230  */
  231 void
  232 pgt_debug_events(struct pgt_softc *sc, const char *when)
  233 {
  234 #define COUNT(i)                                                        \
  235         letoh32(sc->sc_cb->pcb_driver_curfrag[i]) -                     \
  236         letoh32(sc->sc_cb->pcb_device_curfrag[i])
  237         if (sc->sc_debug & SC_DEBUG_EVENTS)
  238                 DPRINTF(("%s: ev%s: %u %u %u %u %u %u\n",
  239                     sc->sc_dev.dv_xname, when, COUNT(0), COUNT(1), COUNT(2),
  240                     COUNT(3), COUNT(4), COUNT(5)));
  241 #undef COUNT
  242 }
  243 
  244 uint32_t
  245 pgt_queue_frags_pending(struct pgt_softc *sc, enum pgt_queue pq)
  246 {
  247         return (letoh32(sc->sc_cb->pcb_driver_curfrag[pq]) -
  248             letoh32(sc->sc_cb->pcb_device_curfrag[pq]));
  249 }
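
/*
 * The pending count is the difference between the driver's and the
 * device's progress indices for a queue.  Both are free-running 32-bit
 * counters, so the unsigned subtraction stays correct across the 2^32
 * wrap.  Worked example (values are illustrative only): with
 * pcb_driver_curfrag = 7 and pcb_device_curfrag = 3, four fragments are
 * outstanding; with 0x00000001 and 0xffffffff the result is still 2.
 */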
  250 
  251 void
  252 pgt_reinit_rx_desc_frag(struct pgt_softc *sc, struct pgt_desc *pd)
  253 {
  254         pd->pd_fragp->pf_addr = htole32((uint32_t)pd->pd_dmaaddr);
  255         pd->pd_fragp->pf_size = htole16(PGT_FRAG_SIZE);
  256         pd->pd_fragp->pf_flags = 0;
  257 
  258         bus_dmamap_sync(sc->sc_dmat, pd->pd_dmam, 0, pd->pd_dmam->dm_mapsize,
  259             BUS_DMASYNC_POSTWRITE);
  260 }
  261 
  262 int
  263 pgt_load_tx_desc_frag(struct pgt_softc *sc, enum pgt_queue pq,
  264     struct pgt_desc *pd)
  265 {
  266         int error;
  267 
  268         error = bus_dmamap_load(sc->sc_dmat, pd->pd_dmam, pd->pd_mem,
  269             PGT_FRAG_SIZE, NULL, BUS_DMA_NOWAIT);
  270         if (error) {
  271                 DPRINTF(("%s: unable to load %s tx DMA: %d\n",
  272                     sc->sc_dev.dv_xname,
  273                     pgt_queue_is_data(pq) ? "data" : "mgmt", error));
  274                 return (error);
  275         }
  276         pd->pd_dmaaddr = pd->pd_dmam->dm_segs[0].ds_addr;
  277         pd->pd_fragp->pf_addr = htole32((uint32_t)pd->pd_dmaaddr);
  278         pd->pd_fragp->pf_size = htole16(PGT_FRAG_SIZE);
  279         pd->pd_fragp->pf_flags = htole16(0);
  280 
  281         bus_dmamap_sync(sc->sc_dmat, pd->pd_dmam, 0, pd->pd_dmam->dm_mapsize,
  282             BUS_DMASYNC_POSTWRITE);
  283 
  284         return (0);
  285 }
  286 
  287 void
  288 pgt_unload_tx_desc_frag(struct pgt_softc *sc, struct pgt_desc *pd)
  289 {
  290         bus_dmamap_unload(sc->sc_dmat, pd->pd_dmam);
  291         pd->pd_dmaaddr = 0;
  292 }
  293 
  294 int
  295 pgt_load_firmware(struct pgt_softc *sc)
  296 {
  297         int error, reg, dirreg, fwoff, ucodeoff, fwlen;
  298         uint8_t *ucode;
  299         uint32_t *uc;
  300         size_t size;
  301         char *name;
  302 
  303         if (sc->sc_flags & SC_ISL3877)
  304                 name = "pgt-isl3877";
  305         else
  306                 name = "pgt-isl3890";   /* includes isl3880 */
  307 
  308         error = loadfirmware(name, &ucode, &size);
  309 
  310         if (error != 0) {
  311                 DPRINTF(("%s: error %d, could not read microcode %s!\n",
  312                     sc->sc_dev.dv_xname, error, name));
  313                 return (EIO);
  314         }
  315 
  316         if (size & 3) {
   317                 DPRINTF(("%s: bad firmware size %u\n",
   318                     sc->sc_dev.dv_xname, (unsigned int)size));
  319                 free(ucode, M_DEVBUF);
  320                 return (EINVAL);
  321         }
  322 
  323         pgt_reboot(sc);
  324 
  325         fwoff = 0;
  326         ucodeoff = 0;
  327         uc = (uint32_t *)ucode;
  328         reg = PGT_FIRMWARE_INTERNAL_OFFSET;
  329         while (fwoff < size) {
  330                 pgt_write_4_flush(sc, PGT_REG_DIR_MEM_BASE, reg);
  331 
  332                 if ((size - fwoff) >= PGT_DIRECT_MEMORY_SIZE)
  333                         fwlen = PGT_DIRECT_MEMORY_SIZE;
  334                 else
  335                         fwlen = size - fwoff;
  336 
  337                 dirreg = PGT_DIRECT_MEMORY_OFFSET;
  338                 while (fwlen > 4) {
  339                         pgt_write_4(sc, dirreg, uc[ucodeoff]);
  340                         fwoff += 4;
  341                         dirreg += 4;
  342                         reg += 4;
  343                         fwlen -= 4;
  344                         ucodeoff++;
  345                 }
  346                 pgt_write_4_flush(sc, dirreg, uc[ucodeoff]);
  347                 fwoff += 4;
  348                 dirreg += 4;
  349                 reg += 4;
  350                 fwlen -= 4;
  351                 ucodeoff++;
  352         }
  353         DPRINTF(("%s: %d bytes microcode loaded from %s\n",
  354             sc->sc_dev.dv_xname, fwoff, name));
  355 
  356         reg = pgt_read_4(sc, PGT_REG_CTRL_STAT);
  357         reg &= ~(PGT_CTRL_STAT_RESET | PGT_CTRL_STAT_CLOCKRUN);
  358         reg |= PGT_CTRL_STAT_RAMBOOT;
  359         pgt_write_4_flush(sc, PGT_REG_CTRL_STAT, reg);
  360         pgt_write_memory_barrier(sc);
  361         DELAY(PGT_WRITEIO_DELAY);
  362 
  363         reg |= PGT_CTRL_STAT_RESET;
  364         pgt_write_4(sc, PGT_REG_CTRL_STAT, reg);
  365         pgt_write_memory_barrier(sc);
  366         DELAY(PGT_WRITEIO_DELAY);
  367 
  368         reg &= ~PGT_CTRL_STAT_RESET;
  369         pgt_write_4(sc, PGT_REG_CTRL_STAT, reg);
  370         pgt_write_memory_barrier(sc);
  371         DELAY(PGT_WRITEIO_DELAY);
  372 
  373         free(ucode, M_DEVBUF);
  374         
  375         return (0);
  376 }
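
/*
 * Summary of the upload above: the image is copied 32 bits at a time
 * through a sliding window.  PGT_REG_DIR_MEM_BASE selects which region
 * of the card's internal memory the PGT_DIRECT_MEMORY_OFFSET aperture
 * maps, and at most PGT_DIRECT_MEMORY_SIZE bytes are written per
 * window; the last word of each window uses a flushing write so the
 * posted writes land before the base register is moved.  Setting
 * RAMBOOT and pulsing RESET afterwards makes the card boot the freshly
 * loaded RAM image.
 */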
  377 
  378 void
  379 pgt_cleanup_queue(struct pgt_softc *sc, enum pgt_queue pq,
  380     struct pgt_frag pqfrags[])
  381 {
  382         struct pgt_desc *pd;
  383         unsigned int i;
  384 
  385         sc->sc_cb->pcb_device_curfrag[pq] = 0;
  386         i = 0;
  387         /* XXX why only freeq ??? */
  388         TAILQ_FOREACH(pd, &sc->sc_freeq[pq], pd_link) {
  389                 pd->pd_fragnum = i;
  390                 pd->pd_fragp = &pqfrags[i];
  391                 if (pgt_queue_is_rx(pq))
  392                         pgt_reinit_rx_desc_frag(sc, pd);
  393                 i++;
  394         }
  395         sc->sc_freeq_count[pq] = i;
  396         /*
  397          * The ring buffer describes how many free buffers are available from
  398          * the host (for receive queues) or how many are pending (for
  399          * transmit queues).
  400          */
  401         if (pgt_queue_is_rx(pq))
  402                 sc->sc_cb->pcb_driver_curfrag[pq] = htole32(i);
  403         else
  404                 sc->sc_cb->pcb_driver_curfrag[pq] = 0;
  405 }
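
/*
 * Concrete example of the ring setup above (the count is illustrative):
 * for a receive queue with eight descriptors on its free list,
 * pcb_driver_curfrag is set to htole32(8) so the device sees eight
 * empty buffers it may fill; for a transmit queue it is set to 0
 * because the host has not handed the device anything to send yet.
 * The device's own index, pcb_device_curfrag, is reset to 0 either way.
 */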
  406 
  407 /*
  408  * Turn off interrupts, reset the device (possibly loading firmware),
  409  * and put everything in a known state.
  410  */
  411 int
  412 pgt_reset(struct pgt_softc *sc)
  413 {
  414         int error;
  415 
  416         /* disable all interrupts */
  417         pgt_write_4_flush(sc, PGT_REG_INT_EN, 0);
  418         DELAY(PGT_WRITEIO_DELAY);
  419 
  420         /*
  421          * Set up the management receive queue, assuming there are no
  422          * requests in progress.
  423          */
  424         bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
  425             sc->sc_cbdmam->dm_mapsize,
  426             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_PREWRITE);
  427         pgt_cleanup_queue(sc, PGT_QUEUE_DATA_LOW_RX,
  428             &sc->sc_cb->pcb_data_low_rx[0]);
  429         pgt_cleanup_queue(sc, PGT_QUEUE_DATA_LOW_TX,
  430             &sc->sc_cb->pcb_data_low_tx[0]);
  431         pgt_cleanup_queue(sc, PGT_QUEUE_DATA_HIGH_RX,
  432             &sc->sc_cb->pcb_data_high_rx[0]);
  433         pgt_cleanup_queue(sc, PGT_QUEUE_DATA_HIGH_TX,
  434             &sc->sc_cb->pcb_data_high_tx[0]);
  435         pgt_cleanup_queue(sc, PGT_QUEUE_MGMT_RX,
  436             &sc->sc_cb->pcb_mgmt_rx[0]);
  437         pgt_cleanup_queue(sc, PGT_QUEUE_MGMT_TX,
  438             &sc->sc_cb->pcb_mgmt_tx[0]);
  439         bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
  440             sc->sc_cbdmam->dm_mapsize,
  441             BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_PREREAD);
  442 
  443         /* load firmware */
  444         if (sc->sc_flags & SC_NEEDS_FIRMWARE) {
  445                 error = pgt_load_firmware(sc);
  446                 if (error) {
  447                         printf("%s: firmware load failed\n",
  448                             sc->sc_dev.dv_xname);
  449                         return (error);
  450                 }
  451                 sc->sc_flags &= ~SC_NEEDS_FIRMWARE;
  452                 DPRINTF(("%s: firmware loaded\n", sc->sc_dev.dv_xname));
  453         }
  454 
  455         /* upload the control block's DMA address */
  456         pgt_write_4_flush(sc, PGT_REG_CTRL_BLK_BASE,
  457             htole32((uint32_t)sc->sc_cbdmam->dm_segs[0].ds_addr));
  458         DELAY(PGT_WRITEIO_DELAY);
  459 
  460         /* send a reset event */
  461         pgt_write_4_flush(sc, PGT_REG_DEV_INT, PGT_DEV_INT_RESET);
  462         DELAY(PGT_WRITEIO_DELAY);
  463 
  464         /* await only the initialization interrupt */
  465         pgt_write_4_flush(sc, PGT_REG_INT_EN, PGT_INT_STAT_INIT);       
  466         DELAY(PGT_WRITEIO_DELAY);
  467 
  468         return (0);
  469 }
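
/*
 * After the reset event is posted, only PGT_INT_STAT_INIT is enabled,
 * so the first interrupt the host accepts is the card's initialization
 * notification; pgt_init_intr() then clears SC_UNINITIALIZED and wakes
 * whoever is sleeping on &sc->sc_flags.  The full mask,
 * PGT_INT_STAT_SOURCES, is only enabled once the card has answered
 * (see pgt_attach() and pgt_stop()).
 */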
  470 
  471 /*
  472  * If we're trying to reset and the device has seemingly not been detached,
  473  * we'll spend a minute seeing if we can't do the reset.
  474  */
  475 void
  476 pgt_stop(struct pgt_softc *sc, unsigned int flag)
  477 {
  478         struct ieee80211com *ic;
  479         unsigned int wokeup;
  480         int tryagain = 0;
  481 
  482         ic = &sc->sc_ic;
  483 
  484         ic->ic_if.if_flags &= ~IFF_RUNNING;
  485         sc->sc_flags |= SC_UNINITIALIZED;
  486         sc->sc_flags |= flag;
  487 
  488         pgt_drain_tx_queue(sc, PGT_QUEUE_DATA_LOW_TX);
  489         pgt_drain_tx_queue(sc, PGT_QUEUE_DATA_HIGH_TX);
  490         pgt_drain_tx_queue(sc, PGT_QUEUE_MGMT_TX);
  491 
  492 trying_again:
  493         /* disable all interrupts */
  494         pgt_write_4_flush(sc, PGT_REG_INT_EN, 0);
  495         DELAY(PGT_WRITEIO_DELAY);
  496 
  497         /* reboot card */
  498         pgt_reboot(sc);
  499 
  500         do {
  501                 wokeup = 0;
  502                 /*
  503                  * We don't expect to be woken up, just to drop the lock
  504                  * and time out.  Only tx queues can have anything valid
  505                  * on them outside of an interrupt.
  506                  */
  507                 while (!TAILQ_EMPTY(&sc->sc_mgmtinprog)) {
  508                         struct pgt_mgmt_desc *pmd;
  509 
  510                         pmd = TAILQ_FIRST(&sc->sc_mgmtinprog);
  511                         TAILQ_REMOVE(&sc->sc_mgmtinprog, pmd, pmd_link);
  512                         pmd->pmd_error = ENETRESET;
  513                         wakeup_one(pmd);
  514                         if (sc->sc_debug & SC_DEBUG_MGMT)
  515                                 DPRINTF(("%s: queue: mgmt %p <- %#x "
  516                                     "(drained)\n", sc->sc_dev.dv_xname,
  517                                     pmd, pmd->pmd_oid));
  518                         wokeup++;
  519                 }
  520                 if (wokeup > 0) {
  521                         if (flag == SC_NEEDS_RESET && sc->sc_flags & SC_DYING) {
  522                                 sc->sc_flags &= ~flag;
  523                                 return;
  524                         }
  525                 }
  526         } while (wokeup > 0);
  527 
  528         if (flag == SC_NEEDS_RESET) {
  529                 int error;
  530 
  531                 DPRINTF(("%s: resetting\n", sc->sc_dev.dv_xname));
  532                 sc->sc_flags &= ~SC_POWERSAVE;
  533                 sc->sc_flags |= SC_NEEDS_FIRMWARE;
  534                 error = pgt_reset(sc);
  535                 if (error == 0) {
  536                         tsleep(&sc->sc_flags, 0, "pgtres", hz);
  537                         if (sc->sc_flags & SC_UNINITIALIZED) {
  538                                 printf("%s: not responding\n",
  539                                     sc->sc_dev.dv_xname);
  540                                 /* Thud.  It was probably removed. */
  541                                 if (tryagain)
  542                                         panic("pgt went for lunch"); /* XXX */
  543                                 tryagain = 1;
  544                         } else {
  545                                 /* await all interrupts */
  546                                 pgt_write_4_flush(sc, PGT_REG_INT_EN,
  547                                     PGT_INT_STAT_SOURCES);      
  548                                 DELAY(PGT_WRITEIO_DELAY);
  549                                 ic->ic_if.if_flags |= IFF_RUNNING;
  550                         }
  551                 }
  552 
  553                 if (tryagain)
  554                         goto trying_again;
  555 
  556                 sc->sc_flags &= ~flag;
  557                 if (ic->ic_if.if_flags & IFF_RUNNING)
  558                         pgt_update_hw_from_sw(sc,
  559                             ic->ic_state != IEEE80211_S_INIT,
  560                             ic->ic_opmode != IEEE80211_M_MONITOR);
  561         }
  562 
  563         ic->ic_if.if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
  564         ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1);
  565 }
  566 
  567 void
  568 pgt_attach(void *xsc)
  569 {
  570         struct pgt_softc *sc = xsc;
  571         int error;
  572 
  573         /* debug flags */
  574         //sc->sc_debug |= SC_DEBUG_QUEUES;      /* super verbose */
  575         //sc->sc_debug |= SC_DEBUG_MGMT;
  576         sc->sc_debug |= SC_DEBUG_UNEXPECTED;
  577         //sc->sc_debug |= SC_DEBUG_TRIGGER;     /* verbose */
  578         //sc->sc_debug |= SC_DEBUG_EVENTS;      /* super verbose */
  579         //sc->sc_debug |= SC_DEBUG_POWER;
  580         sc->sc_debug |= SC_DEBUG_TRAP;
  581         sc->sc_debug |= SC_DEBUG_LINK;
  582         //sc->sc_debug |= SC_DEBUG_RXANNEX;
  583         //sc->sc_debug |= SC_DEBUG_RXFRAG;
  584         //sc->sc_debug |= SC_DEBUG_RXETHER;
  585 
  586         /* enable card if possible */
  587         if (sc->sc_enable != NULL)
  588                 (*sc->sc_enable)(sc);
  589 
  590         error = pgt_dma_alloc(sc);
  591         if (error)
  592                 return;
  593 
  594         sc->sc_ic.ic_if.if_softc = sc;
  595         TAILQ_INIT(&sc->sc_mgmtinprog);
  596         TAILQ_INIT(&sc->sc_kthread.sck_traps);
  597         sc->sc_flags |= SC_NEEDS_FIRMWARE | SC_UNINITIALIZED;
  598         sc->sc_80211_ioc_auth = IEEE80211_AUTH_OPEN;
  599 
  600         error = pgt_reset(sc);
  601         if (error)
  602                 return;
  603 
  604         tsleep(&sc->sc_flags, 0, "pgtres", hz);
  605         if (sc->sc_flags & SC_UNINITIALIZED) {
  606                 printf("%s: not responding\n", sc->sc_dev.dv_xname);
  607                 return;
  608         } else {
  609                 /* await all interrupts */
  610                 pgt_write_4_flush(sc, PGT_REG_INT_EN, PGT_INT_STAT_SOURCES);
  611                 DELAY(PGT_WRITEIO_DELAY);
  612         }
  613 
  614         error = pgt_net_attach(sc);
  615         if (error)
  616                 return;
  617 
  618         if (kthread_create(pgt_per_device_kthread, sc, NULL,
  619             sc->sc_dev.dv_xname) != 0)
  620                 return;
  621 
  622         ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1);
  623 }
  624 
  625 int
  626 pgt_detach(struct pgt_softc *sc)
  627 {
  628         if (sc->sc_flags & SC_NEEDS_FIRMWARE || sc->sc_flags & SC_UNINITIALIZED)
  629                 /* device was not initialized correctly, so leave early */
  630                 goto out;
  631 
  632         /* stop card */
  633         pgt_stop(sc, SC_DYING);
  634         pgt_reboot(sc);
  635 
  636         /*
  637          * Disable shutdown and power hooks
  638          */
  639         if (sc->sc_shutdown_hook != NULL)
  640                 shutdownhook_disestablish(sc->sc_shutdown_hook);
  641         if (sc->sc_power_hook != NULL)
  642                 powerhook_disestablish(sc->sc_power_hook);
  643 
  644         ieee80211_ifdetach(&sc->sc_ic.ic_if);
  645         if_detach(&sc->sc_ic.ic_if);
  646 
  647 out:
  648         /* disable card if possible */
  649         if (sc->sc_disable != NULL)
  650                 (*sc->sc_disable)(sc);
  651 
  652         pgt_dma_free(sc);
  653 
  654         return (0);
  655 }
  656 
  657 void
  658 pgt_reboot(struct pgt_softc *sc)
  659 {
  660         uint32_t reg;
  661 
  662         reg = pgt_read_4(sc, PGT_REG_CTRL_STAT);
  663         reg &= ~(PGT_CTRL_STAT_RESET | PGT_CTRL_STAT_RAMBOOT);
  664         pgt_write_4(sc, PGT_REG_CTRL_STAT, reg);
  665         pgt_write_memory_barrier(sc);
  666         DELAY(PGT_WRITEIO_DELAY);
  667 
  668         reg |= PGT_CTRL_STAT_RESET;
  669         pgt_write_4(sc, PGT_REG_CTRL_STAT, reg);
  670         pgt_write_memory_barrier(sc);
  671         DELAY(PGT_WRITEIO_DELAY);
  672 
  673         reg &= ~PGT_CTRL_STAT_RESET;
  674         pgt_write_4(sc, PGT_REG_CTRL_STAT, reg);
  675         pgt_write_memory_barrier(sc);
  676         DELAY(PGT_RESET_DELAY);
  677 }
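
/*
 * Unlike the post-upload sequence at the end of pgt_load_firmware(),
 * pgt_reboot() clears RAMBOOT before pulsing RESET (RAMBOOT is what
 * pgt_load_firmware() sets to run the downloaded image), so the card is
 * not left running a previously loaded RAM image; callers that want a
 * working device afterwards set SC_NEEDS_FIRMWARE and go through
 * pgt_reset() again, as pgt_stop() does.  The longer PGT_RESET_DELAY
 * gives the card time to settle after RESET is de-asserted.
 */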
  678 
  679 void
  680 pgt_init_intr(struct pgt_softc *sc)
  681 {
  682         if ((sc->sc_flags & SC_UNINITIALIZED) == 0) {
  683                 if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
  684                         DPRINTF(("%s: spurious initialization\n",
  685                             sc->sc_dev.dv_xname));
  686         } else {
  687                 sc->sc_flags &= ~SC_UNINITIALIZED;
  688                 wakeup(&sc->sc_flags);
  689         }
  690 }
  691 
  692 /*
   693  * Service the queue rings.  If called with the hack argument nonzero
   694  * (see PGT_BUGGY_INTERRUPT_RECOVERY), the data receive queues are skipped.
  695  */
  696 void
  697 pgt_update_intr(struct pgt_softc *sc, int hack)
  698 {
  699         /* priority order */
  700         enum pgt_queue pqs[PGT_QUEUE_COUNT] = {
  701             PGT_QUEUE_MGMT_TX, PGT_QUEUE_MGMT_RX, 
  702             PGT_QUEUE_DATA_HIGH_TX, PGT_QUEUE_DATA_HIGH_RX, 
  703             PGT_QUEUE_DATA_LOW_TX, PGT_QUEUE_DATA_LOW_RX
  704         };
  705         struct mbuf *m;
  706         uint32_t npend;
  707         unsigned int dirtycount;
  708         int i;
  709 
  710         bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
  711             sc->sc_cbdmam->dm_mapsize,
  712             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_PREWRITE);
  713         pgt_debug_events(sc, "intr");
  714         /*
  715          * Check for completion of tx in their dirty queues.
  716          * Check completion of rx into their dirty queues.
  717          */
  718         for (i = 0; i < PGT_QUEUE_COUNT; i++) {
  719                 size_t qdirty, qfree, qtotal;
  720 
  721                 qdirty = sc->sc_dirtyq_count[pqs[i]];
  722                 qfree = sc->sc_freeq_count[pqs[i]];
  723                 qtotal = qdirty + qfree;
  724                 /*
  725                  * We want the wrap-around here.
  726                  */
  727                 if (pgt_queue_is_rx(pqs[i])) {
  728                         int data;
  729 
  730                         data = pgt_queue_is_data(pqs[i]);
  731 #ifdef PGT_BUGGY_INTERRUPT_RECOVERY
  732                         if (hack && data)
  733                                 continue;
  734 #endif
  735                         npend = pgt_queue_frags_pending(sc, pqs[i]);
  736                         /*
  737                          * Receive queues clean up below, so qfree must
  738                          * always be qtotal (qdirty is 0).
  739                          */
  740                         if (npend > qfree) {
  741                                 if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
  742                                         DPRINTF(("%s: rx queue [%u] "
  743                                             "overflowed by %u\n",
  744                                             sc->sc_dev.dv_xname, pqs[i],
  745                                             npend - qfree));
  746                                 sc->sc_flags |= SC_INTR_RESET;
  747                                 break;
  748                         }
  749                         while (qfree-- > npend)
  750                                 pgt_rxdone(sc, pqs[i]);
  751                 } else {
  752                         npend = pgt_queue_frags_pending(sc, pqs[i]);
  753                         if (npend > qdirty) {
  754                                 if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
  755                                         DPRINTF(("%s: tx queue [%u] "
  756                                             "underflowed by %u\n",
  757                                             sc->sc_dev.dv_xname, pqs[i],
  758                                             npend - qdirty));
  759                                 sc->sc_flags |= SC_INTR_RESET;
  760                                 break;
  761                         }
  762                         /*
  763                          * If the free queue was empty, or the data transmit
  764                          * queue just became empty, wake up any waiters.
  765                          */
  766                         if (qdirty > npend) {
  767                                 if (pgt_queue_is_data(pqs[i])) {
  768                                         sc->sc_ic.ic_if.if_timer = 0;
  769                                         sc->sc_ic.ic_if.if_flags &=
  770                                             ~IFF_OACTIVE;
  771                                 }
  772                                 while (qdirty-- > npend)
  773                                         pgt_txdone(sc, pqs[i]);
  774                         }
  775                 }
  776         }
  777 
  778         /*
  779          * This is the deferred completion for received management frames
  780          * and where we queue network frames for stack input. 
  781          */
  782         dirtycount = sc->sc_dirtyq_count[PGT_QUEUE_MGMT_RX];
  783         while (!TAILQ_EMPTY(&sc->sc_dirtyq[PGT_QUEUE_MGMT_RX])) {
  784                 struct pgt_mgmt_desc *pmd;
  785 
  786                 pmd = TAILQ_FIRST(&sc->sc_mgmtinprog);
  787                 /*
  788                  * If there is no mgmt request in progress or the operation
  789                  * returned is explicitly a trap, this pmd will essentially
  790                  * be ignored.
  791                  */
  792                 pgt_mgmtrx_completion(sc, pmd);
  793         }
  794         sc->sc_cb->pcb_driver_curfrag[PGT_QUEUE_MGMT_RX] =
  795             htole32(dirtycount +
  796                 letoh32(sc->sc_cb->pcb_driver_curfrag[PGT_QUEUE_MGMT_RX]));
  797 
  798         dirtycount = sc->sc_dirtyq_count[PGT_QUEUE_DATA_HIGH_RX];
  799         while (!TAILQ_EMPTY(&sc->sc_dirtyq[PGT_QUEUE_DATA_HIGH_RX])) {
  800                 if ((m = pgt_datarx_completion(sc, PGT_QUEUE_DATA_HIGH_RX)))
  801                         pgt_input_frames(sc, m);
  802         }
  803         sc->sc_cb->pcb_driver_curfrag[PGT_QUEUE_DATA_HIGH_RX] =
  804             htole32(dirtycount +
  805                 letoh32(sc->sc_cb->pcb_driver_curfrag[PGT_QUEUE_DATA_HIGH_RX]));
  806 
  807         dirtycount = sc->sc_dirtyq_count[PGT_QUEUE_DATA_LOW_RX];
  808         while (!TAILQ_EMPTY(&sc->sc_dirtyq[PGT_QUEUE_DATA_LOW_RX])) {
  809                 if ((m = pgt_datarx_completion(sc, PGT_QUEUE_DATA_LOW_RX)))
  810                         pgt_input_frames(sc, m);
  811         }
  812         sc->sc_cb->pcb_driver_curfrag[PGT_QUEUE_DATA_LOW_RX] =
  813             htole32(dirtycount +
  814                 letoh32(sc->sc_cb->pcb_driver_curfrag[PGT_QUEUE_DATA_LOW_RX]));
  815 
  816         /*
  817          * Write out what we've finished with.
  818          */
  819         bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
  820             sc->sc_cbdmam->dm_mapsize,
  821             BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_PREREAD);
  822 }
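
/*
 * Accounting summary for the loop above: on a receive queue every
 * descriptor normally sits on the free list, so once the device's
 * pending count npend drops below qfree, (qfree - npend) buffers have
 * been filled and pgt_rxdone() moves each of them to the dirty list.
 * On a transmit queue, (qdirty - npend) descriptors have completed and
 * pgt_txdone() returns them to the free list.  The dirty receive lists
 * are then drained via pgt_mgmtrx_completion()/pgt_datarx_completion(),
 * and pcb_driver_curfrag is advanced by the number of buffers consumed,
 * which hands those buffers back to the device.
 */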
  823 
  824 struct mbuf *
  825 pgt_ieee80211_encap(struct pgt_softc *sc, struct ether_header *eh,
  826     struct mbuf *m, struct ieee80211_node **ni)
  827 {
  828         struct ieee80211com *ic;
  829         struct ieee80211_frame *frame;
  830         struct llc *snap;
  831 
  832         ic = &sc->sc_ic;
  833         if (ni != NULL && ic->ic_opmode == IEEE80211_M_MONITOR) {
  834                 *ni = ieee80211_ref_node(ic->ic_bss);
  835                 (*ni)->ni_inact = 0;
  836                 return (m);
  837         }
  838 
  839         M_PREPEND(m, sizeof(*frame) + sizeof(*snap), M_DONTWAIT);
  840         if (m != NULL)
  841                 m = m_pullup(m, sizeof(*frame) + sizeof(*snap));
  842         if (m == NULL)
  843                 return (m);
  844         frame = mtod(m, struct ieee80211_frame *);
  845         snap = (struct llc *)&frame[1];
  846         if (ni != NULL) {
  847                 if (ic->ic_opmode == IEEE80211_M_STA) {
  848                         *ni = ieee80211_ref_node(ic->ic_bss);
  849                 } else {
  850                         *ni = ieee80211_find_node(ic, eh->ether_shost);
  851                         /*
  852                          * Make up associations for ad-hoc mode.  To support
  853                          * ad-hoc WPA, we'll need to maintain a bounded
  854                          * pool of ad-hoc stations.
  855                          */
  856                         if (*ni == NULL &&
  857                             ic->ic_opmode != IEEE80211_M_HOSTAP) {
  858                                 *ni = ieee80211_dup_bss(ic, eh->ether_shost);
  859                                 if (*ni != NULL) {
  860                                         (*ni)->ni_associd = 1;
  861                                         ic->ic_newassoc(ic, *ni, 1);
  862                                 }
  863                         }
  864                         if (*ni == NULL) {
  865                                 m_freem(m);
  866                                 return (NULL);
  867                         }
  868                 }
  869                 (*ni)->ni_inact = 0;
  870         }
  871         snap->llc_dsap = snap->llc_ssap = LLC_SNAP_LSAP;
  872         snap->llc_control = LLC_UI;
  873         snap->llc_snap.org_code[0] = 0;
  874         snap->llc_snap.org_code[1] = 0;
  875         snap->llc_snap.org_code[2] = 0;
  876         snap->llc_snap.ether_type = eh->ether_type;
  877         frame->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_DATA;
  878         /* Doesn't look like much of the 802.11 header is available. */
  879         *(uint16_t *)frame->i_dur = *(uint16_t *)frame->i_seq = 0;
  880         /*
  881          * Translate the addresses; WDS is not handled.
  882          */
  883         switch (ic->ic_opmode) {
  884         case IEEE80211_M_STA:
  885                 frame->i_fc[1] = IEEE80211_FC1_DIR_FROMDS;
  886                 IEEE80211_ADDR_COPY(frame->i_addr1, eh->ether_dhost);
  887                 IEEE80211_ADDR_COPY(frame->i_addr2, ic->ic_bss->ni_bssid);
  888                 IEEE80211_ADDR_COPY(frame->i_addr3, eh->ether_shost);
  889                 break;
  890         case IEEE80211_M_IBSS:
  891         case IEEE80211_M_AHDEMO:
  892                 frame->i_fc[1] = IEEE80211_FC1_DIR_NODS;
  893                 IEEE80211_ADDR_COPY(frame->i_addr1, eh->ether_dhost);
  894                 IEEE80211_ADDR_COPY(frame->i_addr2, eh->ether_shost);
  895                 IEEE80211_ADDR_COPY(frame->i_addr3, ic->ic_bss->ni_bssid);
  896                 break;
  897         case IEEE80211_M_HOSTAP:
  898                 /* HostAP forwarding defaults to being done on firmware. */
  899                 frame->i_fc[1] = IEEE80211_FC1_DIR_TODS;
  900                 IEEE80211_ADDR_COPY(frame->i_addr1, ic->ic_bss->ni_bssid);
  901                 IEEE80211_ADDR_COPY(frame->i_addr2, eh->ether_shost);
  902                 IEEE80211_ADDR_COPY(frame->i_addr3, eh->ether_dhost);
  903                 break;
  904         default:
  905                 break;
  906         }
  907         return (m);
  908 }
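
/*
 * This is used on the receive path (pgt_input_frames()): the firmware
 * delivers data frames in Ethernet form, and the 802.11 header is
 * rebuilt here before the frame is passed to ieee80211_input().  The
 * address mapping above, with DA/SA taken from the Ethernet header:
 *
 *        STA    (FromDS): addr1 = DA,    addr2 = BSSID, addr3 = SA
 *        IBSS   (NoDS):   addr1 = DA,    addr2 = SA,    addr3 = BSSID
 *        HostAP (ToDS):   addr1 = BSSID, addr2 = SA,    addr3 = DA
 *
 * These are the standard three-address data frame layouts; WDS
 * (four-address) frames are not generated.
 */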
  909 
  910 void
  911 pgt_input_frames(struct pgt_softc *sc, struct mbuf *m)
  912 {
  913         struct ether_header eh;
  914         struct ifnet *ifp;
  915         struct ieee80211_channel *chan;
  916         struct ieee80211_node *ni;
  917         struct ieee80211com *ic;
  918         struct pgt_rx_annex *pra;
  919         struct pgt_rx_header *pha;
  920         struct mbuf *next;
  921         unsigned int n;
  922         uint32_t rstamp;
  923         uint8_t rate, rssi;
  924 
  925         ic = &sc->sc_ic;
  926         ifp = &ic->ic_if;
  927         for (next = m; m != NULL; m = next) {
  928                 next = m->m_nextpkt;
  929                 m->m_nextpkt = NULL;
  930 
  931                 if (ic->ic_opmode == IEEE80211_M_MONITOR) {
  932                         if (m->m_len < sizeof(*pha)) {
  933                                 m = m_pullup(m, sizeof(*pha));
  934                                 if (m == NULL) {
  935                                         if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
  936                                                 DPRINTF(("%s: m_pullup "
  937                                                     "failure\n",
  938                                                     sc->sc_dev.dv_xname));
  939                                         ifp->if_ierrors++;
  940                                         continue;
  941                                 }
  942                         }
  943                         pha = mtod(m, struct pgt_rx_header *);
  944                         pra = NULL;
  945                         goto input;
  946                 }
  947 
  948                 if (m->m_len < sizeof(*pra)) {
  949                         m = m_pullup(m, sizeof(*pra));
  950                         if (m == NULL) {
  951                                 if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
  952                                         DPRINTF(("%s: m_pullup failure\n",
  953                                             sc->sc_dev.dv_xname));
  954                                 ifp->if_ierrors++;
  955                                 continue;
  956                         }
  957                 }
  958                 pra = mtod(m, struct pgt_rx_annex *);
  959                 pha = &pra->pra_header;
  960                 if (sc->sc_debug & SC_DEBUG_RXANNEX)
  961                         DPRINTF(("%s: rx annex: ? %04x "
  962                             "len %u clock %u flags %02x ? %02x rate %u ? %02x "
  963                             "freq %u ? %04x rssi %u pad %02x%02x%02x\n",
  964                             sc->sc_dev.dv_xname,
  965                             letoh16(pha->pra_unknown0),
  966                             letoh16(pha->pra_length),
  967                             letoh32(pha->pra_clock), pha->pra_flags,
  968                             pha->pra_unknown1, pha->pra_rate,
  969                             pha->pra_unknown2, letoh32(pha->pra_frequency),
  970                             pha->pra_unknown3, pha->pra_rssi,
  971                             pha->pra_pad[0], pha->pra_pad[1], pha->pra_pad[2]));
  972                 if (sc->sc_debug & SC_DEBUG_RXETHER)
  973                         DPRINTF(("%s: rx ether: %s < %s 0x%04x\n",
  974                             sc->sc_dev.dv_xname,
  975                             ether_sprintf(pra->pra_ether_dhost),
  976                             ether_sprintf(pra->pra_ether_shost),
  977                             ntohs(pra->pra_ether_type)));
  978 
  979                 memcpy(eh.ether_dhost, pra->pra_ether_dhost, ETHER_ADDR_LEN);
  980                 memcpy(eh.ether_shost, pra->pra_ether_shost, ETHER_ADDR_LEN);
  981                 eh.ether_type = pra->pra_ether_type;
  982 
  983 input:
  984                 /*
  985                  * This flag is set if e.g. packet could not be decrypted.
  986                  */
  987                 if (pha->pra_flags & PRA_FLAG_BAD) {
  988                         ifp->if_ierrors++;
  989                         m_freem(m);
  990                         continue;
  991                 }
  992 
  993                 /*
  994                  * After getting what we want, chop off the annex, then
  995                  * turn into something that looks like it really was
  996                  * 802.11.
  997                  */
  998                 rssi = pha->pra_rssi;
  999                 rstamp = letoh32(pha->pra_clock);
 1000                 rate = pha->pra_rate;
 1001                 n = ieee80211_mhz2ieee(letoh32(pha->pra_frequency), 0);
 1002                 if (n <= IEEE80211_CHAN_MAX)
 1003                         chan = &ic->ic_channels[n];
 1004                 else
 1005                         chan = ic->ic_bss->ni_chan;
 1006                 /* Send to 802.3 listeners. */
 1007                 if (pra) {
 1008                         m_adj(m, sizeof(*pra));
 1009                 } else
 1010                         m_adj(m, sizeof(*pha));
 1011 
 1012                 m = pgt_ieee80211_encap(sc, &eh, m, &ni);
 1013                 if (m != NULL) {
 1014 #if NBPFILTER > 0
 1015                         if (sc->sc_drvbpf != NULL) {
 1016                                 struct mbuf mb;
 1017                                 struct pgt_rx_radiotap_hdr *tap = &sc->sc_rxtap;
 1018 
 1019                                 tap->wr_flags = 0;
 1020                                 tap->wr_chan_freq = htole16(chan->ic_freq);
 1021                                 tap->wr_chan_flags = htole16(chan->ic_flags);
 1022                                 tap->wr_rssi = rssi;
 1023                                 tap->wr_max_rssi = ic->ic_max_rssi;
 1024 
 1025                                 mb.m_data = (caddr_t)tap;
 1026                                 mb.m_len = sc->sc_rxtap_len;
 1027                                 mb.m_next = m;
 1028                                 mb.m_nextpkt = NULL;
 1029                                 mb.m_type = 0;
 1030                                 mb.m_flags = 0;
 1031                                 bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_IN);
 1032                         }
 1033 #endif
 1034                         ni->ni_rssi = rssi;
 1035                         ni->ni_rstamp = rstamp;
 1036                         ieee80211_input(ifp, m, ni, rssi, rstamp);
 1037                         /*
 1038                          * The frame may have caused the node to be marked for
 1039                          * reclamation (e.g. in response to a DEAUTH message)
  1040                          * so use ieee80211_release_node() instead of ieee80211_unref_node().
 1041                          */
 1042                         if (ni == ic->ic_bss)
 1043                                 ieee80211_unref_node(&ni);
 1044                         else
 1045                                 ieee80211_release_node(&sc->sc_ic, ni);
 1046                 } else {
 1047                         ifp->if_ierrors++;
 1048                 }
 1049         }
 1050 }
 1051 
 1052 void
 1053 pgt_wakeup_intr(struct pgt_softc *sc)
 1054 {
 1055         int shouldupdate;
 1056         int i;
 1057 
 1058         shouldupdate = 0;
 1059         /* Check for any queues being empty before updating. */
 1060         bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
 1061             sc->sc_cbdmam->dm_mapsize,
 1062             BUS_DMASYNC_POSTREAD);
 1063         for (i = 0; !shouldupdate && i < PGT_QUEUE_COUNT; i++) {
 1064                 if (pgt_queue_is_tx(i))
 1065                         shouldupdate = pgt_queue_frags_pending(sc, i);
 1066                 else
 1067                         shouldupdate = pgt_queue_frags_pending(sc, i) <
 1068                             sc->sc_freeq_count[i];
 1069         }
 1070         if (!TAILQ_EMPTY(&sc->sc_mgmtinprog))
 1071                 shouldupdate = 1;
 1072         if (sc->sc_debug & SC_DEBUG_POWER)
 1073                 DPRINTF(("%s: wakeup interrupt (update = %d)\n",
 1074                     sc->sc_dev.dv_xname, shouldupdate));
 1075         sc->sc_flags &= ~SC_POWERSAVE;
 1076         if (shouldupdate) {
 1077                 pgt_write_4_flush(sc, PGT_REG_DEV_INT, PGT_DEV_INT_UPDATE);
 1078                 DELAY(PGT_WRITEIO_DELAY);
 1079         }
 1080 }
 1081 
 1082 void
 1083 pgt_sleep_intr(struct pgt_softc *sc)
 1084 {
 1085         int allowed;
 1086         int i;
 1087 
 1088         allowed = 1;
 1089         /* Check for any queues not being empty before allowing. */
 1090         bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
 1091             sc->sc_cbdmam->dm_mapsize,
 1092             BUS_DMASYNC_POSTREAD);
 1093         for (i = 0; allowed && i < PGT_QUEUE_COUNT; i++) {
 1094                 if (pgt_queue_is_tx(i))
 1095                         allowed = pgt_queue_frags_pending(sc, i) == 0;
 1096                 else
 1097                         allowed = pgt_queue_frags_pending(sc, i) >=
 1098                             sc->sc_freeq_count[i];
 1099         }
 1100         if (!TAILQ_EMPTY(&sc->sc_mgmtinprog))
 1101                 allowed = 0;
 1102         if (sc->sc_debug & SC_DEBUG_POWER)
 1103                 DPRINTF(("%s: sleep interrupt (allowed = %d)\n",
 1104                     sc->sc_dev.dv_xname, allowed));
 1105         if (allowed && sc->sc_ic.ic_flags & IEEE80211_F_PMGTON) {
 1106                 sc->sc_flags |= SC_POWERSAVE;
 1107                 pgt_write_4_flush(sc, PGT_REG_DEV_INT, PGT_DEV_INT_SLEEP);
 1108                 DELAY(PGT_WRITEIO_DELAY);
 1109         }
 1110 }
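
/*
 * pgt_wakeup_intr() and pgt_sleep_intr() are mirror images: sleep is
 * only permitted when every transmit queue is empty, every receive
 * queue still has all of its free buffers posted and no management
 * request is in progress, and then only if the stack has enabled
 * IEEE80211_F_PMGTON.  A wakeup sends an explicit PGT_DEV_INT_UPDATE
 * whenever any of those conditions no longer holds.
 */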
 1111 
 1112 void
 1113 pgt_empty_traps(struct pgt_softc_kthread *sck)
 1114 {
 1115         struct pgt_async_trap *pa;
 1116         struct mbuf *m;
 1117 
 1118         while (!TAILQ_EMPTY(&sck->sck_traps)) {
 1119                 pa = TAILQ_FIRST(&sck->sck_traps);
 1120                 TAILQ_REMOVE(&sck->sck_traps, pa, pa_link);
 1121                 m = pa->pa_mbuf;
 1122                 m_freem(m);
 1123         }
 1124 }
 1125 
 1126 void
 1127 pgt_per_device_kthread(void *argp)
 1128 {
 1129         struct pgt_softc *sc;
 1130         struct pgt_softc_kthread *sck;
 1131         struct pgt_async_trap *pa;
 1132         struct mbuf *m;
 1133         int s;
 1134 
 1135         sc = argp;
 1136         sck = &sc->sc_kthread;
 1137         while (!sck->sck_exit) {
 1138                 if (!sck->sck_update && !sck->sck_reset &&
 1139                     TAILQ_EMPTY(&sck->sck_traps))
 1140                         tsleep(&sc->sc_kthread, 0, "pgtkth", 0);
 1141                 if (sck->sck_reset) {
 1142                         DPRINTF(("%s: [thread] async reset\n",
 1143                             sc->sc_dev.dv_xname));
 1144                         sck->sck_reset = 0;
 1145                         sck->sck_update = 0;
 1146                         pgt_empty_traps(sck);
 1147                         s = splnet();
 1148                         pgt_stop(sc, SC_NEEDS_RESET);
 1149                         splx(s);
 1150                 } else if (!TAILQ_EMPTY(&sck->sck_traps)) {
 1151                         DPRINTF(("%s: [thread] got a trap\n",
 1152                             sc->sc_dev.dv_xname));
 1153                         pa = TAILQ_FIRST(&sck->sck_traps);
 1154                         TAILQ_REMOVE(&sck->sck_traps, pa, pa_link);
 1155                         m = pa->pa_mbuf;
 1156                         m_adj(m, sizeof(*pa));
 1157                         pgt_update_sw_from_hw(sc, pa, m);
 1158                         m_freem(m);
 1159                 } else if (sck->sck_update) {
 1160                         sck->sck_update = 0;
 1161                         pgt_update_sw_from_hw(sc, NULL, NULL);
 1162                 }
 1163         }
 1164         pgt_empty_traps(sck);
 1165         kthread_exit(0);
 1166 }
 1167 
 1168 void
 1169 pgt_async_reset(struct pgt_softc *sc)
 1170 {
 1171         if (sc->sc_flags & (SC_DYING | SC_NEEDS_RESET))
 1172                 return;
 1173         sc->sc_kthread.sck_reset = 1;
 1174         wakeup(&sc->sc_kthread);
 1175 }
 1176 
 1177 void
 1178 pgt_async_update(struct pgt_softc *sc)
 1179 {
 1180         if (sc->sc_flags & SC_DYING)
 1181                 return;
 1182         sc->sc_kthread.sck_update = 1;
 1183         wakeup(&sc->sc_kthread);
 1184 }
 1185 
 1186 int
 1187 pgt_intr(void *arg)
 1188 {
 1189         struct pgt_softc *sc;
 1190         struct ifnet *ifp;
 1191         u_int32_t reg;
 1192 
 1193         sc = arg;
 1194         ifp = &sc->sc_ic.ic_if;
 1195 
 1196         /*
 1197          * Here the Linux driver ands in the value of the INT_EN register,
 1198          * and masks off everything but the documented interrupt bits.  Why?
 1199          *
 1200          * Unknown bit 0x4000 is set upon initialization, 0x8000000 some
 1201          * other times.
 1202          */
 1203         if (sc->sc_ic.ic_flags & IEEE80211_F_PMGTON &&
 1204             sc->sc_flags & SC_POWERSAVE) {
 1205                 /*
 1206                  * Don't try handling the interrupt in sleep mode.
 1207                  */
 1208                 reg = pgt_read_4(sc, PGT_REG_CTRL_STAT);
 1209                 if (reg & PGT_CTRL_STAT_SLEEPMODE)
 1210                         return (0);
 1211         }
 1212         reg = pgt_read_4(sc, PGT_REG_INT_STAT);
 1213         if (reg == 0)
 1214                 return (0); /* This interrupt is not from us */
 1215 
 1216         pgt_write_4_flush(sc, PGT_REG_INT_ACK, reg);
 1217         if (reg & PGT_INT_STAT_INIT)
 1218                 pgt_init_intr(sc);
 1219         if (reg & PGT_INT_STAT_UPDATE) {
 1220                 pgt_update_intr(sc, 0);
 1221                 /*
 1222                  * If we got an update, it's not really asleep.
 1223                  */
 1224                 sc->sc_flags &= ~SC_POWERSAVE;
 1225                 /*
 1226                  * Pretend I have any idea what the documentation
 1227                  * would say, and just give it a shot sending an
 1228                  * "update" after acknowledging the interrupt
 1229                  * bits and writing out the new control block.
 1230                  */
 1231                 pgt_write_4_flush(sc, PGT_REG_DEV_INT, PGT_DEV_INT_UPDATE);
 1232                 DELAY(PGT_WRITEIO_DELAY);
 1233         }
 1234         if (reg & PGT_INT_STAT_SLEEP && !(reg & PGT_INT_STAT_WAKEUP))
 1235                 pgt_sleep_intr(sc);
 1236         if (reg & PGT_INT_STAT_WAKEUP)
 1237                 pgt_wakeup_intr(sc);
 1238 
 1239         if (sc->sc_flags & SC_INTR_RESET) {
 1240                 sc->sc_flags &= ~SC_INTR_RESET;
 1241                 pgt_async_reset(sc);
 1242         }
 1243 
 1244         if (reg & ~PGT_INT_STAT_SOURCES && sc->sc_debug & SC_DEBUG_UNEXPECTED) {
 1245                 DPRINTF(("%s: unknown interrupt bits %#x (stat %#x)\n",
 1246                     sc->sc_dev.dv_xname,
 1247                     reg & ~PGT_INT_STAT_SOURCES,
 1248                     pgt_read_4(sc, PGT_REG_CTRL_STAT)));
 1249         }
 1250 
 1251         if (!IFQ_IS_EMPTY(&ifp->if_snd))
 1252                 pgt_start(ifp);
 1253 
 1254         return (1);
 1255 }
 1256 
 1257 void
 1258 pgt_txdone(struct pgt_softc *sc, enum pgt_queue pq)
 1259 {
 1260         struct pgt_desc *pd;
 1261 
 1262         pd = TAILQ_FIRST(&sc->sc_dirtyq[pq]);
 1263         TAILQ_REMOVE(&sc->sc_dirtyq[pq], pd, pd_link);
 1264         sc->sc_dirtyq_count[pq]--;
 1265         TAILQ_INSERT_TAIL(&sc->sc_freeq[pq], pd, pd_link);
 1266         sc->sc_freeq_count[pq]++;
 1267         bus_dmamap_sync(sc->sc_dmat, pd->pd_dmam, 0,
 1268             pd->pd_dmam->dm_mapsize,
 1269             BUS_DMASYNC_POSTREAD);
 1270         /* Management frames want completion information. */
 1271         if (sc->sc_debug & SC_DEBUG_QUEUES) {
 1272                 DPRINTF(("%s: queue: tx %u <- [%u]\n",
 1273                     sc->sc_dev.dv_xname, pd->pd_fragnum, pq));
 1274                 if (sc->sc_debug & SC_DEBUG_MGMT && pgt_queue_is_mgmt(pq)) {
 1275                         struct pgt_mgmt_frame *pmf;
 1276 
 1277                         pmf = (struct pgt_mgmt_frame *)pd->pd_mem;
 1278                         DPRINTF(("%s: queue: txmgmt %p <- "
 1279                             "(ver %u, op %u, flags %#x)\n",
 1280                             sc->sc_dev.dv_xname,
 1281                             pd, pmf->pmf_version, pmf->pmf_operation,
 1282                             pmf->pmf_flags));
 1283                 }
 1284         }
 1285         pgt_unload_tx_desc_frag(sc, pd);
 1286 }
 1287 
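      /*
       * The device has filled in an rx fragment: move its descriptor
       * from the free queue to the dirty queue so a later completion
       * pass can examine it.
       */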
 1288 void
 1289 pgt_rxdone(struct pgt_softc *sc, enum pgt_queue pq)
 1290 {
 1291         struct pgt_desc *pd;
 1292 
 1293         pd = TAILQ_FIRST(&sc->sc_freeq[pq]);
 1294         TAILQ_REMOVE(&sc->sc_freeq[pq], pd, pd_link);
 1295         sc->sc_freeq_count[pq]--;
 1296         TAILQ_INSERT_TAIL(&sc->sc_dirtyq[pq], pd, pd_link);
 1297         sc->sc_dirtyq_count[pq]++;
 1298         bus_dmamap_sync(sc->sc_dmat, pd->pd_dmam, 0,
 1299             pd->pd_dmam->dm_mapsize,
 1300             BUS_DMASYNC_POSTREAD);
 1301         if (sc->sc_debug & SC_DEBUG_QUEUES)
 1302                 DPRINTF(("%s: queue: rx %u <- [%u]\n",
 1303                     sc->sc_dev.dv_xname, pd->pd_fragnum, pq));
 1304         if (sc->sc_debug & SC_DEBUG_UNEXPECTED &&
 1305             pd->pd_fragp->pf_flags & ~htole16(PF_FLAG_MF))
 1306                 DPRINTF(("%s: unknown flags on rx [%u]: %#x\n",
 1307                     sc->sc_dev.dv_xname, pq, letoh16(pd->pd_fragp->pf_flags)));
 1308 }
 1309 
 1310 /*
 1311  * Traps are generally used for the firmware to report changes in state
 1312  * back to the host.  Mostly this processes changes in link state, but
 1313  * it also needs to be used to initiate WPA and other authentication
 1314  * schemes, whether acting as client (station) or server (access point).
 1315  */
 1316 void
 1317 pgt_trap_received(struct pgt_softc *sc, uint32_t oid, void *trapdata,
 1318     size_t size)
 1319 {
 1320         struct pgt_async_trap *pa;
 1321         struct mbuf *m;
 1322         char *p;
 1323         size_t total;
 1324 
 1325         if (sc->sc_flags & SC_DYING)
 1326                 return;
 1327 
 1328         total = sizeof(oid) + size + sizeof(struct pgt_async_trap);
 1329         if (total >= MINCLSIZE) {
 1330                 MGETHDR(m, M_DONTWAIT, MT_DATA);
 1331                 if (m == NULL)
 1332                         return;
 1333                 MCLGET(m, M_DONTWAIT);
 1334                 if (!(m->m_flags & M_EXT)) {
 1335                         m_freem(m);
 1336                         m = NULL;
 1337                 }
 1338         } else
 1339                 m = m_get(M_DONTWAIT, MT_DATA);
 1340 
 1341         if (m == NULL)
 1342                 return;
 1343         else
 1344                 m->m_len = total;
 1345 
 1346         pa = mtod(m, struct pgt_async_trap *);
 1347         p = mtod(m, char *) + sizeof(*pa);
 1348         *(uint32_t *)p = oid;
 1349         p += sizeof(uint32_t);
 1350         memcpy(p, trapdata, size);
 1351         pa->pa_mbuf = m;
 1352 
 1353         TAILQ_INSERT_TAIL(&sc->sc_kthread.sck_traps, pa, pa_link);
 1354         wakeup(&sc->sc_kthread);
 1355 }
 1356 
 1357 /*
 1358  * Process a completed management response (all requests should be
 1359  * responded to quickly) or an event (trap).
 1360  */
 1361 void
 1362 pgt_mgmtrx_completion(struct pgt_softc *sc, struct pgt_mgmt_desc *pmd)
 1363 {
 1364         struct pgt_desc *pd;
 1365         struct pgt_mgmt_frame *pmf;
 1366         uint32_t oid, size;
 1367 
 1368         pd = TAILQ_FIRST(&sc->sc_dirtyq[PGT_QUEUE_MGMT_RX]);
 1369         TAILQ_REMOVE(&sc->sc_dirtyq[PGT_QUEUE_MGMT_RX], pd, pd_link);
 1370         sc->sc_dirtyq_count[PGT_QUEUE_MGMT_RX]--;
 1371         TAILQ_INSERT_TAIL(&sc->sc_freeq[PGT_QUEUE_MGMT_RX],
 1372             pd, pd_link);
 1373         sc->sc_freeq_count[PGT_QUEUE_MGMT_RX]++;
 1374         if (letoh16(pd->pd_fragp->pf_size) < sizeof(*pmf)) {
 1375                 if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
 1376                         DPRINTF(("%s: mgmt desc too small: %u\n",
 1377                             sc->sc_dev.dv_xname,
 1378                             letoh16(pd->pd_fragp->pf_size)));
 1379                 goto out_nopmd;
 1380         }
 1381         pmf = (struct pgt_mgmt_frame *)pd->pd_mem;
 1382         if (pmf->pmf_version != PMF_VER) {
 1383                 if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
 1384                         DPRINTF(("%s: unknown mgmt version %u\n",
 1385                             sc->sc_dev.dv_xname, pmf->pmf_version));
 1386                 goto out_nopmd;
 1387         }
 1388         if (pmf->pmf_device != PMF_DEV) {
 1389                 if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
 1390                         DPRINTF(("%s: unknown mgmt dev %u\n",
 1391                             sc->sc_dev.dv_xname, pmf->pmf_device));
 1392                 goto out;
 1393         }
 1394         if (pmf->pmf_flags & ~PMF_FLAG_VALID) {
 1395                 if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
 1396                         DPRINTF(("%s: unknown mgmt flags %x\n",
 1397                             sc->sc_dev.dv_xname,
 1398                             pmf->pmf_flags & ~PMF_FLAG_VALID));
 1399                 goto out;
 1400         }
 1401         if (pmf->pmf_flags & PMF_FLAG_LE) {
 1402                 oid = letoh32(pmf->pmf_oid);
 1403                 size = letoh32(pmf->pmf_size);
 1404         } else {
 1405                 oid = betoh32(pmf->pmf_oid);
 1406                 size = betoh32(pmf->pmf_size);
 1407         }
 1408         if (pmf->pmf_operation == PMF_OP_TRAP) {
 1409                 pmd = NULL; /* ignored */
 1410                 DPRINTF(("%s: mgmt trap received (op %u, oid %#x, len %u)\n",
 1411                     sc->sc_dev.dv_xname,
 1412                     pmf->pmf_operation, oid, size));
 1413                 pgt_trap_received(sc, oid, (char *)pmf + sizeof(*pmf),
 1414                     min(size, PGT_FRAG_SIZE - sizeof(*pmf)));
 1415                 goto out_nopmd;
 1416         }
 1417         if (pmd == NULL) {
 1418                 if (sc->sc_debug & (SC_DEBUG_UNEXPECTED | SC_DEBUG_MGMT))
 1419                         DPRINTF(("%s: spurious mgmt received "
 1420                             "(op %u, oid %#x, len %u)\n", sc->sc_dev.dv_xname,
 1421                             pmf->pmf_operation, oid, size));
 1422                 goto out_nopmd;
 1423         }
 1424         switch (pmf->pmf_operation) {
 1425         case PMF_OP_RESPONSE:
 1426                 pmd->pmd_error = 0;
 1427                 break;
 1428         case PMF_OP_ERROR:
 1429                 pmd->pmd_error = EPERM;
 1430                 goto out;
 1431         default:
 1432                 if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
 1433                         DPRINTF(("%s: unknown mgmt op %u\n",
 1434                             sc->sc_dev.dv_xname, pmf->pmf_operation));
 1435                 pmd->pmd_error = EIO;
 1436                 goto out;
 1437         }
 1438         if (oid != pmd->pmd_oid) {
 1439                 if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
 1440                         DPRINTF(("%s: mgmt oid changed from %#x -> %#x\n",
 1441                             sc->sc_dev.dv_xname, pmd->pmd_oid, oid));
 1442                 pmd->pmd_oid = oid;
 1443         }
 1444         if (pmd->pmd_recvbuf != NULL) {
 1445                 if (size > PGT_FRAG_SIZE) {
 1446                         if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
 1447                                 DPRINTF(("%s: mgmt oid %#x has bad size %u\n",
 1448                                     sc->sc_dev.dv_xname, oid, size));
 1449                         pmd->pmd_error = EIO;
 1450                         goto out;
 1451                 }
 1452                 if (size > pmd->pmd_len)
 1453                         pmd->pmd_error = ENOMEM;
 1454                 else
 1455                         memcpy(pmd->pmd_recvbuf, (char *)pmf + sizeof(*pmf),
 1456                             size);
 1457                 pmd->pmd_len = size;
 1458         }
 1459 
 1460 out:
 1461         TAILQ_REMOVE(&sc->sc_mgmtinprog, pmd, pmd_link);
 1462         wakeup_one(pmd);
 1463         if (sc->sc_debug & SC_DEBUG_MGMT)
 1464                 DPRINTF(("%s: queue: mgmt %p <- (op %u, oid %#x, len %u)\n",
 1465                     sc->sc_dev.dv_xname, pmd, pmf->pmf_operation,
 1466                     pmd->pmd_oid, pmd->pmd_len));
 1467 out_nopmd:
 1468         pgt_reinit_rx_desc_frag(sc, pd);
 1469 }
 1470 
 1471 /*
 1472  * Queue packets for reception and defragmentation.  It is not known
 1473  * whether the rx queue can become full enough to start, but not
 1474  * finish, queueing a fragmented packet.
 1475  */
 1476 struct mbuf *
 1477 pgt_datarx_completion(struct pgt_softc *sc, enum pgt_queue pq)
 1478 {
 1479         struct ifnet *ifp;
 1480         struct pgt_desc *pd;
 1481         struct mbuf *top, **mp, *m;
 1482         size_t datalen;
 1483         uint16_t morefrags, dataoff;
 1484         int tlen = 0;
 1485 
 1486         ifp = &sc->sc_ic.ic_if;
 1487         m = NULL;
 1488         top = NULL;
 1489         mp = &top;
 1490 
 1491         while ((pd = TAILQ_FIRST(&sc->sc_dirtyq[pq])) != NULL) {
 1492                 TAILQ_REMOVE(&sc->sc_dirtyq[pq], pd, pd_link);
 1493                 sc->sc_dirtyq_count[pq]--;
 1494                 datalen = letoh16(pd->pd_fragp->pf_size);
 1495                 dataoff = letoh32(pd->pd_fragp->pf_addr) - pd->pd_dmaaddr;
 1496                 morefrags = pd->pd_fragp->pf_flags & htole16(PF_FLAG_MF);
 1497 
 1498                 if (sc->sc_debug & SC_DEBUG_RXFRAG)
 1499                         DPRINTF(("%s: rx frag: len %u memoff %u flags %x\n",
 1500                             sc->sc_dev.dv_xname, datalen, dataoff,
 1501                             pd->pd_fragp->pf_flags));
 1502 
 1503                 /* Add the (two+?) bytes for the header. */
 1504                 if (datalen + dataoff > PGT_FRAG_SIZE) {
 1505                         if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
 1506                                 DPRINTF(("%s data rx too big: %u\n",
 1507                                     sc->sc_dev.dv_xname, datalen));
 1508                         goto fail;
 1509                 }
 1510 
 1511                 if (m == NULL)
 1512                         MGETHDR(m, M_DONTWAIT, MT_DATA);
 1513                 else
 1514                         m = m_get(M_DONTWAIT, MT_DATA);
 1515 
 1516                 if (m == NULL)
 1517                         goto fail;
 1518                 if (datalen >= MINCLSIZE) {
 1519                         MCLGET(m, M_DONTWAIT);
 1520                         if (!(m->m_flags & M_EXT)) {
 1521                                 m_free(m);
 1522                                 goto fail;
 1523                         }
 1524                 }
 1525                 bcopy(pd->pd_mem + dataoff, mtod(m, char *), datalen);
 1526                 m->m_len = datalen;
 1527                 tlen += datalen;
 1528 
 1529                 *mp = m;
 1530                 mp = &m->m_next;
 1531 
 1532                 TAILQ_INSERT_TAIL(&sc->sc_freeq[pq], pd, pd_link);
 1533                 sc->sc_freeq_count[pq]++;
 1534                 pgt_reinit_rx_desc_frag(sc, pd);
 1535 
 1536                 if (!morefrags)
 1537                         break;
 1538         }
 1539 
 1540         if (top) {
 1541                 ifp->if_ipackets++;
 1542                 top->m_pkthdr.len = tlen;
 1543                 top->m_pkthdr.rcvif = ifp;
 1544         }
 1545         return (top);
 1546 
 1547 fail:
 1548         TAILQ_INSERT_TAIL(&sc->sc_freeq[pq], pd, pd_link);
 1549         sc->sc_freeq_count[pq]++;
 1550         pgt_reinit_rx_desc_frag(sc, pd);
 1551 
 1552         ifp->if_ierrors++;
 1553         if (top)
 1554                 m_freem(top);
 1555         return (NULL);
 1556 }
 1557 
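      /*
       * Blocking OID accessors built on pgt_mgmt_request(): _get reads
       * an object (recvbuf only), _retrieve writes it and reads the
       * result back (sendbuf and recvbuf), and _set only writes it
       * (sendbuf only).  For example, pgt_net_attach() below uses
       * pgt_oid_get(sc, PGT_OID_MAC_ADDRESS, ...) to fetch the MAC
       * address.
       */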
 1558 int
 1559 pgt_oid_get(struct pgt_softc *sc, enum pgt_oid oid,
 1560     void *arg, size_t arglen)
 1561 {
 1562         struct pgt_mgmt_desc pmd;
 1563         int error;
 1564 
 1565         bzero(&pmd, sizeof(pmd));
 1566         pmd.pmd_recvbuf = arg;
 1567         pmd.pmd_len = arglen;
 1568         pmd.pmd_oid = oid;
 1569 
 1570         error = pgt_mgmt_request(sc, &pmd);
 1571         if (error == 0)
 1572                 error = pmd.pmd_error;
 1573         if (error != 0 && error != EPERM && sc->sc_debug & SC_DEBUG_UNEXPECTED)
 1574                 DPRINTF(("%s: failure getting oid %#x: %d\n",
 1575                     sc->sc_dev.dv_xname, oid, error));
 1576 
 1577         return (error);
 1578 }
 1579 
 1580 int
 1581 pgt_oid_retrieve(struct pgt_softc *sc, enum pgt_oid oid,
 1582     void *arg, size_t arglen)
 1583 {
 1584         struct pgt_mgmt_desc pmd;
 1585         int error;
 1586 
 1587         bzero(&pmd, sizeof(pmd));
 1588         pmd.pmd_sendbuf = arg;
 1589         pmd.pmd_recvbuf = arg;
 1590         pmd.pmd_len = arglen;
 1591         pmd.pmd_oid = oid;
 1592 
 1593         error = pgt_mgmt_request(sc, &pmd);
 1594         if (error == 0)
 1595                 error = pmd.pmd_error;
 1596         if (error != 0 && error != EPERM && sc->sc_debug & SC_DEBUG_UNEXPECTED)
 1597                 DPRINTF(("%s: failure retrieving oid %#x: %d\n",
 1598                     sc->sc_dev.dv_xname, oid, error));
 1599 
 1600         return (error);
 1601 }
 1602 
 1603 int
 1604 pgt_oid_set(struct pgt_softc *sc, enum pgt_oid oid,
 1605     const void *arg, size_t arglen)
 1606 {
 1607         struct pgt_mgmt_desc pmd;
 1608         int error;
 1609 
 1610         bzero(&pmd, sizeof(pmd));
 1611         pmd.pmd_sendbuf = arg;
 1612         pmd.pmd_len = arglen;
 1613         pmd.pmd_oid = oid;
 1614 
 1615         error = pgt_mgmt_request(sc, &pmd);
 1616         if (error == 0)
 1617                 error = pmd.pmd_error;
 1618         if (error != 0 && error != EPERM && sc->sc_debug & SC_DEBUG_UNEXPECTED)
 1619                 DPRINTF(("%s: failure setting oid %#x: %d\n",
 1620                     sc->sc_dev.dv_xname, oid, error));
 1621 
 1622         return (error);
 1623 }
 1624 
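      /*
       * Dump the control/interrupt registers and the per-queue fragment
       * counters shared with the firmware, for instance when a
       * management request times out.
       */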
 1625 void
 1626 pgt_state_dump(struct pgt_softc *sc)
 1627 {
 1628         printf("%s: state dump: control 0x%08x interrupt 0x%08x\n",
 1629             sc->sc_dev.dv_xname,
 1630             pgt_read_4(sc, PGT_REG_CTRL_STAT),
 1631             pgt_read_4(sc, PGT_REG_INT_STAT));
 1632 
 1633         printf("%s: state dump: driver curfrag[]\n",
 1634             sc->sc_dev.dv_xname);
 1635 
 1636         printf("%s: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
 1637             sc->sc_dev.dv_xname,
 1638             letoh32(sc->sc_cb->pcb_driver_curfrag[0]),
 1639             letoh32(sc->sc_cb->pcb_driver_curfrag[1]),
 1640             letoh32(sc->sc_cb->pcb_driver_curfrag[2]),
 1641             letoh32(sc->sc_cb->pcb_driver_curfrag[3]),
 1642             letoh32(sc->sc_cb->pcb_driver_curfrag[4]),
 1643             letoh32(sc->sc_cb->pcb_driver_curfrag[5]));
 1644 
 1645         printf("%s: state dump: device curfrag[]\n",
 1646             sc->sc_dev.dv_xname);
 1647 
 1648         printf("%s: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
 1649             sc->sc_dev.dv_xname,
 1650             letoh32(sc->sc_cb->pcb_device_curfrag[0]),
 1651             letoh32(sc->sc_cb->pcb_device_curfrag[1]),
 1652             letoh32(sc->sc_cb->pcb_device_curfrag[2]),
 1653             letoh32(sc->sc_cb->pcb_device_curfrag[3]),
 1654             letoh32(sc->sc_cb->pcb_device_curfrag[4]),
 1655             letoh32(sc->sc_cb->pcb_device_curfrag[5]));
 1656 }
 1657 
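      /*
       * Send a management request and sleep until the matching response
       * arrives, or until the retry loop below gives up and schedules a
       * reset.
       */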
 1658 int
 1659 pgt_mgmt_request(struct pgt_softc *sc, struct pgt_mgmt_desc *pmd)
 1660 {
 1661         struct pgt_desc *pd;
 1662         struct pgt_mgmt_frame *pmf;
 1663         int error, i;
 1664 
 1665         if (sc->sc_flags & (SC_DYING | SC_NEEDS_RESET))
 1666                 return (EIO);
 1667         if (pmd->pmd_len > PGT_FRAG_SIZE - sizeof(*pmf))
 1668                 return (ENOMEM);
 1669         pd = TAILQ_FIRST(&sc->sc_freeq[PGT_QUEUE_MGMT_TX]);
 1670         if (pd == NULL)
 1671                 return (ENOMEM);
 1672         error = pgt_load_tx_desc_frag(sc, PGT_QUEUE_MGMT_TX, pd);
 1673         if (error)
 1674                 return (error);
 1675         pmf = (struct pgt_mgmt_frame *)pd->pd_mem;
 1676         pmf->pmf_version = PMF_VER;
 1677         /* "get" and "retrieve" operations look the same */
 1678         if (pmd->pmd_recvbuf != NULL)
 1679                 pmf->pmf_operation = PMF_OP_GET;
 1680         else
 1681                 pmf->pmf_operation = PMF_OP_SET;
 1682         pmf->pmf_oid = htobe32(pmd->pmd_oid);
 1683         pmf->pmf_device = PMF_DEV;
 1684         pmf->pmf_flags = 0;
 1685         pmf->pmf_size = htobe32(pmd->pmd_len);
 1686         /* "set" and "retrieve" operations both send data */
 1687         if (pmd->pmd_sendbuf != NULL)
 1688                 memcpy((char *)pmf + sizeof(*pmf), pmd->pmd_sendbuf,
 1689                     pmd->pmd_len);
 1690         else
 1691                 bzero((char *)pmf + sizeof(*pmf), pmd->pmd_len);
 1692         pmd->pmd_error = EINPROGRESS;
 1693         TAILQ_INSERT_TAIL(&sc->sc_mgmtinprog, pmd, pmd_link);
 1694         if (sc->sc_debug & SC_DEBUG_MGMT)
 1695                 DPRINTF(("%s: queue: mgmt %p -> (op %u, oid %#x, len %u)\n",
 1696                     sc->sc_dev.dv_xname,
 1697                     pmd, pmf->pmf_operation,
 1698                     pmd->pmd_oid, pmd->pmd_len));
 1699         pgt_desc_transmit(sc, PGT_QUEUE_MGMT_TX, pd,
 1700             sizeof(*pmf) + pmd->pmd_len, 0);
 1701         /*
 1702          * Try for one second, triggering 10 times.
 1703          *
 1704          * Do our best to work around seemingly buggy CardBus controllers
 1705          * on Soekris 4521 that fail to get interrupts with alarming
 1706          * regularity: run as if an interrupt occurred and service every
 1707          * queue except for mbuf reception.
 1708          */
 1709         i = 0;
 1710         do {
 1711                 if (tsleep(pmd, 0, "pgtmgm", hz / 10) != EWOULDBLOCK)
 1712                         break;
 1713                 if (pmd->pmd_error != EINPROGRESS)
 1714                         break;
 1715                 if (sc->sc_flags & (SC_DYING | SC_NEEDS_RESET)) {
 1716                         pmd->pmd_error = EIO;
 1717                         TAILQ_REMOVE(&sc->sc_mgmtinprog, pmd, pmd_link);
 1718                         break;
 1719                 }
 1720                 if (i != 9)
 1721                         pgt_maybe_trigger(sc, PGT_QUEUE_MGMT_RX);
 1722 #ifdef PGT_BUGGY_INTERRUPT_RECOVERY
 1723                 pgt_update_intr(sc, 0);
 1724 #endif
 1725         } while (i++ < 10);
 1726 
 1727         if (pmd->pmd_error == EINPROGRESS) {
 1728                 printf("%s: timeout waiting for management "
 1729                     "packet response to %#x\n",
 1730                     sc->sc_dev.dv_xname, pmd->pmd_oid);
 1731                 TAILQ_REMOVE(&sc->sc_mgmtinprog, pmd, pmd_link);
 1732                 if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
 1733                         pgt_state_dump(sc);
 1734                 pgt_async_reset(sc);
 1735                 error = ETIMEDOUT;
 1736         } else
 1737                 error = 0;
 1738 
 1739         return (error);
 1740 }
 1741 
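      /*
       * Hand a loaded descriptor to the device: move it to the dirty
       * queue, record its size and fragment flags, advance the driver's
       * current-fragment counter in the control block and, unless more
       * fragments follow, trigger the device.
       */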
 1742 void
 1743 pgt_desc_transmit(struct pgt_softc *sc, enum pgt_queue pq, struct pgt_desc *pd,
 1744     uint16_t len, int morecoming)
 1745 {
 1746         TAILQ_REMOVE(&sc->sc_freeq[pq], pd, pd_link);
 1747         sc->sc_freeq_count[pq]--;
 1748         TAILQ_INSERT_TAIL(&sc->sc_dirtyq[pq], pd, pd_link);
 1749         sc->sc_dirtyq_count[pq]++;
 1750         if (sc->sc_debug & SC_DEBUG_QUEUES)
 1751                 DPRINTF(("%s: queue: tx %u -> [%u]\n", sc->sc_dev.dv_xname,
 1752                     pd->pd_fragnum, pq));
 1753         bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
 1754             sc->sc_cbdmam->dm_mapsize,
 1755             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_PREWRITE);
 1756         if (morecoming)
 1757                 pd->pd_fragp->pf_flags |= htole16(PF_FLAG_MF);
 1758         pd->pd_fragp->pf_size = htole16(len);
 1759         bus_dmamap_sync(sc->sc_dmat, pd->pd_dmam, 0,
 1760             pd->pd_dmam->dm_mapsize,
 1761             BUS_DMASYNC_POSTWRITE);
 1762         sc->sc_cb->pcb_driver_curfrag[pq] =
 1763             htole32(letoh32(sc->sc_cb->pcb_driver_curfrag[pq]) + 1);
 1764         bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
 1765             sc->sc_cbdmam->dm_mapsize,
 1766             BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_PREREAD);
 1767         if (!morecoming)
 1768                 pgt_maybe_trigger(sc, pq);
 1769 }
 1770 
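      /*
       * Notify the device that a queue has new work.  In power-save
       * mode the device may first have to be woken with a "wakeup"
       * doorbell; otherwise a plain "update" doorbell is written.
       */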
 1771 void
 1772 pgt_maybe_trigger(struct pgt_softc *sc, enum pgt_queue pq)
 1773 {
 1774         unsigned int tries = 1000000 / PGT_WRITEIO_DELAY; /* one second */
 1775         uint32_t reg;
 1776 
 1777         if (sc->sc_debug & SC_DEBUG_TRIGGER)
 1778                 DPRINTF(("%s: triggered by queue [%u]\n",
 1779                     sc->sc_dev.dv_xname, pq));
 1780         pgt_debug_events(sc, "trig");
 1781         if (sc->sc_flags & SC_POWERSAVE) {
 1782                 /* Magic values ahoy? */
 1783                 if (pgt_read_4(sc, PGT_REG_INT_STAT) == 0xabadface) {
 1784                         do {
 1785                                 reg = pgt_read_4(sc, PGT_REG_CTRL_STAT);
 1786                                 if (!(reg & PGT_CTRL_STAT_SLEEPMODE))
 1787                                         DELAY(PGT_WRITEIO_DELAY);
 1788                         } while (tries-- != 0);
 1789                         if (!(reg & PGT_CTRL_STAT_SLEEPMODE)) {
 1790                                 if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
 1791                                         DPRINTF(("%s: timeout triggering from "
 1792                                             "sleep mode\n",
 1793                                             sc->sc_dev.dv_xname));
 1794                                 pgt_async_reset(sc);
 1795                                 return;
 1796                         }
 1797                 }
 1798                 pgt_write_4_flush(sc, PGT_REG_DEV_INT,
 1799                     PGT_DEV_INT_WAKEUP);
 1800                 DELAY(PGT_WRITEIO_DELAY);
 1801                 /* read the status back in */
 1802                 (void)pgt_read_4(sc, PGT_REG_CTRL_STAT);
 1803                 DELAY(PGT_WRITEIO_DELAY);
 1804         } else {
 1805                 pgt_write_4_flush(sc, PGT_REG_DEV_INT, PGT_DEV_INT_UPDATE);
 1806                 DELAY(PGT_WRITEIO_DELAY);
 1807         }
 1808 }
 1809 
 1810 struct ieee80211_node *
 1811 pgt_ieee80211_node_alloc(struct ieee80211com *ic)
 1812 {
 1813         struct pgt_ieee80211_node *pin;
 1814 
 1815         pin = malloc(sizeof(*pin), M_DEVBUF, M_NOWAIT);
 1816         if (pin != NULL) {
 1817                 bzero(pin, sizeof *pin);
 1818                 pin->pin_dot1x_auth = PIN_DOT1X_UNAUTHORIZED;
 1819         }
 1820         return (struct ieee80211_node *)pin;
 1821 }
 1822 
 1823 void
 1824 pgt_ieee80211_newassoc(struct ieee80211com *ic, struct ieee80211_node *ni,
 1825     int reallynew)
 1826 {
 1827         ieee80211_ref_node(ni);
 1828 }
 1829 
 1830 void
 1831 pgt_ieee80211_node_free(struct ieee80211com *ic, struct ieee80211_node *ni)
 1832 {
 1833         struct pgt_ieee80211_node *pin;
 1834 
 1835         pin = (struct pgt_ieee80211_node *)ni;
 1836         free(pin, M_DEVBUF);
 1837 }
 1838 
 1839 void
 1840 pgt_ieee80211_node_copy(struct ieee80211com *ic, struct ieee80211_node *dst,
 1841     const struct ieee80211_node *src)
 1842 {
 1843         const struct pgt_ieee80211_node *psrc;
 1844         struct pgt_ieee80211_node *pdst;
 1845 
 1846         psrc = (const struct pgt_ieee80211_node *)src;
 1847         pdst = (struct pgt_ieee80211_node *)dst;
 1848         bcopy(psrc, pdst, sizeof(*psrc));
 1849 }
 1850 
 1851 int
 1852 pgt_ieee80211_send_mgmt(struct ieee80211com *ic, struct ieee80211_node *ni,
 1853     int type, int arg)
 1854 {
 1855         return (EOPNOTSUPP);
 1856 }
 1857 
 1858 int
 1859 pgt_net_attach(struct pgt_softc *sc)
 1860 {
 1861         struct ieee80211com *ic = &sc->sc_ic;
 1862         struct ifnet *ifp = &ic->ic_if;
 1863         struct ieee80211_rateset *rs;
 1864         uint8_t rates[IEEE80211_RATE_MAXSIZE];
 1865         struct pgt_obj_buffer psbuffer;
 1866         struct pgt_obj_frequencies *freqs;
 1867         uint32_t phymode, country;
 1868         unsigned int chan, i, j, firstchan = -1;
 1869         int error;
 1870 
 1871         psbuffer.pob_size = htole32(PGT_FRAG_SIZE * PGT_PSM_BUFFER_FRAME_COUNT);
 1872         psbuffer.pob_addr = htole32(sc->sc_psmdmam->dm_segs[0].ds_addr);
 1873         error = pgt_oid_set(sc, PGT_OID_PSM_BUFFER, &psbuffer, sizeof(psbuffer));
 1874         if (error)
 1875                 return (error);
 1876         error = pgt_oid_get(sc, PGT_OID_PHY, &phymode, sizeof(phymode));
 1877         if (error)
 1878                 return (error);
 1879         error = pgt_oid_get(sc, PGT_OID_MAC_ADDRESS, ic->ic_myaddr,
 1880             sizeof(ic->ic_myaddr));
 1881         if (error)
 1882                 return (error);
 1883         error = pgt_oid_get(sc, PGT_OID_COUNTRY, &country, sizeof(country));
 1884         if (error)
 1885                 return (error);
 1886 
 1887         ifp->if_softc = sc;
 1888         ifp->if_init = pgt_init;
 1889         ifp->if_ioctl = pgt_ioctl;
 1890         ifp->if_start = pgt_start;
 1891         ifp->if_watchdog = pgt_watchdog;
 1892         ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
 1893         strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
 1894 
 1895         IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
 1896         IFQ_SET_READY(&ifp->if_snd);
 1897 
 1898         /*
 1899          * Set channels
 1900          *
 1901          * Prism hardware likes to report supported frequencies that are
 1902          * not actually available for the country of origin.
 1903          */
 1904         j = sizeof(*freqs) + (IEEE80211_CHAN_MAX + 1) * sizeof(uint16_t);
 1905         freqs = malloc(j, M_DEVBUF, M_WAITOK);
 1906         error = pgt_oid_get(sc, PGT_OID_SUPPORTED_FREQUENCIES, freqs, j);
 1907         if (error) {
 1908                 free(freqs, M_DEVBUF);
 1909                 return (error);
 1910         }
 1911 
 1912         for (i = 0, j = letoh16(freqs->pof_count); i < j; i++) {
 1913                 chan = ieee80211_mhz2ieee(letoh16(freqs->pof_freqlist_mhz[i]),
 1914                     0);
 1915 
 1916                 if (chan > IEEE80211_CHAN_MAX) {
 1917                         printf("%s: reported bogus channel (%uMHz)\n",
 1918                             sc->sc_dev.dv_xname, chan);
 1919                         free(freqs, M_DEVBUF);
 1920                         return (EIO);
 1921                 }
 1922 
 1923                 if (letoh16(freqs->pof_freqlist_mhz[i]) < 5000) {
 1924                         if (!(phymode & htole32(PGT_OID_PHY_2400MHZ)))
 1925                                 continue;
 1926                         if (country == letoh32(PGT_COUNTRY_USA)) {
 1927                                 if (chan >= 12 && chan <= 14)
 1928                                         continue;
 1929                         }
 1930                         if (chan <= 14)
 1931                                 ic->ic_channels[chan].ic_flags |=
 1932                                     IEEE80211_CHAN_B;
 1933                         ic->ic_channels[chan].ic_flags |= IEEE80211_CHAN_PUREG;
 1934                 } else {
 1935                         if (!(phymode & htole32(PGT_OID_PHY_5000MHZ)))
 1936                                 continue;
 1937                         ic->ic_channels[chan].ic_flags |= IEEE80211_CHAN_A;
 1938                 }
 1939 
 1940                 ic->ic_channels[chan].ic_freq =
 1941                     letoh16(freqs->pof_freqlist_mhz[i]);
 1942 
 1943                 if (firstchan == -1)
 1944                         firstchan = chan;
 1945 
 1946                 DPRINTF(("%s: set channel %d to freq %uMHz\n",
 1947                     sc->sc_dev.dv_xname, chan,
 1948                     letoh16(freqs->pof_freqlist_mhz[i])));
 1949         }
 1950         free(freqs, M_DEVBUF);
 1951         if (firstchan == -1) {
 1952                 printf("%s: no channels found\n", sc->sc_dev.dv_xname);
 1953                 return (EIO);
 1954         }
 1955 
 1956         /*
 1957          * Set rates
 1958          */
 1959         bzero(rates, sizeof(rates));
 1960         error = pgt_oid_get(sc, PGT_OID_SUPPORTED_RATES, rates, sizeof(rates));
 1961         if (error)
 1962                 return (error);
 1963         for (i = 0; i < sizeof(rates) && rates[i] != 0; i++) {
 1964                 switch (rates[i]) {
 1965                 case 2:
 1966                 case 4:
 1967                 case 11:
 1968                 case 22:
 1969                 case 44: /* maybe */
 1970                         if (phymode & htole32(PGT_OID_PHY_2400MHZ)) {
 1971                                 rs = &ic->ic_sup_rates[IEEE80211_MODE_11B];
 1972                                 rs->rs_rates[rs->rs_nrates++] = rates[i];
 1973                         }
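                              /* FALLTHROUGH */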
 1974                 default:
 1975                         if (phymode & htole32(PGT_OID_PHY_2400MHZ)) {
 1976                                 rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
 1977                                 rs->rs_rates[rs->rs_nrates++] = rates[i];
 1978                         }
 1979                         if (phymode & htole32(PGT_OID_PHY_5000MHZ)) {
 1980                                 rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
 1981                                 rs->rs_rates[rs->rs_nrates++] = rates[i];
 1982                         }
 1983                         rs = &ic->ic_sup_rates[IEEE80211_MODE_AUTO];
 1984                         rs->rs_rates[rs->rs_nrates++] = rates[i];
 1985                 }
 1986         }
 1987 
 1988         ic->ic_caps = IEEE80211_C_WEP | IEEE80211_C_IBSS | IEEE80211_C_PMGT |
 1989             IEEE80211_C_HOSTAP | IEEE80211_C_TXPMGT | IEEE80211_C_SHSLOT |
 1990             IEEE80211_C_SHPREAMBLE | IEEE80211_C_MONITOR;
 1991 
 1992         ic->ic_opmode = IEEE80211_M_STA;
 1993         ic->ic_state = IEEE80211_S_INIT;
 1994 
 1995         if_attach(ifp);
 1996         ieee80211_ifattach(ifp);
 1997 
 1998         /* setup post-attach/pre-lateattach vector functions */
 1999         sc->sc_newstate = ic->ic_newstate;
 2000         ic->ic_newstate = pgt_newstate;
 2001         ic->ic_node_alloc = pgt_ieee80211_node_alloc;
 2002         ic->ic_newassoc = pgt_ieee80211_newassoc;
 2003         ic->ic_node_free = pgt_ieee80211_node_free;
 2004         ic->ic_node_copy = pgt_ieee80211_node_copy;
 2005         ic->ic_send_mgmt = pgt_ieee80211_send_mgmt;
 2006         ic->ic_max_rssi = 255;  /* rssi is a u_int8_t */
 2007 
 2008         /* let net80211 handle switching around the media + resetting */
 2009         ieee80211_media_init(ifp, pgt_media_change, pgt_media_status);
 2010 
 2011 #if NBPFILTER > 0
 2012         bpfattach(&sc->sc_drvbpf, ifp, DLT_IEEE802_11_RADIO,
 2013             sizeof(struct ieee80211_frame) + 64);
 2014 
 2015         sc->sc_rxtap_len = sizeof(sc->sc_rxtapu);
 2016         sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
 2017         sc->sc_rxtap.wr_ihdr.it_present = htole32(PGT_RX_RADIOTAP_PRESENT);
 2018 
 2019         sc->sc_txtap_len = sizeof(sc->sc_txtapu);
 2020         sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
 2021         sc->sc_txtap.wt_ihdr.it_present = htole32(PGT_TX_RADIOTAP_PRESENT);
 2022 #endif
 2023 
 2024         /*
 2025          * Enable shutdown and power hooks
 2026          */
 2027         sc->sc_shutdown_hook = shutdownhook_establish(pgt_shutdown, sc);
 2028         if (sc->sc_shutdown_hook == NULL)
 2029                 printf("%s: WARNING: unable to establish shutdown hook\n",
 2030                     sc->sc_dev.dv_xname);
 2031         sc->sc_power_hook = powerhook_establish(pgt_power, sc);
 2032         if (sc->sc_power_hook == NULL)
 2033                 printf("%s: WARNING: unable to establish power hook\n",
 2034                     sc->sc_dev.dv_xname);
 2035 
 2036         return (0);
 2037 }
 2038 
 2039 int
 2040 pgt_media_change(struct ifnet *ifp)
 2041 {
 2042         struct pgt_softc *sc = ifp->if_softc;
 2043         int error;
 2044 
 2045         error = ieee80211_media_change(ifp);
 2046         if (error == ENETRESET) {
 2047                 pgt_update_hw_from_sw(sc, 0, 0);
 2048                 error = 0;
 2049         }
 2050 
 2051         return (error);
 2052 }
 2053 
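      /*
       * Report the current media status; with no fixed rate configured,
       * the firmware's link-state OID supplies the current rate.
       */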
 2054 void
 2055 pgt_media_status(struct ifnet *ifp, struct ifmediareq *imr)
 2056 {
 2057         struct pgt_softc *sc = ifp->if_softc;
 2058         struct ieee80211com *ic = &sc->sc_ic;
 2059         uint32_t rate;
 2060         int s;
 2061 
 2062         imr->ifm_status = 0;
 2063         imr->ifm_active = IFM_IEEE80211 | IFM_NONE;
 2064 
 2065         if (!(ifp->if_flags & IFF_UP))
 2066                 return;
 2067 
 2068         s = splnet();
 2069 
 2070         if (ic->ic_fixed_rate != -1) {
 2071                 rate = ic->ic_sup_rates[ic->ic_curmode].
 2072                     rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
 2073         } else {
 2074                 if (pgt_oid_get(sc, PGT_OID_LINK_STATE, &rate, sizeof(rate)))
 2075                         goto out;
 2076                 rate = letoh32(rate);
 2077                 if (sc->sc_debug & SC_DEBUG_LINK) {
 2078                         DPRINTF(("%s: %s: link rate %u\n",
 2079                             sc->sc_dev.dv_xname, __func__, rate));
 2080                 }
 2081                 if (rate == 0)
 2082                         goto out;
 2083         }
 2084 
 2085         imr->ifm_status = IFM_AVALID;
 2086         imr->ifm_active = IFM_IEEE80211;
 2087         if (ic->ic_state == IEEE80211_S_RUN)
 2088                 imr->ifm_status |= IFM_ACTIVE;
 2089 
 2090         imr->ifm_active |= ieee80211_rate2media(ic, rate, ic->ic_curmode);
 2091 
 2092         switch (ic->ic_opmode) {
 2093         case IEEE80211_M_STA:
 2094                 break;
 2095         case IEEE80211_M_IBSS:
 2096                 imr->ifm_active |= IFM_IEEE80211_ADHOC;
 2097                 break;
 2098         case IEEE80211_M_AHDEMO:
 2099                 imr->ifm_active |= IFM_IEEE80211_ADHOC | IFM_FLAG0;
 2100                 break;
 2101         case IEEE80211_M_HOSTAP:
 2102                 imr->ifm_active |= IFM_IEEE80211_HOSTAP;
 2103                 break;
 2104         case IEEE80211_M_MONITOR:
 2105                 imr->ifm_active |= IFM_IEEE80211_MONITOR;
 2106                 break;
 2107         default:
 2108                 break;
 2109         }
 2110 out:
 2111         splx(s);
 2112 }
 2113 
 2114 /*
 2115  * Start data frames.  Critical sections surround the boundary of
 2116  * management frame transmission / transmission acknowledgement / response
 2117  * and data frame transmission / transmission acknowledgement.
 2118  */
 2119 void
 2120 pgt_start(struct ifnet *ifp)
 2121 {
 2122         struct pgt_softc *sc;
 2123         struct ieee80211com *ic;
 2124         struct pgt_desc *pd;
 2125         struct mbuf *m;
 2126         int error;
 2127 
 2128         sc = ifp->if_softc;
 2129         ic = &sc->sc_ic;
 2130 
 2131         if (sc->sc_flags & (SC_DYING | SC_NEEDS_RESET) ||
 2132             !(ifp->if_flags & IFF_RUNNING) ||
 2133             ic->ic_state != IEEE80211_S_RUN) {
 2134                 return;
 2135         }
 2136 
 2137         /*
 2138          * Management packets should probably be MLME frames
 2139          * (i.e. hostap "managed" mode); we don't touch the
 2140          * net80211 management queue.
 2141          */
 2142         for (; sc->sc_dirtyq_count[PGT_QUEUE_DATA_LOW_TX] <
 2143             PGT_QUEUE_FULL_THRESHOLD && !IFQ_IS_EMPTY(&ifp->if_snd);) {
 2144                 pd = TAILQ_FIRST(&sc->sc_freeq[PGT_QUEUE_DATA_LOW_TX]);
 2145                 IFQ_POLL(&ifp->if_snd, m);
 2146                 if (m == NULL)
 2147                         break;
 2148                 if (m->m_pkthdr.len <= PGT_FRAG_SIZE) {
 2149                         error = pgt_load_tx_desc_frag(sc,
 2150                             PGT_QUEUE_DATA_LOW_TX, pd);
 2151                         if (error)
 2152                                 break;
 2153                         IFQ_DEQUEUE(&ifp->if_snd, m);
 2154                         m_copydata(m, 0, m->m_pkthdr.len, pd->pd_mem);
 2155                         pgt_desc_transmit(sc, PGT_QUEUE_DATA_LOW_TX,
 2156                             pd, m->m_pkthdr.len, 0);
 2157                 } else if (m->m_pkthdr.len <= PGT_FRAG_SIZE * 2) {
 2158                         struct pgt_desc *pd2;
 2159 
 2160                         /*
 2161                          * Transmit a fragmented frame if there is
 2162                          * not enough room in one fragment; limit
 2163                          * to two fragments (802.11 itself couldn't
 2164                          * even support a full two.)
 2165                          */
 2166                         if (sc->sc_dirtyq_count[PGT_QUEUE_DATA_LOW_TX] + 2 >
 2167                             PGT_QUEUE_FULL_THRESHOLD)
 2168                                 break;
 2169                         pd2 = TAILQ_NEXT(pd, pd_link);
 2170                         error = pgt_load_tx_desc_frag(sc,
 2171                             PGT_QUEUE_DATA_LOW_TX, pd);
 2172                         if (error == 0) {
 2173                                 error = pgt_load_tx_desc_frag(sc,
 2174                                     PGT_QUEUE_DATA_LOW_TX, pd2);
 2175                                 if (error) {
 2176                                         pgt_unload_tx_desc_frag(sc, pd);
 2177                                         TAILQ_INSERT_HEAD(&sc->sc_freeq[
 2178                                             PGT_QUEUE_DATA_LOW_TX], pd,
 2179                                             pd_link);
 2180                                 }
 2181                         }
 2182                         if (error)
 2183                                 break;
 2184                         IFQ_DEQUEUE(&ifp->if_snd, m);
 2185                         m_copydata(m, 0, PGT_FRAG_SIZE, pd->pd_mem);
 2186                         pgt_desc_transmit(sc, PGT_QUEUE_DATA_LOW_TX,
 2187                             pd, PGT_FRAG_SIZE, 1);
 2188                         m_copydata(m, PGT_FRAG_SIZE,
 2189                             m->m_pkthdr.len - PGT_FRAG_SIZE, pd2->pd_mem);
 2190                         pgt_desc_transmit(sc, PGT_QUEUE_DATA_LOW_TX,
 2191                             pd2, m->m_pkthdr.len - PGT_FRAG_SIZE, 0);
 2192                 } else {
 2193                         IFQ_DEQUEUE(&ifp->if_snd, m);
 2194                         ifp->if_oerrors++;
 2195                         m_freem(m);
 2196                         m = NULL;
 2197                 }
 2198                 if (m != NULL) {
 2199                         struct ieee80211_node *ni;
 2200 #if NBPFILTER > 0
 2201                         if (ifp->if_bpf != NULL)
 2202                                 bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
 2203 #endif
 2204                         ifp->if_opackets++;
 2205                         ifp->if_timer = 1;
 2206                         sc->sc_txtimer = 5;
 2207                         ni = ieee80211_find_txnode(&sc->sc_ic,
 2208                             mtod(m, struct ether_header *)->ether_dhost);
 2209                         if (ni != NULL) {
 2210                                 ni->ni_inact = 0;
 2211                                 if (ni != ic->ic_bss)
 2212                                         ieee80211_release_node(&sc->sc_ic, ni);
 2213                         }
 2214 #if NBPFILTER > 0
 2215                         if (sc->sc_drvbpf != NULL) {
 2216                                 struct mbuf mb;
 2217                                 struct ether_header eh;
 2218                                 struct pgt_tx_radiotap_hdr *tap = &sc->sc_txtap;
 2219 
 2220                                 bcopy(mtod(m, struct ether_header *), &eh,
 2221                                     sizeof(eh));
 2222                                 m_adj(m, sizeof(eh));
 2223                                 m = pgt_ieee80211_encap(sc, &eh, m, NULL);
 2224 
 2225                                 tap->wt_flags = 0;
 2226                                 //tap->wt_rate = rate;
 2227                                 tap->wt_rate = 0;
 2228                                 tap->wt_chan_freq =
 2229                                     htole16(ic->ic_bss->ni_chan->ic_freq);
 2230                                 tap->wt_chan_flags =
 2231                                     htole16(ic->ic_bss->ni_chan->ic_flags);
 2232 
 2233                                 if (m != NULL) {
 2234                                         mb.m_data = (caddr_t)tap;
 2235                                         mb.m_len = sc->sc_txtap_len;
 2236                                         mb.m_next = m;
 2237                                         mb.m_nextpkt = NULL;
 2238                                         mb.m_type = 0;
 2239                                         mb.m_flags = 0;
 2240 
 2241                                         bpf_mtap(sc->sc_drvbpf, &mb,
 2242                                             BPF_DIRECTION_OUT);
 2243                                 }
 2244                         }
 2245 #endif
 2246                         if (m != NULL)
 2247                                 m_freem(m);
 2248                 }
 2249         }
 2250 }
 2251 
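      /*
       * Interface ioctls.  SIOCG80211ALLNODES converts the firmware's
       * BSS list into Prism2-style scan results and then into node
       * entries for userland; most other requests fall through to
       * ieee80211_ioctl() and trigger a hardware update on ENETRESET.
       */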
 2252 int
 2253 pgt_ioctl(struct ifnet *ifp, u_long cmd, caddr_t req)
 2254 {
 2255         struct pgt_softc *sc = ifp->if_softc;
 2256         struct ifaddr *ifa;
 2257         struct ifreq *ifr;
 2258         struct wi_req *wreq;
 2259         struct ieee80211_nodereq_all *na;
 2260         struct ieee80211com *ic;
 2261         struct pgt_obj_bsslist *pob;
 2262         struct wi_scan_p2_hdr *p2hdr;
 2263         struct wi_scan_res *res;
 2264         uint32_t noise;
 2265         int maxscan, i, j, s, error = 0;
 2266 
 2267         ic = &sc->sc_ic;
 2268         ifr = (struct ifreq *)req;
 2269 
 2270         s = splnet();
 2271         switch (cmd) {
 2272         case SIOCS80211SCAN:
 2273                 /*
 2274                  * This chip always scans as soon as it is initialized.
 2275                  */
 2276 
 2277                 /*
 2278                  * Give it a bit of time to scan in case we were not
 2279                  * initialized before, and let the userland process wait.
 2280                  */
 2281                 tsleep(&sc->sc_flags, 0, "pgtsca", hz * SCAN_TIMEOUT);
 2282 
 2283                 break;
 2284         case SIOCG80211ALLNODES: {
 2285                 struct ieee80211_nodereq *nr = NULL;
 2286                 na = (struct ieee80211_nodereq_all *)req;
 2287                 wreq = malloc(sizeof(*wreq), M_DEVBUF, M_WAITOK);
 2288                 bzero(wreq, sizeof(*wreq));
 2289 
 2290                 maxscan = PGT_OBJ_BSSLIST_NBSS;
 2291                 pob = malloc(sizeof(*pob) +
 2292                     sizeof(struct pgt_obj_bss) * maxscan, M_DEVBUF, M_WAITOK);
 2293                 error = pgt_oid_get(sc, PGT_OID_NOISE_FLOOR, &noise,
 2294                     sizeof(noise));
 2295 
 2296                 if (error == 0) {
 2297                         noise = letoh32(noise);
 2298                         error = pgt_oid_get(sc, PGT_OID_BSS_LIST, pob,
 2299                             sizeof(*pob) +
 2300                             sizeof(struct pgt_obj_bss) * maxscan);
 2301                 }
 2302 
 2303                 if (error == 0) {
 2304                         maxscan = min(PGT_OBJ_BSSLIST_NBSS,
 2305                             letoh32(pob->pob_count));
 2306                         maxscan = min(maxscan,
 2307                             (sizeof(wreq->wi_val) - sizeof(*p2hdr)) /
 2308                             WI_PRISM2_RES_SIZE);
 2309                         p2hdr = (struct wi_scan_p2_hdr *)&wreq->wi_val;
 2310                         p2hdr->wi_rsvd = 0;
 2311                         p2hdr->wi_reason = 1;
 2312                         wreq->wi_len = (maxscan * WI_PRISM2_RES_SIZE) / 2 +
 2313                             sizeof(*p2hdr) / 2;
 2314                         wreq->wi_type = WI_RID_SCAN_RES;
 2315                 }
 2316 
 2317                 for (na->na_nodes = j = i = 0; i < maxscan &&
 2318                     (na->na_size >= j + sizeof(struct ieee80211_nodereq));
 2319                     i++) {
 2320                         /* allocate node space */
 2321                         if (nr == NULL)
 2322                                 nr = malloc(sizeof(*nr), M_DEVBUF, M_WAITOK);
 2323 
 2324                         /* get next BSS scan result */
 2325                         res = (struct wi_scan_res *)
 2326                             ((char *)&wreq->wi_val + sizeof(*p2hdr) +
 2327                             i * WI_PRISM2_RES_SIZE);
 2328                         pgt_obj_bss2scanres(sc, &pob->pob_bsslist[i],
 2329                             res, noise);
 2330 
 2331                         /* copy it to node structure for ifconfig to read */
 2332                         bzero(nr, sizeof(*nr));
 2333                         IEEE80211_ADDR_COPY(nr->nr_macaddr, res->wi_bssid);
 2334                         IEEE80211_ADDR_COPY(nr->nr_bssid, res->wi_bssid);
 2335                         nr->nr_channel = letoh16(res->wi_chan);
 2336                         nr->nr_chan_flags = IEEE80211_CHAN_B;
 2337                         nr->nr_rssi = letoh16(res->wi_signal);
 2338                         nr->nr_max_rssi = 0; /* XXX */
 2339                         nr->nr_nwid_len = letoh16(res->wi_ssid_len);
 2340                         bcopy(res->wi_ssid, nr->nr_nwid, nr->nr_nwid_len);
 2341                         nr->nr_intval = letoh16(res->wi_interval);
 2342                         nr->nr_capinfo = letoh16(res->wi_capinfo);
 2343                         nr->nr_txrate = res->wi_rate == WI_WAVELAN_RES_1M ? 2 :
 2344                             (res->wi_rate == WI_WAVELAN_RES_2M ? 4 :
 2345                             (res->wi_rate == WI_WAVELAN_RES_5M ? 11 :
 2346                             (res->wi_rate == WI_WAVELAN_RES_11M ? 22 : 0)));
 2347                         nr->nr_nrates = 0;
 2348                         while (res->wi_srates[nr->nr_nrates] != 0) {
 2349                                 nr->nr_rates[nr->nr_nrates] =
 2350                                     res->wi_srates[nr->nr_nrates] &
 2351                                     WI_VAR_SRATES_MASK;
 2352                                 nr->nr_nrates++;
 2353                         }
 2354                         nr->nr_flags = 0;
 2355                         if (bcmp(nr->nr_macaddr, nr->nr_bssid,
 2356                             IEEE80211_ADDR_LEN) == 0)
 2357                                 nr->nr_flags |= IEEE80211_NODEREQ_AP;
 2358                         error = copyout(nr, (caddr_t)na->na_node + j,
 2359                             sizeof(struct ieee80211_nodereq));
 2360                         if (error)
 2361                                 break;
 2362 
 2363                         /* point to next node entry */
 2364                         j += sizeof(struct ieee80211_nodereq);
 2365                         na->na_nodes++;
 2366                 }
 2367                 if (nr)
 2368                         free(nr, M_DEVBUF);
 2369                 free(pob, M_DEVBUF);
 2370                 break;
 2371         }
 2372         case SIOCSIFADDR:
 2373                 ifa = (struct ifaddr *)req;
 2374                 ifp->if_flags |= IFF_UP;
 2375 #ifdef INET
 2376                 if (ifa->ifa_addr->sa_family == AF_INET)
 2377                          arp_ifinit(&sc->sc_ic.ic_ac, ifa);
 2378 #endif
 2379                 /* FALLTHROUGH */
 2380         case SIOCSIFFLAGS:
 2381                 if (ifp->if_flags & IFF_UP) {
 2382                         if ((ifp->if_flags & IFF_RUNNING) == 0) {
 2383                                 pgt_init(ifp);
 2384                                 error = ENETRESET;
 2385                         }
 2386                 } else {
 2387                         if (ifp->if_flags & IFF_RUNNING) {
 2388                                 pgt_stop(sc, SC_NEEDS_RESET);
 2389                                 error = ENETRESET;
 2390                         }
 2391                 }
 2392                 break;
 2393         case SIOCADDMULTI:
 2394         case SIOCDELMULTI:
 2395                 error = (cmd == SIOCADDMULTI) ?
 2396                     ether_addmulti(ifr, &ic->ic_ac) :
 2397                     ether_delmulti(ifr, &ic->ic_ac);
 2398 
 2399                 if (error == ENETRESET)
 2400                         error = 0;
 2401                 break;
 2402         case SIOCSIFMTU:
 2403                 if (ifr->ifr_mtu > PGT_FRAG_SIZE) {
 2404                         error = EINVAL;
 2405                         break;
 2406                 }
 2407                 /* FALLTHROUGH */
 2408         default:
 2409                 error = ieee80211_ioctl(ifp, cmd, req);
 2410                 break;
 2411         }
 2412 
 2413         if (error == ENETRESET) {
 2414                 pgt_update_hw_from_sw(sc, 0, 0);
 2415                 error = 0;
 2416         }
 2417         splx(s);
 2418 
 2419         return (error);
 2420 }
 2421 
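      /*
       * Convert a firmware BSS object into a Prism2-style scan result
       * record for the scan-handling code above.
       */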
 2422 void
 2423 pgt_obj_bss2scanres(struct pgt_softc *sc, struct pgt_obj_bss *pob,
 2424     struct wi_scan_res *scanres, uint32_t noise)
 2425 {
 2426         struct ieee80211_rateset *rs;
 2427         struct wi_scan_res ap;
 2428         unsigned int i, n;
 2429 
 2430         rs = &sc->sc_ic.ic_sup_rates[IEEE80211_MODE_AUTO];
 2431         bzero(&ap, sizeof(ap));
 2432         ap.wi_chan = ieee80211_mhz2ieee(letoh16(pob->pob_channel), 0);
 2433         ap.wi_noise = noise;
 2434         ap.wi_signal = letoh16(pob->pob_rssi);
 2435         IEEE80211_ADDR_COPY(ap.wi_bssid, pob->pob_address);
 2436         ap.wi_interval = letoh16(pob->pob_beacon_period);
 2437         ap.wi_capinfo = letoh16(pob->pob_capinfo);
 2438         ap.wi_ssid_len = min(sizeof(ap.wi_ssid), pob->pob_ssid.pos_length);
 2439         memcpy(ap.wi_ssid, pob->pob_ssid.pos_ssid, ap.wi_ssid_len);
 2440         n = 0;
 2441         for (i = 0; i < 16; i++) {
 2442                 if (letoh16(pob->pob_rates) & (1 << i)) {
 2443                         if (i > rs->rs_nrates)
 2444                                 break;
 2445                         ap.wi_srates[n++] = ap.wi_rate = rs->rs_rates[i];
 2446                         if (n >= sizeof(ap.wi_srates) / sizeof(ap.wi_srates[0]))
 2447                                 break;
 2448                 }
 2449         }
 2450         memcpy(scanres, &ap, WI_PRISM2_RES_SIZE);
 2451 }
 2452 
 2453 void
 2454 node_mark_active_ap(void *arg, struct ieee80211_node *ni)
 2455 {
 2456         /*
 2457          * HostAP mode lets all nodes stick around unless
 2458          * the firmware AP kicks them off.
 2459          */
 2460         ni->ni_inact = 0;
 2461 }
 2462 
 2463 void
 2464 node_mark_active_adhoc(void *arg, struct ieee80211_node *ni)
 2465 {
 2466         struct pgt_ieee80211_node *pin;
 2467 
 2468         /*
 2469          * As there is no association in ad-hoc, we let links just
 2470          * time out naturally as long as they are not holding any
 2471          * private configuration, such as 802.1x authorization.
 2472          */
 2473         pin = (struct pgt_ieee80211_node *)ni;
 2474         if (pin->pin_dot1x_auth == PIN_DOT1X_AUTHORIZED)
 2475                 pin->pin_node.ni_inact = 0;
 2476 }
 2477 
 2478 void
 2479 pgt_watchdog(struct ifnet *ifp)
 2480 {
 2481         struct pgt_softc *sc;
 2482 
 2483         sc = ifp->if_softc;
 2484         /*
 2485          * Check for timed out transmissions (and make sure to set
 2486          * this watchdog to fire again if there is still data in the
 2487          * output device queue).
 2488          */
 2489         if (sc->sc_dirtyq_count[PGT_QUEUE_DATA_LOW_TX] != 0) {
 2490                 int count;
 2491 
 2492                 ifp->if_timer = 1;
 2493                 if (sc->sc_txtimer && --sc->sc_txtimer == 0) {
 2494                         count = pgt_drain_tx_queue(sc, PGT_QUEUE_DATA_LOW_TX);
 2495                         if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
 2496                                 DPRINTF(("%s: timeout %d data transmissions\n",
 2497                                     sc->sc_dev.dv_xname, count));
 2498                 }
 2499         }
 2500         if (sc->sc_flags & (SC_DYING | SC_NEEDS_RESET))
 2501                 return;
 2502         /*
 2503          * If we're going to kick the device out of power-save mode
 2504          * just to update the BSSID and such, we should not do it
 2505          * very often; we still need to determine how best to do that.
 2506          */
 2507         if (ifp->if_flags & IFF_RUNNING &&
 2508             sc->sc_ic.ic_state != IEEE80211_S_INIT &&
 2509             sc->sc_ic.ic_opmode != IEEE80211_M_MONITOR)
 2510                 pgt_async_update(sc);
 2511 
 2512         /*
 2513          * As a firmware-based HostAP, we should not time out
 2514          * nodes inside the driver in addition to the timeout
 2515          * that exists in the firmware.  The only timeouts we
 2516          * should have to handle when doing HostAP are the
 2517          * privacy-related ones.
 2518          */
 2519         switch (sc->sc_ic.ic_opmode) {
 2520         case IEEE80211_M_HOSTAP:
 2521                 ieee80211_iterate_nodes(&sc->sc_ic,
 2522                     node_mark_active_ap, NULL);
 2523                 break;
 2524         case IEEE80211_M_IBSS:
 2525                 ieee80211_iterate_nodes(&sc->sc_ic,
 2526                     node_mark_active_adhoc, NULL);
 2527                 break;
 2528         default:
 2529                 break;
 2530         }
 2531         ieee80211_watchdog(ifp);
 2532         ifp->if_timer = 1;
 2533 }
 2534 
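      /*
       * Bring the interface up: push the software configuration to the
       * hardware (unless the device is dying or not yet initialized)
       * and start scanning.
       */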
 2535 int
 2536 pgt_init(struct ifnet *ifp)
 2537 {
 2538         struct pgt_softc *sc = ifp->if_softc;
 2539         struct ieee80211com *ic = &sc->sc_ic;
 2540 
 2541         /* set default channel */
 2542         ic->ic_bss->ni_chan = ic->ic_ibss_chan;
 2543 
 2544         if (!(sc->sc_flags & (SC_DYING | SC_UNINITIALIZED)))
 2545                 pgt_update_hw_from_sw(sc,
 2546                     ic->ic_state != IEEE80211_S_INIT,
 2547                     ic->ic_opmode != IEEE80211_M_MONITOR);
 2548 
 2549         ifp->if_flags |= IFF_RUNNING;
 2550         ifp->if_flags &= ~IFF_OACTIVE;
 2551 
 2552         /* Begin background scanning */
 2553         ieee80211_new_state(&sc->sc_ic, IEEE80211_S_SCAN, -1);
 2554 
 2555         return (0);
 2556 }
 2557 
 2558 /*
 2559  * After almost every configuration change, everything needs to be fully
 2560  * reinitialized.  For some operations (currently, WEP settings
 2561  * in ad-hoc+802.1x mode), the change is "soft": it doesn't remove
 2562  * "associations" and allows EAP authorization to occur again.
 2563  * If keepassoc is specified, the reset operation should try to go
 2564  * back to the BSS it had before.
 2565  */
 2566 void
 2567 pgt_update_hw_from_sw(struct pgt_softc *sc, int keepassoc, int keepnodes)
 2568 {
 2569         struct ieee80211com *ic = &sc->sc_ic;
 2570         struct arpcom *ac = &ic->ic_ac;
 2571         struct ifnet *ifp = &ac->ac_if;
 2572         struct pgt_obj_key keyobj;
 2573         struct pgt_obj_ssid essid;
 2574         uint8_t availrates[IEEE80211_RATE_MAXSIZE + 1];
 2575         uint32_t mode, bsstype, config, profile, channel, slot, preamble;
 2576         uint32_t wep, exunencrypted, wepkey, dot1x, auth, mlme;
 2577         unsigned int i;
 2578         int success, shouldbeup, s;
 2579 
 2580         config = PGT_CONFIG_MANUAL_RUN | PGT_CONFIG_RX_ANNEX;
 2581 
 2582         /*
 2583          * Promiscuous mode is currently a no-op since packets transmitted
 2584          * while in promiscuous mode never seem to go anywhere.
 2585          */
 2586         shouldbeup = ifp->if_flags & IFF_RUNNING && ifp->if_flags & IFF_UP;
 2587 
 2588         if (shouldbeup) {
 2589                 switch (ic->ic_opmode) {
 2590                 case IEEE80211_M_STA:
 2591                         if (ifp->if_flags & IFF_PROMISC)
 2592                                 mode = PGT_MODE_CLIENT; /* what to do? */
 2593                         else
 2594                                 mode = PGT_MODE_CLIENT;
 2595                         bsstype = PGT_BSS_TYPE_STA;
 2596                         dot1x = PGT_DOT1X_AUTH_ENABLED;
 2597                         break;
 2598                 case IEEE80211_M_IBSS:
 2599                         if (ifp->if_flags & IFF_PROMISC)
 2600                                 mode = PGT_MODE_CLIENT; /* what to do? */
 2601                         else
 2602                                 mode = PGT_MODE_CLIENT;
 2603                         bsstype = PGT_BSS_TYPE_IBSS;
 2604                         dot1x = PGT_DOT1X_AUTH_ENABLED;
 2605                         break;
 2606                 case IEEE80211_M_HOSTAP:
 2607                         mode = PGT_MODE_AP;
 2608                         bsstype = PGT_BSS_TYPE_STA;
 2609                         /*
 2610                          * For IEEE 802.1x, we need to authenticate and
 2611                          * authorize hosts from here on, or they remain
 2612                          * associated but unable to send or receive
 2613                          * normal traffic to us (courtesy of the
 2614                          * firmware AP implementation).
 2615                          */
 2616                         dot1x = PGT_DOT1X_AUTH_ENABLED;
 2617                         /*
 2618                          * WDS mode needs several things to work:
 2619                          * discovery of exactly how creating the WDS
 2620                          * links is meant to function, an interface
 2621                          * for this, and the ability to encode and
 2622                          * decode WDS frames.
 2623                          */
 2624                         if (sc->sc_wds)
 2625                                 config |= PGT_CONFIG_WDS;
 2626                         break;
 2627                 case IEEE80211_M_MONITOR:
 2628                         mode = PGT_MODE_PROMISCUOUS;
 2629                         bsstype = PGT_BSS_TYPE_ANY;
 2630                         dot1x = PGT_DOT1X_AUTH_NONE;
 2631                         break;
 2632                 default:
 2633                         goto badopmode;
 2634                 }
 2635         } else {
 2636 badopmode:
 2637                 mode = PGT_MODE_CLIENT;
 2638                 bsstype = PGT_BSS_TYPE_NONE;
 2639         }
 2640 
 2641         DPRINTF(("%s: current mode is ", sc->sc_dev.dv_xname));
 2642         switch (ic->ic_curmode) {
 2643         case IEEE80211_MODE_11A:
 2644                 profile = PGT_PROFILE_A_ONLY;
 2645                 preamble = PGT_OID_PREAMBLE_MODE_DYNAMIC;
 2646                 DPRINTF(("IEEE80211_MODE_11A\n"));
 2647                 break;
 2648         case IEEE80211_MODE_11B:
 2649                 profile = PGT_PROFILE_B_ONLY;
 2650                 preamble = PGT_OID_PREAMBLE_MODE_LONG;
 2651                 DPRINTF(("IEEE80211_MODE_11B\n"));
 2652                 break;
 2653         case IEEE80211_MODE_11G:
 2654                 profile = PGT_PROFILE_G_ONLY;
 2655                 preamble = PGT_OID_PREAMBLE_MODE_SHORT;
 2656                 DPRINTF(("IEEE80211_MODE_11G\n"));
 2657                 break;
 2658         case IEEE80211_MODE_FH:
 2659                 /* FALLTHROUGH */
 2660         case IEEE80211_MODE_TURBO: /* not handled */
 2661                 /* FALLTHROUGH */
 2662         case IEEE80211_MODE_AUTO:
 2663                 profile = PGT_PROFILE_MIXED_G_WIFI;
 2664                 preamble = PGT_OID_PREAMBLE_MODE_DYNAMIC;
 2665                 DPRINTF(("IEEE80211_MODE_AUTO\n"));
 2666                 break;
 2667         default:
 2668                 panic("unknown mode %d", ic->ic_curmode);
 2669         }
 2670 
 2671         switch (sc->sc_80211_ioc_auth) {
 2672         case IEEE80211_AUTH_NONE:
 2673                 auth = PGT_AUTH_MODE_NONE;
 2674                 break;
 2675         case IEEE80211_AUTH_OPEN:
 2676                 auth = PGT_AUTH_MODE_OPEN;
 2677                 break;
 2678         default:
 2679                 auth = PGT_AUTH_MODE_SHARED;
 2680                 break;
 2681         }
 2682 
 2683         if (sc->sc_ic.ic_flags & IEEE80211_F_WEPON) {
 2684                 wep = 1;
 2685                 exunencrypted = 1;
 2686         } else {
 2687                 wep = 0;
 2688                 exunencrypted = 0;
 2689         }
 2690 
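              /*
               * Convert the OID values to little-endian once, up front;
               * the firmware presumably expects these objects in
               * little-endian byte order.
               */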
 2691         mlme = htole32(PGT_MLME_AUTO_LEVEL_AUTO);
 2692         wep = htole32(wep);
 2693         exunencrypted = htole32(exunencrypted);
 2694         profile = htole32(profile);
 2695         preamble = htole32(preamble);
 2696         bsstype = htole32(bsstype);
 2697         config = htole32(config);
 2698         mode = htole32(mode);
 2699 
 2700         if (!wep || !sc->sc_dot1x)
 2701                 dot1x = PGT_DOT1X_AUTH_NONE;
 2702         dot1x = htole32(dot1x);
 2703         auth = htole32(auth);
 2704 
 2705         if (ic->ic_flags & IEEE80211_F_SHSLOT)
 2706                 slot = htole32(PGT_OID_SLOT_MODE_SHORT);
 2707         else
 2708                 slot = htole32(PGT_OID_SLOT_MODE_DYNAMIC);
 2709 
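              /*
               * A channel of zero is used when keeping the association:
               * the channel OID is simply not set below, so the firmware
               * stays on whatever channel it is currently using.
               */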
 2710         if (ic->ic_des_chan == IEEE80211_CHAN_ANYC) {
 2711                 if (keepassoc)
 2712                         channel = 0;
 2713                 else
 2714                         channel = ieee80211_chan2ieee(ic, ic->ic_bss->ni_chan);
 2715         } else
 2716                 channel = ieee80211_chan2ieee(ic, ic->ic_des_chan);
 2717 
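              /*
               * Build the firmware rate list from the current mode's
               * supported rates; the list is zero-terminated and its
               * final length (i) is passed to the rate OIDs below.
               */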
 2718         DPRINTF(("%s: set rates", sc->sc_dev.dv_xname));
 2719         for (i = 0; i < ic->ic_sup_rates[ic->ic_curmode].rs_nrates; i++) {
 2720                 availrates[i] = ic->ic_sup_rates[ic->ic_curmode].rs_rates[i];
 2721                 DPRINTF((" %d", availrates[i]));
 2722         }
 2723         DPRINTF(("\n"));
 2724         availrates[i++] = 0;
 2725 
 2726         essid.pos_length = min(ic->ic_des_esslen, sizeof(essid.pos_ssid));
 2727         memcpy(&essid.pos_ssid, ic->ic_des_essid, essid.pos_length);
 2728 
 2729         s = splnet();
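              /*
               * Single-pass "loop": the SETOID() calls presumably break
               * out of this block on error, leaving success at 0; if
               * every set completes, success becomes 1.
               */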
 2730         for (success = 0; success == 0; success = 1) {
 2731                 SETOID(PGT_OID_PROFILE, &profile, sizeof(profile));
 2732                 SETOID(PGT_OID_CONFIG, &config, sizeof(config));
 2733                 SETOID(PGT_OID_MLME_AUTO_LEVEL, &mlme, sizeof(mlme));
 2734 
 2735                 if (!IEEE80211_ADDR_EQ(ic->ic_myaddr, ac->ac_enaddr)) {
 2736                         SETOID(PGT_OID_MAC_ADDRESS, ac->ac_enaddr,
 2737                             sizeof(ac->ac_enaddr));
 2738                         IEEE80211_ADDR_COPY(ic->ic_myaddr, ac->ac_enaddr);
 2739                 }
 2740 
 2741                 SETOID(PGT_OID_MODE, &mode, sizeof(mode));
 2742                 SETOID(PGT_OID_BSS_TYPE, &bsstype, sizeof(bsstype));
 2743 
 2744                 if (channel != 0 && channel != IEEE80211_CHAN_ANY)
 2745                         SETOID(PGT_OID_CHANNEL, &channel, sizeof(channel));
 2746 
 2747                 if (ic->ic_flags & IEEE80211_F_DESBSSID) {
 2748                         SETOID(PGT_OID_BSSID, ic->ic_des_bssid,
 2749                             sizeof(ic->ic_des_bssid));
 2750                 } else if (keepassoc) {
 2751                         SETOID(PGT_OID_BSSID, ic->ic_bss->ni_bssid,
 2752                             sizeof(ic->ic_bss->ni_bssid));
 2753                 }
 2754 
 2755                 SETOID(PGT_OID_SSID, &essid, sizeof(essid));
 2756 
 2757                 if (ic->ic_des_esslen > 0)
 2758                         SETOID(PGT_OID_SSID_OVERRIDE, &essid, sizeof(essid));
 2759 
 2760                 SETOID(PGT_OID_RATES, &availrates, i);
 2761                 SETOID(PGT_OID_EXTENDED_RATES, &availrates, i);
 2762                 SETOID(PGT_OID_PREAMBLE_MODE, &preamble, sizeof(preamble));
 2763                 SETOID(PGT_OID_SLOT_MODE, &slot, sizeof(slot));
 2764                 SETOID(PGT_OID_AUTH_MODE, &auth, sizeof(auth));
 2765                 SETOID(PGT_OID_EXCLUDE_UNENCRYPTED, &exunencrypted,
 2766                     sizeof(exunencrypted));
 2767                 SETOID(PGT_OID_DOT1X, &dot1x, sizeof(dot1x));
 2768                 SETOID(PGT_OID_PRIVACY_INVOKED, &wep, sizeof(wep));
 2769                 /*
 2770                  * Setting WEP key(s)
 2771                  */
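                      /*
                       * Each of the four firmware default-key slots is
                       * loaded from ic_nw_keys[0..3], with the key length
                       * clamped to both the firmware key object and
                       * IEEE80211_KEYBUF_SIZE.
                       */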
 2772                 if (letoh32(wep) != 0) {
 2773                         keyobj.pok_type = PGT_OBJ_KEY_TYPE_WEP;
 2774                         /* key 1 */
 2775                         keyobj.pok_length = min(sizeof(keyobj.pok_key),
 2776                             IEEE80211_KEYBUF_SIZE);
 2777                         keyobj.pok_length = min(keyobj.pok_length,
 2778                             ic->ic_nw_keys[0].k_len);
 2779                         bcopy(ic->ic_nw_keys[0].k_key, keyobj.pok_key,
 2780                             keyobj.pok_length);
 2781                         SETOID(PGT_OID_DEFAULT_KEY0, &keyobj, sizeof(keyobj));
 2782                         /* key 2 */
 2783                         keyobj.pok_length = min(sizeof(keyobj.pok_key),
 2784                             IEEE80211_KEYBUF_SIZE);
 2785                         keyobj.pok_length = min(keyobj.pok_length,
 2786                             ic->ic_nw_keys[1].k_len);
 2787                         bcopy(ic->ic_nw_keys[1].k_key, keyobj.pok_key,
 2788                             keyobj.pok_length);
 2789                         SETOID(PGT_OID_DEFAULT_KEY1, &keyobj, sizeof(keyobj));
 2790                         /* key 3 */
 2791                         keyobj.pok_length = min(sizeof(keyobj.pok_key),
 2792                             IEEE80211_KEYBUF_SIZE);
 2793                         keyobj.pok_length = min(keyobj.pok_length,
 2794                             ic->ic_nw_keys[2].k_len);
 2795                         bcopy(ic->ic_nw_keys[2].k_key, keyobj.pok_key,
 2796                             keyobj.pok_length);
 2797                         SETOID(PGT_OID_DEFAULT_KEY2, &keyobj, sizeof(keyobj));
 2798                         /* key 4 */
 2799                         keyobj.pok_length = min(sizeof(keyobj.pok_key),
 2800                             IEEE80211_KEYBUF_SIZE);
 2801                         keyobj.pok_length = min(keyobj.pok_length,
 2802                             ic->ic_nw_keys[3].k_len);
 2803                         bcopy(ic->ic_nw_keys[3].k_key, keyobj.pok_key,
 2804                             keyobj.pok_length);
 2805                         SETOID(PGT_OID_DEFAULT_KEY3, &keyobj, sizeof(keyobj));
 2806 
 2807                         wepkey = htole32(ic->ic_wep_txkey);
 2808                         SETOID(PGT_OID_DEFAULT_KEYNUM, &wepkey, sizeof(wepkey));
 2809                 }
 2810                 /* set mode again to commit */
 2811                 SETOID(PGT_OID_MODE, &mode, sizeof(mode));
 2812         }
 2813         splx(s);
 2814 
 2815         if (success) {
 2816                 if (shouldbeup && keepnodes)
 2817                         sc->sc_flags |= SC_NOFREE_ALLNODES;
 2818                 if (shouldbeup)
 2819                         ieee80211_new_state(ic, IEEE80211_S_SCAN, -1);
 2820                 else
 2821                         ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
 2822         } else {
 2823                 printf("%s: problem setting modes\n", sc->sc_dev.dv_xname);
 2824                 ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
 2825         }
 2826 }
 2827 
 2828 void
 2829 pgt_hostap_handle_mlme(struct pgt_softc *sc, uint32_t oid,
 2830     struct pgt_obj_mlme *mlme)
 2831 {
 2832         struct ieee80211com *ic = &sc->sc_ic;
 2833         struct pgt_ieee80211_node *pin;
 2834         struct ieee80211_node *ni;
 2835 
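              /*
               * Mirror the firmware's association decisions in the
               * net80211 node table: create or update a node on an
               * associate event and release it on a disassociate.
               */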
 2836         ni = ieee80211_find_node(ic, mlme->pom_address);
 2837         pin = (struct pgt_ieee80211_node *)ni;
 2838         switch (oid) {
 2839         case PGT_OID_DISASSOCIATE:
 2840                 if (ni != NULL)
 2841                         ieee80211_release_node(&sc->sc_ic, ni);
 2842                 break;
 2843         case PGT_OID_ASSOCIATE:
 2844                 if (ni == NULL) {
 2845                         ni = ieee80211_dup_bss(ic, mlme->pom_address);
 2846                         if (ni == NULL)
 2847                                 break;
 2848                         ic->ic_newassoc(ic, ni, 1);
 2849                         pin = (struct pgt_ieee80211_node *)ni;
 2850                 }
 2851                 ni->ni_associd = letoh16(mlme->pom_id);
 2852                 pin->pin_mlme_state = letoh16(mlme->pom_state);
 2853                 break;
 2854         default:
 2855                 if (pin != NULL)
 2856                         pin->pin_mlme_state = letoh16(mlme->pom_state);
 2857                 break;
 2858         }
 2859 }
 2860 
 2861 /*
 2862  * Either in response to an event (trap) or periodically, synchronize
 2863  * our idea of the network we are part of from what the hardware reports.
 2864  */
 2865 void
 2866 pgt_update_sw_from_hw(struct pgt_softc *sc, struct pgt_async_trap *pa,
 2867             struct mbuf *args)
 2868 {
 2869         struct ieee80211com *ic = &sc->sc_ic;
 2870         struct pgt_obj_ssid ssid;
 2871         struct pgt_obj_bss bss;
 2872         uint32_t channel, noise, ls;
 2873         int error, s;
 2874 
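              /*
               * A non-NULL trap argument means we were called from the
               * trap path; the mbuf carries the trap OID followed by
               * its payload.
               */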
 2875         if (pa != NULL) {
 2876                 struct pgt_obj_mlme *mlme;
 2877                 uint32_t oid;
 2878 
 2879                 oid = *mtod(args, uint32_t *);
 2880                 m_adj(args, sizeof(uint32_t));
 2881                 if (sc->sc_debug & SC_DEBUG_TRAP)
 2882                         DPRINTF(("%s: trap: oid %#x len %u\n",
 2883                             sc->sc_dev.dv_xname, oid, args->m_len));
 2884                 switch (oid) {
 2885                 case PGT_OID_LINK_STATE:
 2886                         if (args->m_len < sizeof(uint32_t))
 2887                                 break;
 2888                         ls = letoh32(*mtod(args, uint32_t *));
 2889                         if (sc->sc_debug & (SC_DEBUG_TRAP | SC_DEBUG_LINK))
 2890                                 DPRINTF(("%s: %s: link rate %u\n",
 2891                                     sc->sc_dev.dv_xname, __func__, ls));
 2892                         if (ls)
 2893                                 ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
 2894                         else
 2895                                 ieee80211_new_state(ic, IEEE80211_S_SCAN, -1);
 2896                         goto gotlinkstate;
 2897                 case PGT_OID_DEAUTHENTICATE:
 2898                 case PGT_OID_AUTHENTICATE:
 2899                 case PGT_OID_DISASSOCIATE:
 2900                 case PGT_OID_ASSOCIATE:
 2901                         if (args->m_len < sizeof(struct pgt_obj_mlme))
 2902                                 break;
 2903                         mlme = mtod(args, struct pgt_obj_mlme *);
 2904                         if (sc->sc_debug & SC_DEBUG_TRAP)
 2905                                 DPRINTF(("%s: mlme: address "
 2906                                     "%s id 0x%02x state 0x%02x code 0x%02x\n",
 2907                                     sc->sc_dev.dv_xname,
 2908                                     ether_sprintf(mlme->pom_address),
 2909                                     letoh16(mlme->pom_id),
 2910                                     letoh16(mlme->pom_state),
 2911                                     letoh16(mlme->pom_code)));
 2912                         if (ic->ic_opmode == IEEE80211_M_HOSTAP)
 2913                                 pgt_hostap_handle_mlme(sc, oid, mlme);
 2914                         break;
 2915                 }
 2916                 return;
 2917         }
 2918         if (ic->ic_state == IEEE80211_S_SCAN) {
 2919                 s = splnet();
 2920                 error = pgt_oid_get(sc, PGT_OID_LINK_STATE, &ls, sizeof(ls));
 2921                 splx(s);
 2922                 if (error)
 2923                         return;
 2924                 DPRINTF(("%s: up_sw_from_hw: link %u\n", sc->sc_dev.dv_xname,
 2925                     htole32(ls)));
 2926                 if (ls != 0)
 2927                         ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
 2928         }
 2929 
 2930 gotlinkstate:
 2931         s = splnet();
 2932         if (pgt_oid_get(sc, PGT_OID_NOISE_FLOOR, &noise, sizeof(noise)) != 0)
 2933                 goto out;
 2934         sc->sc_noise = letoh32(noise);
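              /*
               * While associated, read the current channel, BSSID, RSSI
               * and SSID back from the firmware so net80211's notion of
               * the BSS stays in sync.
               */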
 2935         if (ic->ic_state == IEEE80211_S_RUN) {
 2936                 if (pgt_oid_get(sc, PGT_OID_CHANNEL, &channel,
 2937                     sizeof(channel)) != 0)
 2938                         goto out;
 2939                 channel = min(letoh32(channel), IEEE80211_CHAN_MAX);
 2940                 ic->ic_bss->ni_chan = &ic->ic_channels[channel];
 2941                 if (pgt_oid_get(sc, PGT_OID_BSSID, ic->ic_bss->ni_bssid,
 2942                     sizeof(ic->ic_bss->ni_bssid)) != 0)
 2943                         goto out;
 2944                 IEEE80211_ADDR_COPY(&bss.pob_address, ic->ic_bss->ni_bssid);
 2945                 error = pgt_oid_retrieve(sc, PGT_OID_BSS_FIND, &bss,
 2946                     sizeof(bss));
 2947                 if (error == 0)
 2948                         ic->ic_bss->ni_rssi = bss.pob_rssi;
 2949                 else if (error != EPERM)
 2950                         goto out;
 2951                 error = pgt_oid_get(sc, PGT_OID_SSID, &ssid, sizeof(ssid));
 2952                 if (error)
 2953                         goto out;
 2954                 ic->ic_bss->ni_esslen = min(ssid.pos_length,
 2955                     sizeof(ic->ic_bss->ni_essid));
 2956                 memcpy(ic->ic_bss->ni_essid, ssid.pos_ssid,
 2957                     ssid.pos_length);
 2958         }
 2959 
 2960 out:
 2961         splx(s);
 2962 }
 2963 
 2964 int
 2965 pgt_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
 2966 {
 2967         struct pgt_softc *sc = ic->ic_if.if_softc;
 2968         enum ieee80211_state ostate;
 2969 
 2970         ostate = ic->ic_state;
 2971 
 2972         DPRINTF(("%s: newstate %s -> %s\n", sc->sc_dev.dv_xname,
 2973             ieee80211_state_name[ostate], ieee80211_state_name[nstate]));
 2974 
 2975         switch (nstate) {
 2976         case IEEE80211_S_INIT:
 2977                 if (sc->sc_dirtyq_count[PGT_QUEUE_DATA_LOW_TX] == 0)
 2978                         ic->ic_if.if_timer = 0;
 2979                 ic->ic_mgt_timer = 0;
 2980                 ic->ic_flags &= ~IEEE80211_F_SIBSS;
 2981                 if (ic->ic_wep_ctx != NULL) {
 2982                         free(ic->ic_wep_ctx, M_DEVBUF);
 2983                         ic->ic_wep_ctx = NULL;
 2984                 }
 2985                 ieee80211_free_allnodes(ic);
 2986                 break;
 2987         case IEEE80211_S_SCAN:
 2988                 ic->ic_if.if_timer = 1;
 2989                 ic->ic_mgt_timer = 0;
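                      /*
                       * SC_NOFREE_ALLNODES is set by pgt_update_hw_from_sw()
                       * when a "soft" reset should keep the existing
                       * associations; consume it here instead of freeing
                       * the node table.
                       */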
 2990                 if (sc->sc_flags & SC_NOFREE_ALLNODES)
 2991                         sc->sc_flags &= ~SC_NOFREE_ALLNODES;
 2992                 else
 2993                         ieee80211_free_allnodes(ic);
 2994 
 2995                 /* Just use any old channel; we override it anyway. */
 2996                 if (ic->ic_opmode == IEEE80211_M_HOSTAP)
 2997                         ieee80211_create_ibss(ic, ic->ic_ibss_chan);
 2998                 break;
 2999         case IEEE80211_S_RUN:
 3000                 ic->ic_if.if_timer = 1;
 3001                 break;
 3002         default:
 3003                 break;
 3004         }
 3005 
 3006         return (sc->sc_newstate(ic, nstate, arg));
 3007 }
 3008 
 3009 int
 3010 pgt_drain_tx_queue(struct pgt_softc *sc, enum pgt_queue pq)
 3011 {
 3012         int wokeup = 0;
 3013 
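              /*
               * Catch the device's current-fragment index up to the
               * driver's, in effect discarding anything still pending on
               * this queue; the dmamap syncs bracket the update of the
               * shared control block.  The dirty descriptors are then
               * recycled onto the free list below.
               */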
 3014         bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
 3015             sc->sc_cbdmam->dm_mapsize,
 3016             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_PREWRITE);
 3017         sc->sc_cb->pcb_device_curfrag[pq] =
 3018             sc->sc_cb->pcb_driver_curfrag[pq];
 3019         bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
 3020             sc->sc_cbdmam->dm_mapsize,
 3021             BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_PREREAD);
 3022         while (!TAILQ_EMPTY(&sc->sc_dirtyq[pq])) {
 3023                 struct pgt_desc *pd;
 3024 
 3025                 pd = TAILQ_FIRST(&sc->sc_dirtyq[pq]);
 3026                 TAILQ_REMOVE(&sc->sc_dirtyq[pq], pd, pd_link);
 3027                 sc->sc_dirtyq_count[pq]--;
 3028                 TAILQ_INSERT_TAIL(&sc->sc_freeq[pq], pd, pd_link);
 3029                 sc->sc_freeq_count[pq]++;
 3030                 pgt_unload_tx_desc_frag(sc, pd);
 3031                 if (sc->sc_debug & SC_DEBUG_QUEUES)
 3032                         DPRINTF(("%s: queue: tx %u <- [%u] (drained)\n",
 3033                             sc->sc_dev.dv_xname, pd->pd_fragnum, pq));
 3034                 wokeup++;
 3035                 if (pgt_queue_is_data(pq))
 3036                         sc->sc_ic.ic_if.if_oerrors++;
 3037         }
 3038 
 3039         return (wokeup);
 3040 }
 3041 
 3042 int
 3043 pgt_dma_alloc(struct pgt_softc *sc)
 3044 {
 3045         size_t size;
 3046         int i, error, nsegs;
 3047 
 3048         for (i = 0; i < PGT_QUEUE_COUNT; i++) {
 3049                 TAILQ_INIT(&sc->sc_freeq[i]);
 3050                 TAILQ_INIT(&sc->sc_dirtyq[i]);
 3051         }
 3052 
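              /*
               * Each DMA region below goes through the usual bus_dma(9)
               * sequence: bus_dmamap_create() for the map, bus_dmamem_alloc()
               * for the memory, bus_dmamem_map() for a kernel virtual
               * address and bus_dmamap_load() for the bus address.
               */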
 3053         /*
 3054          * control block
 3055          */
 3056         size = sizeof(struct pgt_control_block);
 3057 
 3058         error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
 3059             BUS_DMA_NOWAIT, &sc->sc_cbdmam);
 3060         if (error != 0) {
 3061                 printf("%s: can not create DMA tag for control block\n",
 3062                     sc->sc_dev.dv_xname);
 3063                 goto out;
 3064         }
 3065 
 3066         error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE,
 3067             0, &sc->sc_cbdmas, 1, &nsegs, BUS_DMA_NOWAIT);
 3068         if (error != 0) {
 3069                 printf("%s: can not allocate DMA memory for control block\n",
 3070                     sc->sc_dev.dv_xname);
 3071                 goto out;
 3072         }
 3073 
 3074         error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cbdmas, nsegs,
 3075             size, (caddr_t *)&sc->sc_cb, BUS_DMA_NOWAIT);
 3076         if (error != 0) {
 3077                 printf("%s: can not map DMA memory for control block\n",
 3078                     sc->sc_dev.dv_xname);
 3079                 goto out;
 3080         }
 3081         bzero(sc->sc_cb, size);
 3082 
 3083         error = bus_dmamap_load(sc->sc_dmat, sc->sc_cbdmam,
 3084             sc->sc_cb, size, NULL, BUS_DMA_NOWAIT);
 3085         if (error != 0) {
 3086                 printf("%s: can not load DMA map for control block\n",
 3087                     sc->sc_dev.dv_xname);
 3088                 goto out;
 3089         }
 3090 
 3091         /*
 3092          * powersave
 3093          */
 3094         size = PGT_FRAG_SIZE * PGT_PSM_BUFFER_FRAME_COUNT;
 3095 
 3096         error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
 3097             BUS_DMA_ALLOCNOW, &sc->sc_psmdmam);
 3098         if (error != 0) {
 3099                 printf("%s: can not create DMA tag for powersave\n",
 3100                     sc->sc_dev.dv_xname);
 3101                 goto out;
 3102         }
 3103 
 3104         error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE,
 3105            0, &sc->sc_psmdmas, 1, &nsegs, BUS_DMA_NOWAIT);
 3106         if (error != 0) {
 3107                 printf("%s: can not allocate DMA memory for powersave\n",
 3108                     sc->sc_dev.dv_xname);
 3109                 goto out;
 3110         }
 3111 
 3112         error = bus_dmamem_map(sc->sc_dmat, &sc->sc_psmdmas, nsegs,
 3113             size, (caddr_t *)&sc->sc_psmbuf, BUS_DMA_NOWAIT);
 3114         if (error != 0) {
 3115                 printf("%s: can not map DMA memory for powersave\n",
 3116                     sc->sc_dev.dv_xname);
 3117                 goto out;
 3118         }
 3119         bzero(sc->sc_psmbuf, size);
 3120 
 3121         error = bus_dmamap_load(sc->sc_dmat, sc->sc_psmdmam,
 3122             sc->sc_psmbuf, size, NULL, BUS_DMA_WAITOK);
 3123         if (error != 0) {
 3124                 printf("%s: can not load DMA map for powersave\n",
 3125                     sc->sc_dev.dv_xname);
 3126                 goto out;
 3127         }
 3128 
 3129         /*
 3130          * fragments
 3131          */
 3132         error = pgt_dma_alloc_queue(sc, PGT_QUEUE_DATA_LOW_RX);
 3133         if (error != 0)
 3134                 goto out;
 3135 
 3136         error = pgt_dma_alloc_queue(sc, PGT_QUEUE_DATA_LOW_TX);
 3137         if (error != 0)
 3138                 goto out;
 3139 
 3140         error = pgt_dma_alloc_queue(sc, PGT_QUEUE_DATA_HIGH_RX);
 3141         if (error != 0)
 3142                 goto out;
 3143 
 3144         error = pgt_dma_alloc_queue(sc, PGT_QUEUE_DATA_HIGH_TX);
 3145         if (error != 0)
 3146                 goto out;
 3147 
 3148         error = pgt_dma_alloc_queue(sc, PGT_QUEUE_MGMT_RX);
 3149         if (error != 0)
 3150                 goto out;
 3151 
 3152         error = pgt_dma_alloc_queue(sc, PGT_QUEUE_MGMT_TX);
 3153         if (error != 0)
 3154                 goto out;
 3155 
 3156 out:
 3157         if (error) {
 3158                 printf("%s: error in DMA allocation\n", sc->sc_dev.dv_xname);
 3159                 pgt_dma_free(sc);
 3160         }
 3161 
 3162         return (error);
 3163 }
 3164 
 3165 int
 3166 pgt_dma_alloc_queue(struct pgt_softc *sc, enum pgt_queue pq)
 3167 {
 3168         struct pgt_desc *pd;
 3169         struct pgt_frag *pcbqueue;
 3170         size_t i, qsize;
 3171         int error, nsegs;
 3172 
 3173         switch (pq) {
 3174                 case PGT_QUEUE_DATA_LOW_RX:
 3175                         pcbqueue = sc->sc_cb->pcb_data_low_rx;
 3176                         qsize = PGT_QUEUE_DATA_RX_SIZE;
 3177                         break;
 3178                 case PGT_QUEUE_DATA_LOW_TX:
 3179                         pcbqueue = sc->sc_cb->pcb_data_low_tx;
 3180                         qsize = PGT_QUEUE_DATA_TX_SIZE;
 3181                         break;
 3182                 case PGT_QUEUE_DATA_HIGH_RX:
 3183                         pcbqueue = sc->sc_cb->pcb_data_high_rx;
 3184                         qsize = PGT_QUEUE_DATA_RX_SIZE;
 3185                         break;
 3186                 case PGT_QUEUE_DATA_HIGH_TX:
 3187                         pcbqueue = sc->sc_cb->pcb_data_high_tx;
 3188                         qsize = PGT_QUEUE_DATA_TX_SIZE;
 3189                         break;
 3190                 case PGT_QUEUE_MGMT_RX:
 3191                         pcbqueue = sc->sc_cb->pcb_mgmt_rx;
 3192                         qsize = PGT_QUEUE_MGMT_SIZE;
 3193                         break;
 3194                 case PGT_QUEUE_MGMT_TX:
 3195                         pcbqueue = sc->sc_cb->pcb_mgmt_tx;
 3196                         qsize = PGT_QUEUE_MGMT_SIZE;
 3197                         break;
 3198         }
 3199 
 3200         for (i = 0; i < qsize; i++) {
 3201                 pd = malloc(sizeof(*pd), M_DEVBUF, M_WAITOK);
 3202 
 3203                 error = bus_dmamap_create(sc->sc_dmat, PGT_FRAG_SIZE, 1,
 3204                     PGT_FRAG_SIZE, 0, BUS_DMA_ALLOCNOW, &pd->pd_dmam);
 3205                 if (error != 0) {
 3206                         printf("%s: can not create DMA tag for fragment\n",
 3207                             sc->sc_dev.dv_xname);
 3208                         free(pd, M_DEVBUF);
 3209                         break;
 3210                 }
 3211 
 3212                 error = bus_dmamem_alloc(sc->sc_dmat, PGT_FRAG_SIZE, PAGE_SIZE,
 3213                     0, &pd->pd_dmas, 1, &nsegs, BUS_DMA_WAITOK);
 3214                 if (error != 0) {
 3215                         printf("%s: error alloc frag %u on queue %u\n",
 3216                             sc->sc_dev.dv_xname, i, pq);
 3217                         free(pd, M_DEVBUF);
 3218                         break;
 3219                 }
 3220 
 3221                 error = bus_dmamem_map(sc->sc_dmat, &pd->pd_dmas, nsegs,
 3222                     PGT_FRAG_SIZE, (caddr_t *)&pd->pd_mem, BUS_DMA_WAITOK);
 3223                 if (error != 0) {
 3224                         printf("%s: error map frag %u on queue %u\n",
 3225                             sc->sc_dev.dv_xname, i, pq);
 3226                         free(pd, M_DEVBUF);
 3227                         break;
 3228                 }
 3229 
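                      /*
                       * Only receive fragments are loaded here, since the
                       * device DMAs into them as soon as they are queued;
                       * transmit fragments appear to be loaded per packet
                       * via pgt_load_tx_desc_frag().
                       */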
 3230                 if (pgt_queue_is_rx(pq)) {
 3231                         error = bus_dmamap_load(sc->sc_dmat, pd->pd_dmam,
 3232                             pd->pd_mem, PGT_FRAG_SIZE, NULL, BUS_DMA_NOWAIT);
 3233                         if (error != 0) {
 3234                                 printf("%s: error load frag %u on queue %u\n",
 3235                                     sc->sc_dev.dv_xname, i, pq);
 3236                                 bus_dmamem_free(sc->sc_dmat, &pd->pd_dmas,
 3237                                     nsegs);
 3238                                 free(pd, M_DEVBUF);
 3239                                 break;
 3240                         }
 3241                         pd->pd_dmaaddr = pd->pd_dmam->dm_segs[0].ds_addr;
 3242                 }
 3243                 TAILQ_INSERT_TAIL(&sc->sc_freeq[pq], pd, pd_link);
 3244         }
 3245 
 3246         return (error);
 3247 }
 3248 
 3249 void
 3250 pgt_dma_free(struct pgt_softc *sc)
 3251 {
 3252         /*
 3253          * fragments
 3254          */
 3255         if (sc->sc_dmat != NULL) {
 3256                 pgt_dma_free_queue(sc, PGT_QUEUE_DATA_LOW_RX);
 3257                 pgt_dma_free_queue(sc, PGT_QUEUE_DATA_LOW_TX);
 3258                 pgt_dma_free_queue(sc, PGT_QUEUE_DATA_HIGH_RX);
 3259                 pgt_dma_free_queue(sc, PGT_QUEUE_DATA_HIGH_TX);
 3260                 pgt_dma_free_queue(sc, PGT_QUEUE_MGMT_RX);
 3261                 pgt_dma_free_queue(sc, PGT_QUEUE_MGMT_TX);
 3262         }
 3263 
 3264         /*
 3265          * powersave
 3266          */
 3267         if (sc->sc_psmbuf != NULL) {
 3268                 bus_dmamap_unload(sc->sc_dmat, sc->sc_psmdmam);
 3269                 bus_dmamem_free(sc->sc_dmat, &sc->sc_psmdmas, 1);
 3270                 sc->sc_psmbuf = NULL;
 3271                 sc->sc_psmdmam = NULL;
 3272         }
 3273 
 3274         /*
 3275          * control block
 3276          */
 3277         if (sc->sc_cb != NULL) {
 3278                 bus_dmamap_unload(sc->sc_dmat, sc->sc_cbdmam);
 3279                 bus_dmamem_free(sc->sc_dmat, &sc->sc_cbdmas, 1);
 3280                 sc->sc_cb = NULL;
 3281                 sc->sc_cbdmam = NULL;
 3282         }
 3283 }
 3284 
 3285 void
 3286 pgt_dma_free_queue(struct pgt_softc *sc, enum pgt_queue pq)
 3287 {
 3288         struct pgt_desc *pd;
 3289 
 3290         while (!TAILQ_EMPTY(&sc->sc_freeq[pq])) {
 3291                 pd = TAILQ_FIRST(&sc->sc_freeq[pq]);
 3292                 TAILQ_REMOVE(&sc->sc_freeq[pq], pd, pd_link);
 3293                 if (pd->pd_dmam != NULL) {
 3294                         bus_dmamap_unload(sc->sc_dmat, pd->pd_dmam);
 3295                         pd->pd_dmam = NULL;
 3296                 }
 3297                 bus_dmamem_free(sc->sc_dmat, &pd->pd_dmas, 1);
 3298                 free(pd, M_DEVBUF);
 3299         }
 3300 }
 3301 
 3302 void
 3303 pgt_shutdown(void *arg)
 3304 {
 3305         struct pgt_softc *sc = arg;
 3306 
 3307         DPRINTF(("%s: %s\n", sc->sc_dev.dv_xname, __func__));
 3308 
 3309         pgt_stop(sc, SC_DYING);
 3310 }
 3311 
 3312 void
 3313 pgt_power(int why, void *arg)
 3314 {
 3315         struct pgt_softc *sc = arg;
 3316         struct ifnet *ifp = &sc->sc_ic.ic_if;
 3317         int s;
 3318 
 3319         DPRINTF(("%s: %s(%d)\n", sc->sc_dev.dv_xname, __func__, why));
 3320 
 3321         s = splnet();
 3322 
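              /*
               * On suspend or standby the device is simply stopped; on
               * resume it is reset and, if the interface was up but not
               * yet running, brought back up via pgt_init().
               */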
 3323         switch (why) {
 3324         case PWR_STANDBY:
 3325         case PWR_SUSPEND:
 3326                 pgt_stop(sc, SC_NEEDS_RESET);
 3327                 pgt_update_hw_from_sw(sc, 0, 0);
 3328 
 3329                 if (sc->sc_power != NULL)
 3330                         (*sc->sc_power)(sc, why);
 3331                 break;
 3332         case PWR_RESUME:
 3333                 if (sc->sc_power != NULL)
 3334                         (*sc->sc_power)(sc, why);
 3335 
 3336                 pgt_stop(sc, SC_NEEDS_RESET);
 3337                 pgt_update_hw_from_sw(sc, 0, 0);
 3338 
 3339                 if ((ifp->if_flags & IFF_UP) &&
 3340                     !(ifp->if_flags & IFF_RUNNING)) {
 3341                         pgt_init(ifp);
 3342                         pgt_update_hw_from_sw(sc, 0, 0);
 3343                 }
 3344                 break;
 3345         }
 3346 
 3347         splx(s);
 3348 }
