This source file includes the following definitions:
- vic_match
- vic_attach
- vic_map_pci
- vic_query
- vic_alloc_data
- vic_init_data
- vic_uninit_data
- vic_link_state
- vic_shutdown
- vic_intr
- vic_rx_proc
- vic_tx_proc
- vic_iff
- vic_getlladdr
- vic_setlladdr
- vic_media_change
- vic_media_status
- vic_start
- vic_load_txb
- vic_watchdog
- vic_ioctl
- vic_init
- vic_stop
- vic_alloc_mbuf
- vic_tick
- vic_read
- vic_write
- vic_read_cmd
- vic_alloc_dmamem
- vic_free_dmamem
#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/malloc.h>
#include <sys/timeout.h>
#include <sys/device.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#define VIC_PCI_BAR		PCI_MAPREG_START

#define VIC_MAGIC		0xbabe864f

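/*
 * Register space. VIC_DATA_ADDR and VIC_DATA_LENGTH tell the device where
 * the shared data area lives; a read of VIC_Tx_ADDR kicks the transmitter.
 */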
#define VIC_DATA_ADDR		0x0000
#define VIC_DATA_LENGTH		0x0004
#define VIC_Tx_ADDR		0x0008

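/* Command register and its commands/flags */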
#define VIC_CMD			0x000c
#define VIC_CMD_INTR_ACK	0x0001
#define VIC_CMD_MCASTFIL	0x0002
#define VIC_CMD_MCASTFIL_LENGTH	2
#define VIC_CMD_IFF		0x0004
#define VIC_CMD_IFF_PROMISC	0x0001
#define VIC_CMD_IFF_BROADCAST	0x0002
#define VIC_CMD_IFF_MULTICAST	0x0004
#define VIC_CMD_INTR_DISABLE	0x0020
#define VIC_CMD_INTR_ENABLE	0x0040
#define VIC_CMD_Tx_DONE		0x0100
#define VIC_CMD_NUM_Rx_BUF	0x0200
#define VIC_CMD_NUM_Tx_BUF	0x0400
#define VIC_CMD_NUM_PINNED_BUF	0x0800
#define VIC_CMD_HWCAP		0x1000
#define VIC_CMD_HWCAP_SG	(1<<0)
#define VIC_CMD_HWCAP_CSUM_IPv4	(1<<1)
#define VIC_CMD_HWCAP_CSUM_ALL	(1<<3)
#define VIC_CMD_HWCAP_CSUM \
    (VIC_CMD_HWCAP_CSUM_IPv4 | VIC_CMD_HWCAP_CSUM_ALL)
#define VIC_CMD_HWCAP_DMA_HIGH	(1<<4)
#define VIC_CMD_HWCAP_TOE	(1<<5)
#define VIC_CMD_HWCAP_TSO	(1<<6)
#define VIC_CMD_HWCAP_TSO_SW	(1<<7)
#define VIC_CMD_HWCAP_VPROM	(1<<8)
#define VIC_CMD_HWCAP_VLAN_Tx	(1<<9)
#define VIC_CMD_HWCAP_VLAN_Rx	(1<<10)
#define VIC_CMD_HWCAP_VLAN_SW	(1<<11)
#define VIC_CMD_HWCAP_VLAN \
    (VIC_CMD_HWCAP_VLAN_Tx | VIC_CMD_HWCAP_VLAN_Rx | \
    VIC_CMD_HWCAP_VLAN_SW)
#define VIC_CMD_HWCAP_BITS \
    "\20\01SG\02CSUM4\03CSUM\04HDMA\05TOE\06TSO" \
    "\07TSOSW\10VPROM\13VLANTx\14VLANRx\15VLANSW"
#define VIC_CMD_FEATURE		0x2000
#define VIC_CMD_FEATURE_0_Tx	(1<<0)
#define VIC_CMD_FEATURE_TSO	(1<<1)

#define VIC_LLADDR		0x0010
#define VIC_VERSION_MINOR	0x0018
#define VIC_VERSION_MAJOR	0x001c
#define VIC_VERSION_MAJOR_M	0xffff0000

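/* Status register; VIC_STATUS_CONNECTED drives the link state */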
#define VIC_STATUS		0x0020
#define VIC_STATUS_CONNECTED	(1<<0)
#define VIC_STATUS_ENABLED	(1<<1)

#define VIC_TOE_ADDR		0x0024

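/* Virtual PROM; holds the hardware address when VIC_CMD_HWCAP_VPROM is set */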
#define VIC_VPROM		0x0028
#define VIC_VPROM_LENGTH	6

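/* Scatter/gather elements used by the Tx descriptors below */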
struct vic_sg {
	u_int32_t	sg_addr_low;
	u_int16_t	sg_addr_high;
	u_int16_t	sg_length;
} __packed;

#define VIC_SG_MAX		6
#define VIC_SG_ADDR_MACH	0
#define VIC_SG_ADDR_PHYS	1
#define VIC_SG_ADDR_VIRT	3

struct vic_sgarray {
	u_int16_t	sa_addr_type;
	u_int16_t	sa_length;
	struct vic_sg	sa_sg[VIC_SG_MAX];
} __packed;

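/* Rx descriptor; rx_owner hands the buffer back and forth with the NIC */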
struct vic_rxdesc {
	u_int64_t	rx_physaddr;
	u_int32_t	rx_buflength;
	u_int32_t	rx_length;
	u_int16_t	rx_owner;
	u_int16_t	rx_flags;
	u_int32_t	rx_priv;
} __packed;

#define VIC_RX_FLAGS_CSUMHW_OK	0x0001

struct vic_txdesc {
	u_int16_t	tx_flags;
	u_int16_t	tx_owner;
	u_int32_t	tx_priv;
	u_int32_t	tx_tsomss;
	struct vic_sgarray tx_sa;
} __packed;

#define VIC_TX_FLAGS_KEEP	0x0001
#define VIC_TX_FLAGS_TXURN	0x0002
#define VIC_TX_FLAGS_CSUMHW	0x0004
#define VIC_TX_FLAGS_TSO	0x0008
#define VIC_TX_FLAGS_PINNED	0x0010
#define VIC_TX_FLAGS_QRETRY	0x1000

struct vic_stats {
	u_int32_t	vs_tx_count;
	u_int32_t	vs_tx_packets;
	u_int32_t	vs_tx_0copy;
	u_int32_t	vs_tx_copy;
	u_int32_t	vs_tx_maxpending;
	u_int32_t	vs_tx_stopped;
	u_int32_t	vs_tx_overrun;
	u_int32_t	vs_intr;
	u_int32_t	vs_rx_packets;
	u_int32_t	vs_rx_underrun;
} __packed;

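/*
 * Layout of the shared data area. The driver and the device exchange ring
 * indexes, interface flags, the multicast filter, and statistics through
 * this structure.
 */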
struct vic_data {
	u_int32_t	vd_magic;

	u_int32_t	vd_rx_length;
	u_int32_t	vd_rx_nextidx;
	u_int32_t	vd_rx_length2;
	u_int32_t	vd_rx_nextidx2;

	u_int32_t	vd_irq;
	u_int32_t	vd_iff;

	u_int32_t	vd_mcastfil[VIC_CMD_MCASTFIL_LENGTH];

	u_int32_t	vd_reserved1[1];

	u_int32_t	vd_tx_length;
	u_int32_t	vd_tx_curidx;
	u_int32_t	vd_tx_nextidx;
	u_int32_t	vd_tx_stopped;
	u_int32_t	vd_tx_triggerlvl;
	u_int32_t	vd_tx_queued;
	u_int32_t	vd_tx_minlength;

	u_int32_t	vd_reserved2[6];

	u_int32_t	vd_rx_saved_nextidx;
	u_int32_t	vd_rx_saved_nextidx2;
	u_int32_t	vd_tx_saved_nextidx;

	u_int32_t	vd_length;
	u_int32_t	vd_rx_offset;
	u_int32_t	vd_rx_offset2;
	u_int32_t	vd_tx_offset;
	u_int32_t	vd_debug;
	u_int32_t	vd_tx_physaddr;
	u_int32_t	vd_tx_physaddr_length;
	u_int32_t	vd_tx_maxlength;

	struct vic_stats vd_stats;
} __packed;

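/* Descriptor ownership states */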
#define VIC_OWNER_DRIVER	0
#define VIC_OWNER_DRIVER_PEND	1
#define VIC_OWNER_NIC		2
#define VIC_OWNER_NIC_PEND	3

#define VIC_JUMBO_FRAMELEN	9018
#define VIC_JUMBO_MTU		(VIC_JUMBO_FRAMELEN - ETHER_HDR_LEN - ETHER_CRC_LEN)

#define VIC_NBUF		100
#define VIC_NBUF_MAX		128
#define VIC_MAX_SCATTER		1
#define VIC_QUEUE_SIZE		VIC_NBUF_MAX
#define VIC_QUEUE2_SIZE		1
#define VIC_INC(_x, _y)		(_x) = ((_x) + 1) % (_y)
#define VIC_TX_TIMEOUT		5

#define VIC_MIN_FRAMELEN	(ETHER_MIN_LEN - ETHER_CRC_LEN)

#define VIC_TXURN_WARN(_sc)	((_sc)->sc_txpending >= ((_sc)->sc_ntxbuf - 5))
#define VIC_TXURN(_sc)		((_sc)->sc_txpending >= (_sc)->sc_ntxbuf)

struct vic_rxbuf {
	bus_dmamap_t	rxb_dmamap;
	struct mbuf	*rxb_m;
};

struct vic_txbuf {
	bus_dmamap_t	txb_dmamap;
	struct mbuf	*txb_m;
};

struct vic_softc {
	struct device		sc_dev;

	pci_chipset_tag_t	sc_pc;
	pcitag_t		sc_tag;

	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_ios;
	bus_dma_tag_t		sc_dmat;

	void			*sc_ih;

	struct timeout		sc_tick;

	struct arpcom		sc_ac;
	struct ifmedia		sc_media;

	u_int32_t		sc_nrxbuf;
	u_int32_t		sc_ntxbuf;
	u_int32_t		sc_cap;
	u_int32_t		sc_feature;
	u_int8_t		sc_lladdr[ETHER_ADDR_LEN];

	bus_dmamap_t		sc_dma_map;
	bus_dma_segment_t	sc_dma_seg;
	size_t			sc_dma_size;
	caddr_t			sc_dma_kva;
#define VIC_DMA_DVA(_sc)	((_sc)->sc_dma_map->dm_segs[0].ds_addr)
#define VIC_DMA_KVA(_sc)	((void *)(_sc)->sc_dma_kva)

	struct vic_data		*sc_data;

	struct vic_rxbuf	*sc_rxbuf;
	struct vic_rxdesc	*sc_rxq;
	struct vic_rxdesc	*sc_rxq2;

	struct vic_txbuf	*sc_txbuf;
	struct vic_txdesc	*sc_txq;
	volatile u_int		sc_txpending;
};

struct cfdriver vic_cd = {
	NULL, "vic", DV_IFNET
};

int	vic_match(struct device *, void *, void *);
void	vic_attach(struct device *, struct device *, void *);

struct cfattach vic_ca = {
	sizeof(struct vic_softc), vic_match, vic_attach
};

int	vic_intr(void *);
void	vic_shutdown(void *);

int	vic_map_pci(struct vic_softc *, struct pci_attach_args *);
int	vic_query(struct vic_softc *);
int	vic_alloc_data(struct vic_softc *);
int	vic_init_data(struct vic_softc *sc);
int	vic_uninit_data(struct vic_softc *sc);

u_int32_t vic_read(struct vic_softc *, bus_size_t);
void	vic_write(struct vic_softc *, bus_size_t, u_int32_t);

u_int32_t vic_read_cmd(struct vic_softc *, u_int32_t);

int	vic_alloc_dmamem(struct vic_softc *);
void	vic_free_dmamem(struct vic_softc *);

void	vic_link_state(struct vic_softc *);
void	vic_rx_proc(struct vic_softc *);
void	vic_tx_proc(struct vic_softc *);
void	vic_iff(struct vic_softc *);
void	vic_getlladdr(struct vic_softc *);
void	vic_setlladdr(struct vic_softc *);
int	vic_media_change(struct ifnet *);
void	vic_media_status(struct ifnet *, struct ifmediareq *);
void	vic_start(struct ifnet *);
int	vic_load_txb(struct vic_softc *, struct vic_txbuf *,
	    struct mbuf *);
void	vic_watchdog(struct ifnet *);
int	vic_ioctl(struct ifnet *, u_long, caddr_t);
void	vic_init(struct ifnet *);
void	vic_stop(struct ifnet *);
void	vic_tick(void *);

#define DEVNAME(_s)	((_s)->sc_dev.dv_xname)

struct mbuf *vic_alloc_mbuf(struct vic_softc *, bus_dmamap_t);

const struct pci_matchid vic_devices[] = {
	{ PCI_VENDOR_VMWARE, PCI_PRODUCT_VMWARE_NET }
};

int
vic_match(struct device *parent, void *match, void *aux)
{
	return (pci_matchbyid((struct pci_attach_args *)aux,
	    vic_devices, sizeof(vic_devices)/sizeof(vic_devices[0])));
}

void
vic_attach(struct device *parent, struct device *self, void *aux)
{
	struct vic_softc *sc = (struct vic_softc *)self;
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp;

	if (vic_map_pci(sc, pa) != 0) {
		/* error printed by vic_map_pci */
		return;
	}

	if (vic_query(sc) != 0) {
		/* error printed by vic_query */
		return;
	}

	if (vic_alloc_data(sc) != 0) {
		/* error printed by vic_alloc_data */
		return;
	}

	timeout_set(&sc->sc_tick, vic_tick, sc);

	bcopy(sc->sc_lladdr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN);

	ifp = &sc->sc_ac.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = vic_ioctl;
	ifp->if_start = vic_start;
	ifp->if_watchdog = vic_watchdog;
	ifp->if_hardmtu = VIC_JUMBO_MTU;
	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
	IFQ_SET_MAXLEN(&ifp->if_snd, sc->sc_ntxbuf - 1);
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

#if 0
	/* XXX hardware VLAN tagging and checksum offload not enabled yet */
	if (sc->sc_cap & VIC_CMD_HWCAP_VLAN)
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
	if (sc->sc_cap & VIC_CMD_HWCAP_CSUM)
		ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
		    IFCAP_CSUM_UDPv4;
#endif

	ifmedia_init(&sc->sc_media, 0, vic_media_change, vic_media_status);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);
}

int
vic_map_pci(struct vic_softc *sc, struct pci_attach_args *pa)
{
	pcireg_t memtype;
	pci_intr_handle_t ih;
	const char *intrstr;

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;
	sc->sc_dmat = pa->pa_dmat;

	memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, VIC_PCI_BAR);
	if (pci_mapreg_map(pa, VIC_PCI_BAR, memtype, 0, &sc->sc_iot,
	    &sc->sc_ioh, NULL, &sc->sc_ios, 0) != 0) {
		printf(": unable to map system interface register\n");
		return (1);
	}

	if (pci_intr_map(pa, &ih) != 0) {
		printf(": unable to map interrupt\n");
		goto unmap;
	}

	intrstr = pci_intr_string(pa->pa_pc, ih);
	sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_NET,
	    vic_intr, sc, DEVNAME(sc));
	if (sc->sc_ih == NULL) {
		printf(": unable to establish interrupt%s%s\n",
		    intrstr == NULL ? "" : " at ",
		    intrstr == NULL ? "" : intrstr);
		goto unmap;
	}
	printf(": %s\n", intrstr);

	return (0);

unmap:
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
	sc->sc_ios = 0;
	return (1);
}

int
vic_query(struct vic_softc *sc)
{
	u_int32_t major, minor;

	major = vic_read(sc, VIC_VERSION_MAJOR);
	minor = vic_read(sc, VIC_VERSION_MINOR);

	/* Check for the VMware magic in the version register */
	if ((major & VIC_VERSION_MAJOR_M) !=
	    (VIC_MAGIC & VIC_VERSION_MAJOR_M)) {
		printf("%s: magic mismatch\n", DEVNAME(sc));
		return (1);
	}

	if (VIC_MAGIC > major || VIC_MAGIC < minor) {
		printf("%s: unsupported version (%X)\n", DEVNAME(sc),
		    major & ~VIC_VERSION_MAJOR_M);
		return (1);
	}

	sc->sc_nrxbuf = vic_read_cmd(sc, VIC_CMD_NUM_Rx_BUF);
	sc->sc_ntxbuf = vic_read_cmd(sc, VIC_CMD_NUM_Tx_BUF);
	sc->sc_feature = vic_read_cmd(sc, VIC_CMD_FEATURE);
	sc->sc_cap = vic_read_cmd(sc, VIC_CMD_HWCAP);

	vic_getlladdr(sc);

	printf("%s: VMXnet %04X, address %s\n", DEVNAME(sc),
	    major & ~VIC_VERSION_MAJOR_M, ether_sprintf(sc->sc_lladdr));

#ifdef VIC_DEBUG
	printf("%s: feature 0x%8x, cap 0x%8x, rx/txbuf %d/%d\n", DEVNAME(sc),
	    sc->sc_feature, sc->sc_cap, sc->sc_nrxbuf, sc->sc_ntxbuf);
#endif

	if (sc->sc_nrxbuf > VIC_NBUF_MAX || sc->sc_nrxbuf == 0)
		sc->sc_nrxbuf = VIC_NBUF;
	if (sc->sc_ntxbuf > VIC_NBUF_MAX || sc->sc_ntxbuf == 0)
		sc->sc_ntxbuf = VIC_NBUF;

	return (0);
}

int
vic_alloc_data(struct vic_softc *sc)
{
	u_int8_t *kva;
	u_int offset;
	struct vic_rxdesc *rxd;
	int i;

	sc->sc_rxbuf = malloc(sizeof(struct vic_rxbuf) * sc->sc_nrxbuf,
	    M_DEVBUF, M_NOWAIT);
	if (sc->sc_rxbuf == NULL) {
		printf("%s: unable to allocate rxbuf\n", DEVNAME(sc));
		goto err;
	}

	sc->sc_txbuf = malloc(sizeof(struct vic_txbuf) * sc->sc_ntxbuf,
	    M_DEVBUF, M_NOWAIT);
	if (sc->sc_txbuf == NULL) {
		printf("%s: unable to allocate txbuf\n", DEVNAME(sc));
		goto freerx;
	}

	sc->sc_dma_size = sizeof(struct vic_data) +
	    (sc->sc_nrxbuf + VIC_QUEUE2_SIZE) * sizeof(struct vic_rxdesc) +
	    sc->sc_ntxbuf * sizeof(struct vic_txdesc);

	if (vic_alloc_dmamem(sc) != 0) {
		printf("%s: unable to allocate dma region\n", DEVNAME(sc));
		goto freetx;
	}
	kva = VIC_DMA_KVA(sc);

	/* Set up the shared data header */
	sc->sc_data = VIC_DMA_KVA(sc);
	sc->sc_data->vd_magic = VIC_MAGIC;
	sc->sc_data->vd_length = sc->sc_dma_size;

	offset = sizeof(struct vic_data);

	/* Primary Rx ring */
	sc->sc_rxq = (struct vic_rxdesc *)&kva[offset];
	sc->sc_data->vd_rx_offset = offset;
	sc->sc_data->vd_rx_length = sc->sc_nrxbuf;

	offset += sizeof(struct vic_rxdesc) * sc->sc_nrxbuf;

	/* Secondary Rx ring */
	sc->sc_rxq2 = (struct vic_rxdesc *)&kva[offset];
	sc->sc_data->vd_rx_offset2 = offset;
	sc->sc_data->vd_rx_length2 = VIC_QUEUE2_SIZE;

	for (i = 0; i < VIC_QUEUE2_SIZE; i++) {
		rxd = &sc->sc_rxq2[i];

		rxd->rx_physaddr = 0;
		rxd->rx_buflength = 0;
		rxd->rx_length = 0;
		rxd->rx_owner = VIC_OWNER_DRIVER;

		offset += sizeof(struct vic_rxdesc);
	}

	/* Tx ring */
	sc->sc_txq = (struct vic_txdesc *)&kva[offset];
	sc->sc_data->vd_tx_offset = offset;
	sc->sc_data->vd_tx_length = sc->sc_ntxbuf;

	return (0);
freetx:
	free(sc->sc_txbuf, M_DEVBUF);
freerx:
	free(sc->sc_rxbuf, M_DEVBUF);
err:
	return (1);
}

int
vic_init_data(struct vic_softc *sc)
{
	struct vic_rxbuf *rxb;
	struct vic_rxdesc *rxd;
	struct vic_txbuf *txb;
	int i;

	for (i = 0; i < sc->sc_nrxbuf; i++) {
		rxb = &sc->sc_rxbuf[i];
		rxd = &sc->sc_rxq[i];

		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &rxb->rxb_dmamap) != 0) {
			printf("%s: unable to create dmamap for rxb %d\n",
			    DEVNAME(sc), i);
			goto freerxbs;
		}

		rxb->rxb_m = vic_alloc_mbuf(sc, rxb->rxb_dmamap);
		if (rxb->rxb_m == NULL) {
			/* error printed by vic_alloc_mbuf */
			bus_dmamap_destroy(sc->sc_dmat, rxb->rxb_dmamap);
			goto freerxbs;
		}

		bus_dmamap_sync(sc->sc_dmat, rxb->rxb_dmamap, 0,
		    rxb->rxb_m->m_pkthdr.len, BUS_DMASYNC_PREREAD);

		rxd->rx_physaddr = rxb->rxb_dmamap->dm_segs[0].ds_addr;
		rxd->rx_buflength = rxb->rxb_m->m_pkthdr.len;
		rxd->rx_length = 0;
		rxd->rx_owner = VIC_OWNER_NIC;
	}

	for (i = 0; i < sc->sc_ntxbuf; i++) {
		txb = &sc->sc_txbuf[i];
		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    (sc->sc_cap & VIC_CMD_HWCAP_SG) ? VIC_SG_MAX : 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &txb->txb_dmamap) != 0) {
			printf("%s: unable to create dmamap for tx %d\n",
			    DEVNAME(sc), i);
			goto freetxbs;
		}
		txb->txb_m = NULL;
	}

	return (0);

freetxbs:
	while (i--) {
		txb = &sc->sc_txbuf[i];
		bus_dmamap_destroy(sc->sc_dmat, txb->txb_dmamap);
	}

	i = sc->sc_nrxbuf;
freerxbs:
	while (i--) {
		rxb = &sc->sc_rxbuf[i];
		bus_dmamap_sync(sc->sc_dmat, rxb->rxb_dmamap, 0,
		    rxb->rxb_m->m_pkthdr.len, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxb->rxb_dmamap);
		bus_dmamap_destroy(sc->sc_dmat, rxb->rxb_dmamap);
		/* don't leak the mbuf loaded into the map */
		m_freem(rxb->rxb_m);
		rxb->rxb_m = NULL;
	}

	return (1);
}

int
vic_uninit_data(struct vic_softc *sc)
{
	struct vic_rxbuf *rxb;
	struct vic_rxdesc *rxd;
	struct vic_txbuf *txb;
	int i;

	for (i = 0; i < sc->sc_nrxbuf; i++) {
		rxb = &sc->sc_rxbuf[i];
		rxd = &sc->sc_rxq[i];

		bus_dmamap_sync(sc->sc_dmat, rxb->rxb_dmamap, 0,
		    rxb->rxb_m->m_pkthdr.len, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxb->rxb_dmamap);
		bus_dmamap_destroy(sc->sc_dmat, rxb->rxb_dmamap);

		m_freem(rxb->rxb_m);
		rxb->rxb_m = NULL;
	}

	for (i = 0; i < sc->sc_ntxbuf; i++) {
		txb = &sc->sc_txbuf[i];
		bus_dmamap_destroy(sc->sc_dmat, txb->txb_dmamap);
	}

	return (0);
}

void
vic_link_state(struct vic_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	u_int32_t status;
	int link_state = LINK_STATE_DOWN;

	status = vic_read(sc, VIC_STATUS);
	if (status & VIC_STATUS_CONNECTED)
		link_state = LINK_STATE_FULL_DUPLEX;
	if (ifp->if_link_state != link_state) {
		ifp->if_link_state = link_state;
		if_link_state_change(ifp);
	}
}

void
vic_shutdown(void *self)
{
	struct vic_softc *sc = (struct vic_softc *)self;

	vic_stop(&sc->sc_ac.ac_if);
}

int
vic_intr(void *arg)
{
	struct vic_softc *sc = (struct vic_softc *)arg;

	vic_rx_proc(sc);
	vic_tx_proc(sc);

	vic_write(sc, VIC_CMD, VIC_CMD_INTR_ACK);

	return (1);
}

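/*
 * Pass received packets up the stack and hand the descriptors back to the
 * NIC with fresh mbufs.
 */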
void
vic_rx_proc(struct vic_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct vic_rxdesc *rxd;
	struct vic_rxbuf *rxb;
	struct mbuf *m;
	int len, idx;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_map, 0, sc->sc_dma_size,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (;;) {
		idx = sc->sc_data->vd_rx_nextidx;
		if (idx >= sc->sc_data->vd_rx_length) {
			ifp->if_ierrors++;
			if (ifp->if_flags & IFF_DEBUG)
				printf("%s: receive index error\n",
				    sc->sc_dev.dv_xname);
			break;
		}

		rxd = &sc->sc_rxq[idx];
		if (rxd->rx_owner != VIC_OWNER_DRIVER)
			break;

		rxb = &sc->sc_rxbuf[idx];

		len = rxd->rx_length;
		if (len < VIC_MIN_FRAMELEN) {
			ifp->if_iqdrops++;
			goto nextp;
		}

		if (rxb->rxb_m == NULL) {
			ifp->if_ierrors++;
			printf("%s: rxb %d has no mbuf\n", DEVNAME(sc), idx);
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, rxb->rxb_dmamap, 0,
		    rxb->rxb_m->m_pkthdr.len, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxb->rxb_dmamap);

		m = rxb->rxb_m;
		rxb->rxb_m = NULL;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;

		/* Put a fresh mbuf into the descriptor */
		rxb->rxb_m = vic_alloc_mbuf(sc, rxb->rxb_dmamap);
		if (rxb->rxb_m == NULL) {
			ifp->if_ierrors++;
			printf("%s: mbuf alloc failed\n", DEVNAME(sc));
			break;
		}
		bus_dmamap_sync(sc->sc_dmat, rxb->rxb_dmamap, 0,
		    rxb->rxb_m->m_pkthdr.len, BUS_DMASYNC_PREREAD);

		rxd->rx_physaddr = rxb->rxb_dmamap->dm_segs[0].ds_addr;
		rxd->rx_buflength = rxb->rxb_m->m_pkthdr.len;
		rxd->rx_length = 0;
		rxd->rx_owner = VIC_OWNER_DRIVER;

		ifp->if_ipackets++;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif

		ether_input_mbuf(ifp, m);

nextp:
		rxd->rx_owner = VIC_OWNER_NIC;
		VIC_INC(sc->sc_data->vd_rx_nextidx, sc->sc_data->vd_rx_length);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_map, 0, sc->sc_dma_size,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

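/* Reclaim transmitted buffers and restart the output queue. */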
void
vic_tx_proc(struct vic_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct vic_txdesc *txd;
	struct vic_txbuf *txb;
	int idx;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_map, 0, sc->sc_dma_size,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	while (sc->sc_txpending > 0) {
		idx = sc->sc_data->vd_tx_curidx;
		if (idx >= sc->sc_data->vd_tx_length) {
			ifp->if_oerrors++;
			break;
		}

		txd = &sc->sc_txq[idx];
		if (txd->tx_owner != VIC_OWNER_DRIVER)
			break;

		txb = &sc->sc_txbuf[idx];
		if (txb->txb_m == NULL) {
			printf("%s: tx ring is corrupt\n", DEVNAME(sc));
			ifp->if_oerrors++;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, txb->txb_dmamap, 0,
		    txb->txb_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txb->txb_dmamap);

		m_freem(txb->txb_m);
		txb->txb_m = NULL;
		ifp->if_flags &= ~IFF_OACTIVE;

		sc->sc_txpending--;
		sc->sc_data->vd_tx_stopped = 0;

		VIC_INC(sc->sc_data->vd_tx_curidx, sc->sc_data->vd_tx_length);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_map, 0, sc->sc_dma_size,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	vic_start(ifp);
}

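/*
 * Program the interface filter: hash multicast addresses into the shared
 * filter, or fall back to promiscuous/all-multicast mode.
 */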
void
vic_iff(struct vic_softc *sc)
{
	struct arpcom *ac = &sc->sc_ac;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int32_t crc;
	u_int16_t *mcastfil = (u_int16_t *)sc->sc_data->vd_mcastfil;
	u_int flags = 0;

	bzero(&sc->sc_data->vd_mcastfil, sizeof(sc->sc_data->vd_mcastfil));
	ifp->if_flags &= ~IFF_ALLMULTI;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		goto domulti;
	if (ifp->if_flags & IFF_PROMISC)
		goto allmulti;

	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN))
			goto allmulti;

		crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
		crc >>= 26;
		mcastfil[crc >> 4] |= htole16(1 << (crc & 0xf));

		ETHER_NEXT_MULTI(step, enm);
	}

	goto domulti;

allmulti:
	ifp->if_flags |= IFF_ALLMULTI;
	memset(&sc->sc_data->vd_mcastfil, 0xff,
	    sizeof(sc->sc_data->vd_mcastfil));

domulti:
	vic_write(sc, VIC_CMD, VIC_CMD_MCASTFIL);

	if (ifp->if_flags & IFF_RUNNING) {
		flags = (ifp->if_flags & IFF_PROMISC) ?
		    VIC_CMD_IFF_PROMISC :
		    (VIC_CMD_IFF_BROADCAST | VIC_CMD_IFF_MULTICAST);
	}
	sc->sc_data->vd_iff = flags;
	vic_write(sc, VIC_CMD, VIC_CMD_IFF);
}

void
vic_getlladdr(struct vic_softc *sc)
{
	u_int32_t reg;

	/* Read the hardware address from the VPROM if available */
	reg = (sc->sc_cap & VIC_CMD_HWCAP_VPROM) ? VIC_VPROM : VIC_LLADDR;

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, reg, ETHER_ADDR_LEN,
	    BUS_SPACE_BARRIER_READ);
	bus_space_read_region_1(sc->sc_iot, sc->sc_ioh, reg, sc->sc_lladdr,
	    ETHER_ADDR_LEN);

	/* Copy the VPROM address into the LLADDR register */
	if (reg == VIC_VPROM)
		vic_setlladdr(sc);
}

void
vic_setlladdr(struct vic_softc *sc)
{
	bus_space_write_region_1(sc->sc_iot, sc->sc_ioh, VIC_LLADDR,
	    sc->sc_lladdr, ETHER_ADDR_LEN);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, VIC_LLADDR, ETHER_ADDR_LEN,
	    BUS_SPACE_BARRIER_WRITE);
}

int
vic_media_change(struct ifnet *ifp)
{
	/* The emulated link is fixed; there is nothing to change. */
	return (0);
}

void
vic_media_status(struct ifnet *ifp, struct ifmediareq *imr)
{
	struct vic_softc *sc = (struct vic_softc *)ifp->if_softc;

	imr->ifm_active = IFM_ETHER | IFM_AUTO;
	imr->ifm_status = IFM_AVALID;

	vic_link_state(sc);

	if (LINK_STATE_IS_UP(ifp->if_link_state) &&
	    ifp->if_flags & IFF_UP)
		imr->ifm_status |= IFM_ACTIVE;
}

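/* Queue packets from the output queue onto the Tx ring. */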
void
vic_start(struct ifnet *ifp)
{
	struct vic_softc *sc;
	struct mbuf *m;
	struct vic_txbuf *txb;
	struct vic_txdesc *txd;
	struct vic_sg *sge;
	bus_dmamap_t dmap;
	int i, idx;
	int tx = 0;

	if (!(ifp->if_flags & IFF_RUNNING))
		return;

	if (ifp->if_flags & IFF_OACTIVE)
		return;

	if (IFQ_IS_EMPTY(&ifp->if_snd))
		return;

	sc = (struct vic_softc *)ifp->if_softc;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_map, 0, sc->sc_dma_size,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (;;) {
		if (VIC_TXURN(sc)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;

		idx = sc->sc_data->vd_tx_nextidx;
		if (idx >= sc->sc_data->vd_tx_length) {
			printf("%s: tx idx is corrupt\n", DEVNAME(sc));
			ifp->if_oerrors++;
			break;
		}

		txd = &sc->sc_txq[idx];
		txb = &sc->sc_txbuf[idx];

		if (txb->txb_m != NULL) {
			printf("%s: tx ring is corrupt\n", DEVNAME(sc));
			sc->sc_data->vd_tx_stopped = 1;
			ifp->if_oerrors++;
			break;
		}

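		/*
		 * Committed to sending from here on: the packet has to be
		 * dequeued, and is dropped if it cannot be loaded for DMA.
		 */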
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (vic_load_txb(sc, txb, m) != 0) {
			m_freem(m);
			ifp->if_oerrors++;
			break;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, txb->txb_m, BPF_DIRECTION_OUT);
#endif

		dmap = txb->txb_dmamap;
		txd->tx_flags = VIC_TX_FLAGS_KEEP;
		txd->tx_owner = VIC_OWNER_NIC;
		txd->tx_sa.sa_addr_type = VIC_SG_ADDR_PHYS;
		txd->tx_sa.sa_length = dmap->dm_nsegs;
		for (i = 0; i < dmap->dm_nsegs; i++) {
			sge = &txd->tx_sa.sa_sg[i];
			sge->sg_length = dmap->dm_segs[i].ds_len;
			sge->sg_addr_low = dmap->dm_segs[i].ds_addr;
		}

		if (VIC_TXURN_WARN(sc)) {
			txd->tx_flags |= VIC_TX_FLAGS_TXURN;
		}

		bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		ifp->if_opackets++;
		sc->sc_txpending++;

		VIC_INC(sc->sc_data->vd_tx_nextidx, sc->sc_data->vd_tx_length);

		tx = 1;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_map, 0, sc->sc_dma_size,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (tx)
		vic_read(sc, VIC_Tx_ADDR);
}

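/*
 * Load an mbuf chain into a Tx DMA map. If the chain has more fragments
 * than the map allows, copy it into a single cluster and retry.
 */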
int
vic_load_txb(struct vic_softc *sc, struct vic_txbuf *txb, struct mbuf *m)
{
	bus_dmamap_t dmap = txb->txb_dmamap;
	struct mbuf *m0 = NULL;
	int error;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, dmap, m, BUS_DMA_NOWAIT);
	switch (error) {
	case 0:
		txb->txb_m = m;
		break;

	case EFBIG:
		MGETHDR(m0, M_DONTWAIT, MT_DATA);
		if (m0 == NULL)
			return (ENOBUFS);
		if (m->m_pkthdr.len > MHLEN) {
			MCLGET(m0, M_DONTWAIT);
			if (!(m0->m_flags & M_EXT)) {
				m_freem(m0);
				return (ENOBUFS);
			}
		}
		m_copydata(m, 0, m->m_pkthdr.len, mtod(m0, caddr_t));
		m0->m_pkthdr.len = m0->m_len = m->m_pkthdr.len;
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmap, m0,
		    BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(m0);
			printf("%s: tx dmamap load error %d\n", DEVNAME(sc),
			    error);
			return (ENOBUFS);
		}
		m_freem(m);
		txb->txb_m = m0;
		break;

	default:
		printf("%s: tx dmamap load error %d\n", DEVNAME(sc), error);
		return (ENOBUFS);
	}

	return (0);
}

void
vic_watchdog(struct ifnet *ifp)
{
#if 0
	struct vic_softc *sc = (struct vic_softc *)ifp->if_softc;

	if (sc->sc_txpending && sc->sc_txtimeout > 0) {
		if (--sc->sc_txtimeout == 0) {
			printf("%s: device timeout\n", sc->sc_dev.dv_xname);
			ifp->if_flags &= ~IFF_RUNNING;
			vic_init(ifp);
			ifp->if_oerrors++;
			return;
		}
	}

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		vic_start(ifp);
#endif
}

int
vic_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct vic_softc *sc = (struct vic_softc *)ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa;
	int s, error = 0;

	s = splnet();

	if ((error = ether_ioctl(ifp, &sc->sc_ac, cmd, data)) > 0) {
		splx(s);
		return (error);
	}

	switch (cmd) {
	case SIOCSIFADDR:
		ifa = (struct ifaddr *)data;
		ifp->if_flags |= IFF_UP;
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_ac, ifa);
#endif
		/* FALLTHROUGH */
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				vic_iff(sc);
			else
				vic_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				vic_stop(ifp);
		}
		break;

	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ifp->if_hardmtu)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu)
			ifp->if_mtu = ifr->ifr_mtu;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		ifr = (struct ifreq *)data;
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_ac) :
		    ether_delmulti(ifr, &sc->sc_ac);

		if (error == ENETRESET) {
			if (ifp->if_flags & IFF_RUNNING)
				vic_iff(sc);
			error = 0;
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	default:
		error = ENOTTY;
		break;
	}

	if (error == ENETRESET) {
		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
		    (IFF_UP | IFF_RUNNING))
			vic_iff(ifp->if_softc);
		error = 0;
	}

	splx(s);

	return (error);
}

void
vic_init(struct ifnet *ifp)
{
	struct vic_softc *sc = (struct vic_softc *)ifp->if_softc;
	int s;

	if (vic_init_data(sc) != 0)
		return;

	sc->sc_data->vd_tx_curidx = 0;
	sc->sc_data->vd_tx_nextidx = 0;
	sc->sc_data->vd_tx_stopped = sc->sc_data->vd_tx_queued = 0;

	sc->sc_data->vd_rx_nextidx = 0;
	sc->sc_data->vd_rx_nextidx2 = 0;

	sc->sc_data->vd_rx_saved_nextidx = 0;
	sc->sc_data->vd_rx_saved_nextidx2 = 0;
	sc->sc_data->vd_tx_saved_nextidx = 0;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_map, 0, sc->sc_dma_size,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	s = splnet();

	vic_write(sc, VIC_DATA_ADDR, VIC_DMA_DVA(sc));
	vic_write(sc, VIC_DATA_LENGTH, sc->sc_dma_size);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	vic_iff(sc);
	vic_write(sc, VIC_CMD, VIC_CMD_INTR_ENABLE);

	splx(s);

	timeout_add(&sc->sc_tick, hz);
}

void
vic_stop(struct ifnet *ifp)
{
	struct vic_softc *sc = (struct vic_softc *)ifp->if_softc;
	int s;

	s = splnet();

	timeout_del(&sc->sc_tick);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_map, 0, sc->sc_dma_size,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* Wait for pending Tx to complete */
	while (sc->sc_txpending > 0) {
		splx(s);
		delay(1000);
		s = splnet();
	}

	sc->sc_data->vd_tx_stopped = 1;

	vic_write(sc, VIC_CMD, VIC_CMD_INTR_DISABLE);

	vic_iff(sc);
	vic_write(sc, VIC_DATA_ADDR, 0);

	vic_uninit_data(sc);

	splx(s);
}

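/* Allocate a cluster mbuf and load it into the given DMA map. */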
struct mbuf *
vic_alloc_mbuf(struct vic_softc *sc, bus_dmamap_t map)
{
	struct mbuf *m = NULL;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (NULL);
	}
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT) != 0) {
		printf("%s: could not load mbuf DMA map\n", DEVNAME(sc));
		m_freem(m);
		return (NULL);
	}

	return (m);
}

void
vic_tick(void *arg)
{
	struct vic_softc *sc = (struct vic_softc *)arg;

	vic_link_state(sc);

	timeout_add(&sc->sc_tick, hz);
}

u_int32_t
vic_read(struct vic_softc *sc, bus_size_t r)
{
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, r));
}

void
vic_write(struct vic_softc *sc, bus_size_t r, u_int32_t v)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

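/* Issue a command and read its result back from the command register. */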
u_int32_t
vic_read_cmd(struct vic_softc *sc, u_int32_t cmd)
{
	vic_write(sc, VIC_CMD, cmd);
	return (vic_read(sc, VIC_CMD));
}

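/* Allocate the DMA-safe memory shared between the driver and the device. */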
int
vic_alloc_dmamem(struct vic_softc *sc)
{
	int nsegs;

	if (bus_dmamap_create(sc->sc_dmat, sc->sc_dma_size, 1,
	    sc->sc_dma_size, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
	    &sc->sc_dma_map) != 0)
		goto err;

	if (bus_dmamem_alloc(sc->sc_dmat, sc->sc_dma_size, 16, 0,
	    &sc->sc_dma_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_seg, nsegs,
	    sc->sc_dma_size, &sc->sc_dma_kva, BUS_DMA_NOWAIT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, sc->sc_dma_map, sc->sc_dma_kva,
	    sc->sc_dma_size, NULL, BUS_DMA_NOWAIT) != 0)
		goto unmap;

	bzero(sc->sc_dma_kva, sc->sc_dma_size);

	return (0);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_dma_kva, sc->sc_dma_size);
free:
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dma_map);
err:
	return (1);
}

void
vic_free_dmamem(struct vic_softc *sc)
{
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_map);
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_dma_kva, sc->sc_dma_size);
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dma_map);
}
1369 }