This source file includes the following definitions:
- hme_config
- hme_tick
- hme_reset
- hme_stop
- hme_meminit
- hme_init
- hme_start
- hme_tint
- hme_rxcksum
- hme_rint
- hme_eint
- hme_intr
- hme_watchdog
- hme_mifinit
- hme_mii_readreg
- hme_mii_writereg
- hme_mii_statchg
- hme_mediachange
- hme_mediastatus
- hme_ioctl
- hme_shutdown
- hme_setladrf
- hme_encap
- hme_newbuf
#include "bpfilter.h"
#include "vlan.h"

#undef HMEDEBUG

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/errno.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <machine/bus.h>

#include <dev/ic/hmereg.h>
#include <dev/ic/hmevar.h>

struct cfdriver hme_cd = {
	NULL, "hme", DV_IFNET
};

#define HME_RX_OFFSET 2

void hme_start(struct ifnet *);
void hme_stop(struct hme_softc *);
int hme_ioctl(struct ifnet *, u_long, caddr_t);
void hme_tick(void *);
void hme_watchdog(struct ifnet *);
void hme_shutdown(void *);
void hme_init(struct hme_softc *);
void hme_meminit(struct hme_softc *);
void hme_mifinit(struct hme_softc *);
void hme_reset(struct hme_softc *);
void hme_setladrf(struct hme_softc *);
int hme_newbuf(struct hme_softc *, struct hme_sxd *, int);
int hme_encap(struct hme_softc *, struct mbuf *, int *);

static int hme_mii_readreg(struct device *, int, int);
static void hme_mii_writereg(struct device *, int, int, int);
static void hme_mii_statchg(struct device *);

int hme_mediachange(struct ifnet *);
void hme_mediastatus(struct ifnet *, struct ifmediareq *);

int hme_eint(struct hme_softc *, u_int);
int hme_rint(struct hme_softc *);
int hme_tint(struct hme_softc *);

void hme_rxcksum(struct mbuf *, u_int32_t);
void
hme_config(sc)
	struct hme_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *child;
	bus_dma_tag_t dmatag = sc->sc_dmatag;
	bus_dma_segment_t seg;
	bus_size_t size;
	int rseg, error, i;

	/*
	 * HME common initialization.
	 *
	 * The front-end attachment code is expected to have initialized
	 * the bus tags (sc_bustag, sc_dmatag), the register handles
	 * (sc_seb, sc_erx, sc_etx, sc_mac, sc_mif), the maximum bus
	 * burst size (sc_burst) and the station address
	 * (sc_arpcom.ac_enaddr) before calling this function.
	 */

	/* Make sure the chip is stopped. */
	hme_stop(sc);

	for (i = 0; i < HME_TX_RING_SIZE; i++) {
		if (bus_dmamap_create(sc->sc_dmatag, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &sc->sc_txd[i].sd_map) != 0) {
			sc->sc_txd[i].sd_map = NULL;
			goto fail;
		}
	}
	for (i = 0; i < HME_RX_RING_SIZE; i++) {
		if (bus_dmamap_create(sc->sc_dmatag, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &sc->sc_rxd[i].sd_map) != 0) {
			sc->sc_rxd[i].sd_map = NULL;
			goto fail;
		}
	}
	if (bus_dmamap_create(sc->sc_dmatag, MCLBYTES, 1, MCLBYTES, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_rxmap_spare) != 0) {
		sc->sc_rxmap_spare = NULL;
		goto fail;
	}

	/*
	 * Allocate DMA-safe memory for the ring descriptors.
	 * The maximum ring sizes are used so the rings can be
	 * reconfigured without reallocating this memory.
	 */
	size = (HME_XD_SIZE * HME_RX_RING_MAX) +
	    (HME_XD_SIZE * HME_TX_RING_MAX);

	/* Allocate the DMA buffer. */
	if ((error = bus_dmamem_alloc(dmatag, size, 2048, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("\n%s: DMA buffer alloc error %d\n",
		    sc->sc_dev.dv_xname, error);
		return;
	}

	/* Map the DMA memory into CPU addressable space. */
	if ((error = bus_dmamem_map(dmatag, &seg, rseg, size,
	    &sc->sc_rb.rb_membase, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("\n%s: DMA buffer map error %d\n",
		    sc->sc_dev.dv_xname, error);
		bus_dmamem_free(dmatag, &seg, rseg);
		return;
	}

	if ((error = bus_dmamap_create(dmatag, size, 1, size, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
		printf("\n%s: DMA map create error %d\n",
		    sc->sc_dev.dv_xname, error);
		bus_dmamem_unmap(dmatag, sc->sc_rb.rb_membase, size);
		bus_dmamem_free(dmatag, &seg, rseg);
		return;
	}

	/* Load the buffer. */
	if ((error = bus_dmamap_load(dmatag, sc->sc_dmamap,
	    sc->sc_rb.rb_membase, size, NULL,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("\n%s: DMA buffer map load error %d\n",
		    sc->sc_dev.dv_xname, error);
		bus_dmamap_destroy(dmatag, sc->sc_dmamap);
		bus_dmamem_unmap(dmatag, sc->sc_rb.rb_membase, size);
		bus_dmamem_free(dmatag, &seg, rseg);
		return;
	}
	sc->sc_rb.rb_dmabase = sc->sc_dmamap->dm_segs[0].ds_addr;

	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/* Initialize the ifnet structure. */
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, sizeof ifp->if_xname);
	ifp->if_softc = sc;
	ifp->if_start = hme_start;
	ifp->if_ioctl = hme_ioctl;
	ifp->if_watchdog = hme_watchdog;
	ifp->if_flags =
	    IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;
	sc->sc_if_flags = ifp->if_flags;
	IFQ_SET_READY(&ifp->if_snd);
	ifp->if_capabilities = IFCAP_VLAN_MTU;

	/* Initialize ifmedia structures and MII info. */
	mii->mii_ifp = ifp;
	mii->mii_readreg = hme_mii_readreg;
	mii->mii_writereg = hme_mii_writereg;
	mii->mii_statchg = hme_mii_statchg;

	ifmedia_init(&mii->mii_media, IFM_IMASK,
	    hme_mediachange, hme_mediastatus);

	hme_mifinit(sc);

	if (sc->sc_tcvr == -1)
		mii_attach(&sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
		    MII_OFFSET_ANY, 0);
	else
		mii_attach(&sc->sc_dev, mii, 0xffffffff, sc->sc_tcvr,
		    MII_OFFSET_ANY, 0);

	child = LIST_FIRST(&mii->mii_phys);
	if (child == NULL) {
		/* No PHY attached. */
		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
	} else {
		/*
		 * Walk along the list of attached MII devices and
		 * record an `MII instance' for each PHY.
		 */
		for (; child != NULL; child = LIST_NEXT(child, mii_list)) {
			/*
			 * Note: we support just two PHYs: the built-in
			 * internal device and an external device on the
			 * MII connector.
			 */
			if (child->mii_phy > 1 || child->mii_inst > 1) {
				printf("%s: cannot accommodate MII device %s"
				    " at phy %d, instance %d\n",
				    sc->sc_dev.dv_xname,
				    child->mii_dev.dv_xname,
				    child->mii_phy, child->mii_inst);
				continue;
			}

			sc->sc_phys[child->mii_inst] = child->mii_phy;
		}

		/* Default to autonegotiation. */
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);
	}

	/* Attach the interface. */
	if_attach(ifp);
	ether_ifattach(ifp);

	sc->sc_sh = shutdownhook_establish(hme_shutdown, sc);
	if (sc->sc_sh == NULL)
		panic("hme_config: can't establish shutdownhook");

	timeout_set(&sc->sc_tick_ch, hme_tick, sc);
	return;

fail:
	if (sc->sc_rxmap_spare != NULL)
		bus_dmamap_destroy(sc->sc_dmatag, sc->sc_rxmap_spare);
	for (i = 0; i < HME_TX_RING_SIZE; i++)
		if (sc->sc_txd[i].sd_map != NULL)
			bus_dmamap_destroy(sc->sc_dmatag, sc->sc_txd[i].sd_map);
	for (i = 0; i < HME_RX_RING_SIZE; i++)
		if (sc->sc_rxd[i].sd_map != NULL)
			bus_dmamap_destroy(sc->sc_dmatag, sc->sc_rxd[i].sd_map);
}

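/*
 * One second timer: unload the hardware statistics counters and
 * drive the MII tick.
 */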
void
hme_tick(arg)
	void *arg;
{
	struct hme_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_mac;
	int s;

	s = splnet();

	/*
	 * Unload the collision counters,
	 */
	ifp->if_collisions +=
	    bus_space_read_4(t, mac, HME_MACI_NCCNT) +
	    bus_space_read_4(t, mac, HME_MACI_FCCNT) +
	    bus_space_read_4(t, mac, HME_MACI_EXCNT) +
	    bus_space_read_4(t, mac, HME_MACI_LTCNT);

	/*
	 * then clear the hardware counters.
	 */
	bus_space_write_4(t, mac, HME_MACI_NCCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_FCCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_EXCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_LTCNT, 0);

	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add(&sc->sc_tick_ch, hz);
}

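/*
 * Reset the chip by running the full initialization sequence again.
 */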
void
hme_reset(sc)
	struct hme_softc *sc;
{
	int s;

	s = splnet();
	hme_init(sc);
	splx(s);
}

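/*
 * Stop the chip: mask interrupts, reset the DMA engines and release
 * any mbufs still held on the transmit ring.
 */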
void
hme_stop(sc)
	struct hme_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t seb = sc->sc_seb;
	int n;

	timeout_del(&sc->sc_tick_ch);
	mii_down(&sc->sc_mii);

	/* Mask all interrupts */
	bus_space_write_4(t, seb, HME_SEBI_IMASK, 0xffffffff);

	/* Reset transmitter and receiver */
	bus_space_write_4(t, seb, HME_SEBI_RESET,
	    (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX));

	for (n = 0; n < 20; n++) {
		u_int32_t v = bus_space_read_4(t, seb, HME_SEBI_RESET);
		if ((v & (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX)) == 0)
			break;
		DELAY(20);
	}
	if (n >= 20)
		printf("%s: hme_stop: reset failed\n", sc->sc_dev.dv_xname);

	for (n = 0; n < HME_TX_RING_SIZE; n++) {
		if (sc->sc_txd[n].sd_loaded) {
			bus_dmamap_sync(sc->sc_dmatag, sc->sc_txd[n].sd_map,
			    0, sc->sc_txd[n].sd_map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmatag, sc->sc_txd[n].sd_map);
			sc->sc_txd[n].sd_loaded = 0;
		}
		if (sc->sc_txd[n].sd_mbuf != NULL) {
			m_freem(sc->sc_txd[n].sd_mbuf);
			sc->sc_txd[n].sd_mbuf = NULL;
		}
	}
}

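/*
 * Carve the descriptor rings out of the DMA memory allocated at
 * config time and load fresh receive buffers.
 */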
void
hme_meminit(sc)
	struct hme_softc *sc;
{
	bus_addr_t dma;
	caddr_t p;
	unsigned int i;
	struct hme_ring *hr = &sc->sc_rb;

	p = hr->rb_membase;
	dma = hr->rb_dmabase;

	/*
	 * Allocate transmit descriptors
	 */
	hr->rb_txd = p;
	hr->rb_txddma = dma;
	p += HME_TX_RING_SIZE * HME_XD_SIZE;
	dma += HME_TX_RING_SIZE * HME_XD_SIZE;
	/* The descriptor rings must be 2048 byte aligned */
	dma = (bus_addr_t)roundup((u_long)dma, 2048);
	p = (caddr_t)roundup((u_long)p, 2048);

	/*
	 * Allocate receive descriptors
	 */
	hr->rb_rxd = p;
	hr->rb_rxddma = dma;
	p += HME_RX_RING_SIZE * HME_XD_SIZE;
	dma += HME_RX_RING_SIZE * HME_XD_SIZE;

	dma = (bus_addr_t)roundup((u_long)dma, 2048);
	p = (caddr_t)roundup((u_long)p, 2048);

	/*
	 * Initialize transmit descriptors
	 */
	for (i = 0; i < HME_TX_RING_SIZE; i++) {
		HME_XD_SETADDR(sc->sc_pci, hr->rb_txd, i, 0);
		HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, i, 0);
		sc->sc_txd[i].sd_mbuf = NULL;
	}

	/*
	 * Initialize receive descriptors
	 */
	for (i = 0; i < HME_RX_RING_SIZE; i++) {
		if (hme_newbuf(sc, &sc->sc_rxd[i], 1)) {
			printf("%s: rx allocation failed\n",
			    sc->sc_dev.dv_xname);
			break;
		}
		HME_XD_SETADDR(sc->sc_pci, hr->rb_rxd, i,
		    sc->sc_rxd[i].sd_map->dm_segs[0].ds_addr);
		HME_XD_SETFLAGS(sc->sc_pci, hr->rb_rxd, i,
		    HME_XD_OWN | HME_XD_ENCODE_RSIZE(HME_RX_PKTSIZE));
	}

	sc->sc_tx_prod = sc->sc_tx_cons = sc->sc_tx_cnt = 0;
	sc->sc_last_rd = 0;
}

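/*
 * Initialization of interface; set up initialization block
 * and transmit/receive descriptor rings.
 */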
void
hme_init(sc)
	struct hme_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t seb = sc->sc_seb;
	bus_space_handle_t etx = sc->sc_etx;
	bus_space_handle_t erx = sc->sc_erx;
	bus_space_handle_t mac = sc->sc_mac;
	u_int8_t *ea;
	u_int32_t v, n;

	/*
	 * Initialization sequence: stop the chip, re-initialize the MIF,
	 * set up the descriptor rings and MAC registers, then enable the
	 * transmitter and receiver.
	 */

	/* Reset the chip. */
	hme_stop(sc);

	/* Re-initialize the MIF. */
	hme_mifinit(sc);

	/* Call MI reset function if any. */
	if (sc->sc_hwreset)
		(*sc->sc_hwreset)(sc);

#if 0
	/* Mask all MIF interrupts, just in case. */
	bus_space_write_4(t, mif, HME_MIFI_IMASK, 0xffff);
#endif

	/* Set up data structures in host memory. */
	hme_meminit(sc);

	/* Clear the statistics counters and set the transmit size. */
	bus_space_write_4(t, mac, HME_MACI_NCCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_FCCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_EXCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_LTCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_TXSIZE,
	    ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);

	/* Load the station MAC address. */
	ea = sc->sc_arpcom.ac_enaddr;
	bus_space_write_4(t, mac, HME_MACI_MACADDR0, (ea[0] << 8) | ea[1]);
	bus_space_write_4(t, mac, HME_MACI_MACADDR1, (ea[2] << 8) | ea[3]);
	bus_space_write_4(t, mac, HME_MACI_MACADDR2, (ea[4] << 8) | ea[5]);

	/*
	 * Seed the random backoff generator from the low bits of the
	 * station address.
	 */
	v = ((ea[4] << 8) | ea[5]) & 0x3fff;
	bus_space_write_4(t, mac, HME_MACI_RANDSEED, v);

	/* Set up the receive filter. */
	hme_setladrf(sc);

	/* Program the descriptor ring base addresses and sizes. */
	bus_space_write_4(t, etx, HME_ETXI_RING, sc->sc_rb.rb_txddma);
	bus_space_write_4(t, etx, HME_ETXI_RSIZE, HME_TX_RING_SIZE);

	bus_space_write_4(t, erx, HME_ERXI_RING, sc->sc_rb.rb_rxddma);
	bus_space_write_4(t, mac, HME_MACI_RXSIZE,
	    ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);

	/* Set the interrupt mask and the DMA burst size. */
	bus_space_write_4(t, seb, HME_SEBI_IMASK,
	    ~(HME_SEB_STAT_HOSTTOTX | HME_SEB_STAT_RXTOHOST |
	    HME_SEB_STAT_TXALL | HME_SEB_STAT_TXPERR |
	    HME_SEB_STAT_RCNTEXP | HME_SEB_STAT_ALL_ERRORS));

	switch (sc->sc_burst) {
	default:
		v = 0;
		break;
	case 16:
		v = HME_SEB_CFG_BURST16;
		break;
	case 32:
		v = HME_SEB_CFG_BURST32;
		break;
	case 64:
		v = HME_SEB_CFG_BURST64;
		break;
	}
	bus_space_write_4(t, seb, HME_SEBI_CFG, v);

	/* ETX configuration: enable DMA. */
	v = bus_space_read_4(t, etx, HME_ETXI_CFG);
	v |= HME_ETX_CFG_DMAENABLE;
	bus_space_write_4(t, etx, HME_ETXI_CFG, v);

	/* Transmit descriptor ring size, in increments of 16. */
	bus_space_write_4(t, etx, HME_ETXI_RSIZE, HME_TX_RING_SIZE / 16 - 1);

	/* ERX configuration: ring size, DMA enable, first byte offset. */
	v = bus_space_read_4(t, erx, HME_ERXI_CFG);
	v &= ~HME_ERX_CFG_RINGSIZE256;
#if HME_RX_RING_SIZE == 32
	v |= HME_ERX_CFG_RINGSIZE32;
#elif HME_RX_RING_SIZE == 64
	v |= HME_ERX_CFG_RINGSIZE64;
#elif HME_RX_RING_SIZE == 128
	v |= HME_ERX_CFG_RINGSIZE128;
#elif HME_RX_RING_SIZE == 256
	v |= HME_ERX_CFG_RINGSIZE256;
#else
# error "RX ring size must be 32, 64, 128, or 256"
#endif

	v |= HME_ERX_CFG_DMAENABLE | (HME_RX_OFFSET << 3);

	/* RX TCP/UDP checksum start offset, in half-words. */
	n = (ETHER_HDR_LEN + sizeof(struct ip)) / 2;
	n = (n << HME_ERX_CFG_CSUM_SHIFT) & HME_ERX_CFG_CSUMSTART;
	v |= n;
	bus_space_write_4(t, erx, HME_ERXI_CFG, v);

	/* XIF configuration: enable the output drivers. */
	v = bus_space_read_4(t, mac, HME_MACI_XIF);
	v |= HME_MAC_XIF_OE;
	bus_space_write_4(t, mac, HME_MACI_XIF, v);

	/* Enable the receive MAC. */
	v = bus_space_read_4(t, mac, HME_MACI_RXCFG);
	v |= HME_MAC_RXCFG_ENABLE;
	bus_space_write_4(t, mac, HME_MACI_RXCFG, v);

	/* Enable the transmit MAC. */
	v = bus_space_read_4(t, mac, HME_MACI_TXCFG);
	v |= (HME_MAC_TXCFG_ENABLE | HME_MAC_TXCFG_DGIVEUP);
	bus_space_write_4(t, mac, HME_MACI_TXCFG, v);

	/* Call MI initialization function if any. */
	if (sc->sc_hwinit)
		(*sc->sc_hwinit)(sc);

	/* Set the current media. */
	mii_mediachg(&sc->sc_mii);

	/* Start the one second timer. */
	timeout_add(&sc->sc_tick_ch, hz);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	sc->sc_if_flags = ifp->if_flags;
	ifp->if_timer = 0;
	hme_start(ifp);
}

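/*
 * Queue packets from the send queue onto the transmit ring and kick
 * the transmitter.
 */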
void
hme_start(ifp)
	struct ifnet *ifp;
{
	struct hme_softc *sc = (struct hme_softc *)ifp->if_softc;
	struct mbuf *m;
	int bix, cnt = 0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	bix = sc->sc_tx_prod;
	while (sc->sc_txd[bix].sd_mbuf == NULL) {
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;

#if NBPFILTER > 0
		/*
		 * If BPF is listening on this interface, let it see the
		 * packet before we commit it to the wire.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		if (hme_encap(sc, m, &bix)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);

		bus_space_write_4(sc->sc_bustag, sc->sc_etx, HME_ETXI_PENDING,
		    HME_ETX_TP_DMAWAKEUP);
		cnt++;
	}

	if (cnt != 0) {
		sc->sc_tx_prod = bix;
		ifp->if_timer = 5;
	}
}

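/*
 * Transmit interrupt.
 */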
int
hme_tint(sc)
	struct hme_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	unsigned int ri, txflags;
	struct hme_sxd *sd;
	int cnt = sc->sc_tx_cnt;

	/* Fetch current position in the transmit ring. */
	ri = sc->sc_tx_cons;
	sd = &sc->sc_txd[ri];

	for (;;) {
		if (cnt <= 0)
			break;

		txflags = HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri);

		if (txflags & HME_XD_OWN)
			break;

		ifp->if_flags &= ~IFF_OACTIVE;
		if (txflags & HME_XD_EOP)
			ifp->if_opackets++;

		bus_dmamap_sync(sc->sc_dmatag, sd->sd_map,
		    0, sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmatag, sd->sd_map);
		sd->sd_loaded = 0;

		if (sd->sd_mbuf != NULL) {
			m_freem(sd->sd_mbuf);
			sd->sd_mbuf = NULL;
		}

		if (++ri == HME_TX_RING_SIZE) {
			ri = 0;
			sd = sc->sc_txd;
		} else
			sd++;

		--cnt;
	}

	sc->sc_tx_cnt = cnt;
	ifp->if_timer = cnt > 0 ? 5 : 0;

	/* Update ring. */
	sc->sc_tx_cons = ri;

	hme_start(ifp);

	return (1);
}

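/*
 * Verify the hardware-computed receive checksum: fold the IP options
 * and pseudo-header into the 16-bit sum accumulated by the chip and,
 * if the result is zero, mark the mbuf's TCP/UDP checksum as good.
 */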
void
hme_rxcksum(struct mbuf *m, u_int32_t flags)
{
	struct ether_header *eh;
	struct ip *ip;
	struct udphdr *uh;
	int32_t hlen, len, pktlen;
	u_int16_t cksum, *opts;
	u_int32_t temp32;
	union pseudoh {
		struct hdr {
			u_int16_t len;
			u_int8_t ttl;
			u_int8_t proto;
			u_int32_t src;
			u_int32_t dst;
		} h;
		u_int16_t w[6];
	} ph;

	pktlen = m->m_pkthdr.len;
	if (pktlen < sizeof(struct ether_header))
		return;
	eh = mtod(m, struct ether_header *);
	if (eh->ether_type != htons(ETHERTYPE_IP))
		return;
	ip = (struct ip *)(eh + 1);
	if (ip->ip_v != IPVERSION)
		return;

	hlen = ip->ip_hl << 2;
	pktlen -= sizeof(struct ether_header);
	if (hlen < sizeof(struct ip))
		return;
	if (ntohs(ip->ip_len) < hlen)
		return;
	if (ntohs(ip->ip_len) != pktlen)
		return;
	if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
		return;

	switch (ip->ip_p) {
	case IPPROTO_TCP:
		if (pktlen < (hlen + sizeof(struct tcphdr)))
			return;
		break;
	case IPPROTO_UDP:
		if (pktlen < (hlen + sizeof(struct udphdr)))
			return;
		uh = (struct udphdr *)((caddr_t)ip + hlen);
		if (uh->uh_sum == 0)
			return;		/* no checksum */
		break;
	default:
		return;
	}

	cksum = htons(~(flags & HME_XD_RXCKSUM));

	/* Checksum fixup for IP options. */
	len = hlen - sizeof(struct ip);
	if (len > 0) {
		opts = (u_int16_t *)(ip + 1);
		for (; len > 0; len -= sizeof(u_int16_t), opts++) {
			temp32 = cksum - *opts;
			temp32 = (temp32 >> 16) + (temp32 & 65535);
			cksum = temp32 & 65535;
		}
	}

	/* Fold in the pseudo-header. */
	ph.h.len = htons(ntohs(ip->ip_len) - hlen);
	ph.h.ttl = 0;
	ph.h.proto = ip->ip_p;
	ph.h.src = ip->ip_src.s_addr;
	ph.h.dst = ip->ip_dst.s_addr;
	temp32 = cksum;
	opts = &ph.w[0];
	temp32 += opts[0] + opts[1] + opts[2] + opts[3] + opts[4] + opts[5];
	temp32 = (temp32 >> 16) + (temp32 & 65535);
	temp32 += (temp32 >> 16);
	cksum = ~temp32;
	if (cksum == 0) {
		m->m_pkthdr.csum_flags |=
		    M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
	}
}

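/*
 * Receive interrupt.
 */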
int
hme_rint(sc)
	struct hme_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mbuf *m;
	struct hme_sxd *sd;
	unsigned int ri, len;
	u_int32_t flags;

	ri = sc->sc_last_rd;
	sd = &sc->sc_rxd[ri];

	/*
	 * Process all buffers with valid data.
	 */
	for (;;) {
		flags = HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_rxd, ri);
		if (flags & HME_XD_OWN)
			break;

		if (flags & HME_XD_OFL) {
			printf("%s: buffer overflow, ri=%d; flags=0x%x\n",
			    sc->sc_dev.dv_xname, ri, flags);
			goto again;
		}

		m = sd->sd_mbuf;
		len = HME_XD_DECODE_RSIZE(flags);
		m->m_pkthdr.len = m->m_len = len;

		if (hme_newbuf(sc, sd, 0)) {
			/*
			 * Allocation of a new mbuf cluster failed; drop
			 * this packet and reuse the old buffer.
			 */
			ifp->if_ierrors++;
			goto again;
		}

		ifp->if_ipackets++;
		hme_rxcksum(m, flags);

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif

		ether_input_mbuf(ifp, m);

again:
		HME_XD_SETADDR(sc->sc_pci, sc->sc_rb.rb_rxd, ri,
		    sd->sd_map->dm_segs[0].ds_addr);
		HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_rxd, ri,
		    HME_XD_OWN | HME_XD_ENCODE_RSIZE(HME_RX_PKTSIZE));

		if (++ri == HME_RX_RING_SIZE) {
			ri = 0;
			sd = sc->sc_rxd;
		} else
			sd++;
	}

	sc->sc_last_rd = ri;
	return (1);
}

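/*
 * Error interrupt: decode and count chip error conditions.
 */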
int
hme_eint(sc, status)
	struct hme_softc *sc;
	u_int status;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;

	if (status & HME_SEB_STAT_MIFIRQ) {
		printf("%s: XXXlink status changed\n", sc->sc_dev.dv_xname);
		status &= ~HME_SEB_STAT_MIFIRQ;
	}

	if (status & HME_SEB_STAT_DTIMEXP) {
		ifp->if_oerrors++;
		status &= ~HME_SEB_STAT_DTIMEXP;
	}

	if (status & HME_SEB_STAT_NORXD) {
		ifp->if_ierrors++;
		status &= ~HME_SEB_STAT_NORXD;
	}

	status &= ~(HME_SEB_STAT_RXTOHOST | HME_SEB_STAT_GOTFRAME |
	    HME_SEB_STAT_SENTFRAME | HME_SEB_STAT_HOSTTOTX |
	    HME_SEB_STAT_TXALL);

	if (status == 0)
		return (1);

#ifdef HMEDEBUG
	printf("%s: status=%b\n", sc->sc_dev.dv_xname, status,
	    HME_SEB_STAT_BITS);
#endif
	return (1);
}

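/*
 * Interrupt handler: dispatch to the error, transmit and receive
 * service routines according to the status register.
 */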
int
hme_intr(v)
	void *v;
{
	struct hme_softc *sc = (struct hme_softc *)v;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t seb = sc->sc_seb;
	u_int32_t status;
	int r = 0;

	status = bus_space_read_4(t, seb, HME_SEBI_STAT);

	if ((status & HME_SEB_STAT_ALL_ERRORS) != 0)
		r |= hme_eint(sc, status);

	if ((status & (HME_SEB_STAT_TXALL | HME_SEB_STAT_HOSTTOTX)) != 0)
		r |= hme_tint(sc);

	if ((status & HME_SEB_STAT_RXTOHOST) != 0)
		r |= hme_rint(sc);

	return (r);
}

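/*
 * Watchdog timer: the transmitter has been idle too long; reset the chip.
 */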
void
hme_watchdog(ifp)
	struct ifnet *ifp;
{
	struct hme_softc *sc = ifp->if_softc;

	log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;

	hme_reset(sc);
}

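/*
 * Initialize the MII Management Interface.
 */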
void
hme_mifinit(sc)
	struct hme_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_mif;
	bus_space_handle_t mac = sc->sc_mac;
	int phy;
	u_int32_t v;

	v = bus_space_read_4(t, mif, HME_MIFI_CFG);
	phy = HME_PHYAD_EXTERNAL;
	if (v & HME_MIF_CFG_MDI1)
		phy = sc->sc_tcvr = HME_PHYAD_EXTERNAL;
	else if (v & HME_MIF_CFG_MDI0)
		phy = sc->sc_tcvr = HME_PHYAD_INTERNAL;
	else
		sc->sc_tcvr = -1;

	/* Configure the MIF in frame mode, with the current PHY selected. */
	v = 0;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MIF_CFG_PHY;
	bus_space_write_4(t, mif, HME_MIFI_CFG, v);

	/* Enable or disable the MII drivers on the external transceiver. */
	v = bus_space_read_4(t, mac, HME_MACI_XIF);
	v &= ~HME_MAC_XIF_MIIENABLE;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MAC_XIF_MIIENABLE;
	bus_space_write_4(t, mac, HME_MACI_XIF, v);
}

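/*
 * MII interface.
 */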
static int
hme_mii_readreg(self, phy, reg)
	struct device *self;
	int phy, reg;
{
	struct hme_softc *sc = (struct hme_softc *)self;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_mif;
	bus_space_handle_t mac = sc->sc_mac;
	u_int32_t v, xif_cfg, mifi_cfg;
	int n;

	if (phy != HME_PHYAD_EXTERNAL && phy != HME_PHYAD_INTERNAL)
		return (0);

	/* Select the desired PHY in the MIF configuration register. */
	v = mifi_cfg = bus_space_read_4(t, mif, HME_MIFI_CFG);
	v &= ~HME_MIF_CFG_PHY;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MIF_CFG_PHY;
	bus_space_write_4(t, mif, HME_MIFI_CFG, v);

	/* Enable MII drivers on the external transceiver, if selected. */
	v = xif_cfg = bus_space_read_4(t, mac, HME_MACI_XIF);
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MAC_XIF_MIIENABLE;
	else
		v &= ~HME_MAC_XIF_MIIENABLE;
	bus_space_write_4(t, mac, HME_MACI_XIF, v);

	/* Construct the frame command. */
	v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
	    HME_MIF_FO_TAMSB |
	    (MII_COMMAND_READ << HME_MIF_FO_OPC_SHIFT) |
	    (phy << HME_MIF_FO_PHYAD_SHIFT) |
	    (reg << HME_MIF_FO_REGAD_SHIFT);

	bus_space_write_4(t, mif, HME_MIFI_FO, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_space_read_4(t, mif, HME_MIFI_FO);
		if (v & HME_MIF_FO_TALSB) {
			v &= HME_MIF_FO_DATA;
			goto out;
		}
	}

	v = 0;
	printf("%s: mii_read timeout\n", sc->sc_dev.dv_xname);

out:
	/* Restore the MIFI_CFG register. */
	bus_space_write_4(t, mif, HME_MIFI_CFG, mifi_cfg);
	/* Restore the XIF register. */
	bus_space_write_4(t, mac, HME_MACI_XIF, xif_cfg);
	return (v);
}

static void
hme_mii_writereg(self, phy, reg, val)
	struct device *self;
	int phy, reg, val;
{
	struct hme_softc *sc = (void *)self;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_mif;
	bus_space_handle_t mac = sc->sc_mac;
	u_int32_t v, xif_cfg, mifi_cfg;
	int n;

	/* Only the internal and external PHY addresses are supported. */
	if (phy != HME_PHYAD_EXTERNAL && phy != HME_PHYAD_INTERNAL)
		return;

	/* Select the desired PHY in the MIF configuration register. */
	v = mifi_cfg = bus_space_read_4(t, mif, HME_MIFI_CFG);
	v &= ~HME_MIF_CFG_PHY;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MIF_CFG_PHY;
	bus_space_write_4(t, mif, HME_MIFI_CFG, v);

	/* Enable MII drivers on the external transceiver, if selected. */
	v = xif_cfg = bus_space_read_4(t, mac, HME_MACI_XIF);
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MAC_XIF_MIIENABLE;
	else
		v &= ~HME_MAC_XIF_MIIENABLE;
	bus_space_write_4(t, mac, HME_MACI_XIF, v);

	/* Construct the frame command. */
	v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
	    HME_MIF_FO_TAMSB |
	    (MII_COMMAND_WRITE << HME_MIF_FO_OPC_SHIFT) |
	    (phy << HME_MIF_FO_PHYAD_SHIFT) |
	    (reg << HME_MIF_FO_REGAD_SHIFT) |
	    (val & HME_MIF_FO_DATA);

	bus_space_write_4(t, mif, HME_MIFI_FO, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_space_read_4(t, mif, HME_MIFI_FO);
		if (v & HME_MIF_FO_TALSB)
			goto out;
	}

	printf("%s: mii_write timeout\n", sc->sc_dev.dv_xname);
out:
	/* Restore the MIFI_CFG register. */
	bus_space_write_4(t, mif, HME_MIFI_CFG, mifi_cfg);
	/* Restore the XIF register. */
	bus_space_write_4(t, mac, HME_MACI_XIF, xif_cfg);
}

static void
hme_mii_statchg(dev)
	struct device *dev;
{
	struct hme_softc *sc = (void *)dev;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_mac;
	u_int32_t v;

#ifdef HMEDEBUG
	if (sc->sc_debug)
		printf("hme_mii_statchg: status change\n");
#endif

	/* Set the MAC Full Duplex bit appropriately. */
	v = bus_space_read_4(t, mac, HME_MACI_TXCFG);
	if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0) {
		v |= HME_MAC_TXCFG_FULLDPLX;
		sc->sc_arpcom.ac_if.if_flags |= IFF_SIMPLEX;
	} else {
		v &= ~HME_MAC_TXCFG_FULLDPLX;
		sc->sc_arpcom.ac_if.if_flags &= ~IFF_SIMPLEX;
	}
	sc->sc_if_flags = sc->sc_arpcom.ac_if.if_flags;
	bus_space_write_4(t, mac, HME_MACI_TXCFG, v);
}

int
hme_mediachange(ifp)
	struct ifnet *ifp;
{
	struct hme_softc *sc = ifp->if_softc;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_mif;
	bus_space_handle_t mac = sc->sc_mac;
	int instance = IFM_INST(sc->sc_mii.mii_media.ifm_cur->ifm_media);
	int phy = sc->sc_phys[instance];
	u_int32_t v;

#ifdef HMEDEBUG
	if (sc->sc_debug)
		printf("hme_mediachange: phy = %d\n", phy);
#endif
	if (IFM_TYPE(sc->sc_media.ifm_media) != IFM_ETHER)
		return (EINVAL);

	/* Select the current PHY in the MIF configuration register. */
	v = bus_space_read_4(t, mif, HME_MIFI_CFG);
	v &= ~HME_MIF_CFG_PHY;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MIF_CFG_PHY;
	bus_space_write_4(t, mif, HME_MIFI_CFG, v);

	/* If an external transceiver is selected, enable its MII drivers. */
	v = bus_space_read_4(t, mac, HME_MACI_XIF);
	v &= ~HME_MAC_XIF_MIIENABLE;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MAC_XIF_MIIENABLE;
	bus_space_write_4(t, mac, HME_MACI_XIF, v);

	return (mii_mediachg(&sc->sc_mii));
}

void
hme_mediastatus(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	struct hme_softc *sc = ifp->if_softc;

	if ((ifp->if_flags & IFF_UP) == 0)
		return;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
}

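/*
 * Process an ioctl request.
 */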
int
hme_ioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	struct hme_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	if ((error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data)) > 0) {
		splx(s);
		return (error);
	}

	switch (cmd) {

	case SIOCSIFADDR:
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			if (ifp->if_flags & IFF_UP)
				hme_setladrf(sc);
			else {
				ifp->if_flags |= IFF_UP;
				hme_init(sc);
			}
			arp_ifinit(&sc->sc_arpcom, ifa);
			break;
#endif
		default:
			hme_init(sc);
			break;
		}
		break;

	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If interface is marked down and it is running,
			 * stop it.
			 */
			hme_stop(sc);
			ifp->if_flags &= ~IFF_RUNNING;
		} else if ((ifp->if_flags & IFF_UP) != 0 &&
		    (ifp->if_flags & IFF_RUNNING) == 0) {
			/*
			 * If interface is marked up and it is stopped,
			 * start it.
			 */
			hme_init(sc);
		} else if ((ifp->if_flags & IFF_UP) != 0) {
			/*
			 * If only flags that do not require a chip reset
			 * changed, just update the receive filter;
			 * otherwise reinitialize the chip.
			 */
#define RESETIGN (IFF_CANTCHANGE | IFF_DEBUG)
			if (ifp->if_flags == sc->sc_if_flags)
				break;
			if ((ifp->if_flags & (~RESETIGN))
			    == (sc->sc_if_flags & (~RESETIGN)))
				hme_setladrf(sc);
			else
				hme_init(sc);
#undef RESETIGN
		}
#ifdef HMEDEBUG
		sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0;
#endif
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_arpcom) :
		    ether_delmulti(ifr, &sc->sc_arpcom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware
			 * filter accordingly.
			 */
			if (ifp->if_flags & IFF_RUNNING)
				hme_setladrf(sc);
			error = 0;
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	default:
		error = ENOTTY;
		break;
	}

	sc->sc_if_flags = ifp->if_flags;
	splx(s);
	return (error);
}

void
hme_shutdown(arg)
	void *arg;
{
	hme_stop((struct hme_softc *)arg);
}

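/*
 * Set up the logical address filter.
 */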
void
hme_setladrf(sc)
	struct hme_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	struct arpcom *ac = &sc->sc_arpcom;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_mac;
	u_int32_t hash[4];
	u_int32_t v, crc;

	/* Clear the hash table. */
	hash[3] = hash[2] = hash[1] = hash[0] = 0;

	/* Get the current RX configuration. */
	v = bus_space_read_4(t, mac, HME_MACI_RXCFG);

	if ((ifp->if_flags & IFF_PROMISC) != 0) {
		/* Turn on promiscuous mode; turn off the hash filter. */
		v |= HME_MAC_RXCFG_PMISC;
		v &= ~HME_MAC_RXCFG_HENABLE;
		ifp->if_flags |= IFF_ALLMULTI;
		goto chipit;
	}

	/* Turn off promiscuous mode; turn on the hash filter. */
	v &= ~HME_MAC_RXCFG_PMISC;
	v |= HME_MAC_RXCFG_HENABLE;

	/*
	 * Set up the multicast address filter by passing all multicast
	 * addresses through a crc generator, and then using the high
	 * order 6 bits as an index into the 64 bit logical address
	 * filter. The high order bits select the word, while the rest
	 * of the bits select the bit within the word.
	 */
	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts rather than
			 * computing the filter bits needed to match the
			 * range.
			 */
			hash[3] = hash[2] = hash[1] = hash[0] = 0xffff;
			ifp->if_flags |= IFF_ALLMULTI;
			goto chipit;
		}

		crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN) >> 26;

		/* Set the corresponding bit in the filter. */
		hash[crc >> 4] |= 1 << (crc & 0xf);

		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;

chipit:
	/* Now load the hash table into the chip. */
	bus_space_write_4(t, mac, HME_MACI_HASHTAB0, hash[0]);
	bus_space_write_4(t, mac, HME_MACI_HASHTAB1, hash[1]);
	bus_space_write_4(t, mac, HME_MACI_HASHTAB2, hash[2]);
	bus_space_write_4(t, mac, HME_MACI_HASHTAB3, hash[3]);
	bus_space_write_4(t, mac, HME_MACI_RXCFG, v);
}

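/*
 * Load an mbuf chain onto the transmit ring. Returns 0 on success,
 * ENOBUFS if the ring is too full or a DMA load fails.
 */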
int
hme_encap(sc, mhead, bixp)
	struct hme_softc *sc;
	struct mbuf *mhead;
	int *bixp;
{
	struct hme_sxd *sd;
	struct mbuf *m;
	int frag, cur, cnt = 0;
	u_int32_t flags;
	struct hme_ring *hr = &sc->sc_rb;

	cur = frag = *bixp;
	sd = &sc->sc_txd[frag];

	for (m = mhead; m != NULL; m = m->m_next) {
		if (m->m_len == 0)
			continue;

		if ((HME_TX_RING_SIZE - (sc->sc_tx_cnt + cnt)) < 5)
			goto err;

		if (bus_dmamap_load(sc->sc_dmatag, sd->sd_map,
		    mtod(m, caddr_t), m->m_len, NULL, BUS_DMA_NOWAIT) != 0)
			goto err;

		sd->sd_loaded = 1;
		bus_dmamap_sync(sc->sc_dmatag, sd->sd_map, 0,
		    sd->sd_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

		sd->sd_mbuf = NULL;

		flags = HME_XD_ENCODE_TSIZE(m->m_len);
		if (cnt == 0)
			flags |= HME_XD_SOP;
		else
			flags |= HME_XD_OWN;

		HME_XD_SETADDR(sc->sc_pci, hr->rb_txd, frag,
		    sd->sd_map->dm_segs[0].ds_addr);
		HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, frag, flags);

		cur = frag;
		cnt++;
		if (++frag == HME_TX_RING_SIZE) {
			frag = 0;
			sd = sc->sc_txd;
		} else
			sd++;
	}

	/* Set end of packet on the last descriptor. */
	flags = HME_XD_GETFLAGS(sc->sc_pci, hr->rb_txd, cur);
	flags |= HME_XD_EOP;
	HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, cur, flags);
	sc->sc_txd[cur].sd_mbuf = mhead;

	/* Give the first descriptor over to the hardware. */
	flags = HME_XD_GETFLAGS(sc->sc_pci, hr->rb_txd, (*bixp));
	flags |= HME_XD_OWN;
	HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, (*bixp), flags);

	sc->sc_tx_cnt += cnt;
	*bixp = frag;

	return (0);

err:
	/*
	 * Unwind the descriptors we may have already put together.
	 */
	for (; cnt > 0; cnt--) {
		if (--frag == -1)
			frag = HME_TX_RING_SIZE - 1;
		sd = &sc->sc_txd[frag];
		bus_dmamap_sync(sc->sc_dmatag, sd->sd_map, 0,
		    sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmatag, sd->sd_map);
		sd->sd_loaded = 0;
		sd->sd_mbuf = NULL;
	}
	return (ENOBUFS);
}

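/*
 * Attach a new mbuf cluster to a receive descriptor, loading it
 * through the spare DMA map and then swapping maps.
 */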
int
hme_newbuf(sc, d, freeit)
	struct hme_softc *sc;
	struct hme_sxd *d;
	int freeit;
{
	struct mbuf *m;
	bus_dmamap_t map;

	/*
	 * All operations should be on local variables and/or the rx
	 * spare map until we're sure everything is a success.
	 */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	m->m_pkthdr.rcvif = &sc->sc_arpcom.ac_if;

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (bus_dmamap_load(sc->sc_dmatag, sc->sc_rxmap_spare,
	    mtod(m, caddr_t), MCLBYTES - HME_RX_OFFSET, NULL,
	    BUS_DMA_NOWAIT) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	/*
	 * At this point we have a new buffer loaded into the spare map.
	 * Just need to clear out the old mbuf/map and put the new one
	 * into place.
	 */
	if (d->sd_loaded) {
		bus_dmamap_sync(sc->sc_dmatag, d->sd_map,
		    0, d->sd_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmatag, d->sd_map);
		d->sd_loaded = 0;
	}

	if ((d->sd_mbuf != NULL) && freeit) {
		m_freem(d->sd_mbuf);
		d->sd_mbuf = NULL;
	}

	/* Swap the loaded spare map with the descriptor's map. */
	map = d->sd_map;
	d->sd_map = sc->sc_rxmap_spare;
	sc->sc_rxmap_spare = map;

	d->sd_loaded = 1;

	bus_dmamap_sync(sc->sc_dmatag, d->sd_map, 0, d->sd_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	m->m_data += HME_RX_OFFSET;
	d->sd_mbuf = m;
	return (0);
}