This source file includes following definitions.
- pif_wcsr
- pif_rcsr
- txp_wcsr
- pif_wkey
- xge_match
- xge_attach
- xge_ifmedia_status
- xge_xgmii_mediachange
- xge_enable
- xge_init
- xge_stop
- xge_shutdown
- xge_intr
- xge_ioctl
- xge_setmulti
- xge_setpromisc
- xge_start
- xge_alloc_txmem
- xge_alloc_rxmem
- xge_add_rxbuf
- xge_setup_xgxs_xena
- xge_setup_xgxs_herc
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42 #include "bpfilter.h"
43 #include "vlan.h"
44
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/sockio.h>
48 #include <sys/mbuf.h>
49 #include <sys/malloc.h>
50 #include <sys/kernel.h>
51 #include <sys/socket.h>
52 #include <sys/device.h>
53
54 #include <net/if.h>
55 #include <net/if_dl.h>
56 #include <net/if_media.h>
57
58 #ifdef INET
59 #include <netinet/in.h>
60 #include <netinet/in_systm.h>
61 #include <netinet/in_var.h>
62 #include <netinet/ip.h>
63 #include <netinet/if_ether.h>
64 #include <netinet/tcp.h>
65 #include <netinet/udp.h>
66 #endif
67
68 #if NVLAN > 0
69 #include <net/if_types.h>
70 #include <net/if_vlan_var.h>
71 #endif
72
73 #if NBPFILTER > 0
74 #include <net/bpf.h>
75 #endif
76
77 #include <machine/bus.h>
78 #include <machine/intr.h>
79 #include <machine/endian.h>
80
81 #include <dev/mii/mii.h>
82 #include <dev/mii/miivar.h>
83
84 #include <dev/pci/pcivar.h>
85 #include <dev/pci/pcireg.h>
86 #include <dev/pci/pcidevs.h>
87
88 #include <sys/lock.h>
89 #include <sys/proc.h>
90
91 #include <dev/pci/if_xgereg.h>
92
93
/* Adapter flavors: Xframe I ("Xena") and Xframe II ("Herc"). */
#define XGE_TYPE_XENA 1
#define XGE_TYPE_HERC 2

/* Bytes of PCI config space saved/restored around the Xena reset. */
#define XGE_PCISIZE_XENA 26
#define XGE_PCISIZE_HERC 64

/* Receive buffer mode (1, 3 or 5 buffers per descriptor) and ring sizes. */
#define RX_MODE RX_MODE_1
#define NRXDESCS 1016
#define NTXDESCS 2048
#define NTXFRAGS 100

#define RX_MODE_1 1
#define RX_MODE_3 3
#define RX_MODE_5 5

/* Token-pasting helpers that select the rxd structs matching RX_MODE. */
#define XCONCAT3(x,y,z) x ## y ## z
#define CONCAT3(x,y,z) XCONCAT3(x,y,z)
#define NDESC_BUFMODE CONCAT3(NDESC_,RX_MODE,BUFMODE)
#define rxd_4k CONCAT3(rxd,RX_MODE,_4k)

#if 0
#define rxdesc ___CONCAT(rxd,RX_MODE)
#endif
#define rxdesc rxd1

/* TX ring index arithmetic and derived RX ring geometry. */
#define NEXTTX(x) (((x)+1) % NTXDESCS)
#define NRXFRAGS RX_MODE
#define NRXPAGES ((NRXDESCS/NDESC_BUFMODE)+1)
#define NRXREAL (NRXPAGES*NDESC_BUFMODE)
#define RXMAPSZ (NRXPAGES*PAGE_SIZE)
133
134
135
136
137
/*
 * Magic GPIO_CONTROL write sequence played during attach to work around
 * a MAC problem on Xena adapters.  Values come from the vendor reference
 * driver; their exact meaning is undocumented here -- do not reorder.
 */
static uint64_t fix_mac[] = {
	0x0060000000000000ULL, 0x0060600000000000ULL,
	0x0040600000000000ULL, 0x0000600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0000600000000000ULL,
	0x0040600000000000ULL, 0x0060600000000000ULL,
};
154
155
156
157
158
/*
 * DTX (serdes/XGXS) configuration values for Herc adapters, terminated
 * by END_SIGN.  Vendor-supplied magic; presumably consumed by
 * xge_setup_xgxs_herc() -- do not reorder.
 */
#define END_SIGN 0x0
static uint64_t herc_dtx_cfg[] = {
	0x8000051536750000ULL, 0x80000515367500E0ULL,
	0x8000051536750004ULL, 0x80000515367500E4ULL,

	0x80010515003F0000ULL, 0x80010515003F00E0ULL,
	0x80010515003F0004ULL, 0x80010515003F00E4ULL,

	0x801205150D440000ULL, 0x801205150D4400E0ULL,
	0x801205150D440004ULL, 0x801205150D4400E4ULL,

	0x80020515F2100000ULL, 0x80020515F21000E0ULL,
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,

	END_SIGN
};
175
/*
 * Per-device softc: register mappings, DMA state, and the TX/RX
 * descriptor rings.
 */
struct xge_softc {
	struct device sc_dev;		/* generic device glue; must be first */
	struct arpcom sc_arpcom;	/* ethernet common data (contains ifnet) */
	struct ifmedia xena_media;	/* media selection (fixed 10Gbase-SR) */

	void *sc_ih;			/* interrupt handler cookie */
	void *sc_shutdownhook;		/* shutdown hook cookie */

	bus_dma_tag_t sc_dmat;		/* DMA tag from PCI attach args */
	bus_space_tag_t sc_st;		/* PIF register space tag */
	bus_space_handle_t sc_sh;	/* PIF register space handle */
	bus_space_tag_t sc_txt;		/* TXP (doorbell) register space tag */
	bus_space_handle_t sc_txh;	/* TXP register space handle */

	pcireg_t sc_pciregs[16];	/* config space saved across Xena reset */

	int xge_type;			/* XGE_TYPE_XENA or XGE_TYPE_HERC */
	int xge_if_flags;		/* if_flags snapshot for SIOCSIFFLAGS */

	/* Transmit ring state. */
	struct txd *sc_txd[NTXDESCS];	/* KVA of each TX descriptor list */
	bus_addr_t sc_txdp[NTXDESCS];	/* bus address of each TX desc list */
	bus_dmamap_t sc_txm[NTXDESCS];	/* per-packet TX DMA maps */
	struct mbuf *sc_txb[NTXDESCS];	/* mbufs pending TX completion */
	int sc_nexttx, sc_lasttx;	/* producer / last-reclaimed indices */
	bus_dmamap_t sc_txmap;		/* map for the TX descriptor memory */

	/* Receive ring state. */
	bus_dmamap_t sc_rxmap;		/* map for the RX descriptor pages */
	struct rxd_4k *sc_rxd_4k[NRXPAGES]; /* KVA of each RX descriptor page */
	bus_dmamap_t sc_rxm[NRXREAL];	/* per-buffer RX DMA maps */
	struct mbuf *sc_rxb[NRXREAL];	/* mbufs loaded into the RX ring */
	int sc_nextrx;			/* next RX descriptor to examine */
};
210
/* Debug printf helpers; compiled in only when XGE_DEBUG is defined. */
#ifdef XGE_DEBUG
#define DPRINTF(x) do { if (xgedebug) printf x ; } while (0)
#define DPRINTFN(n,x) do { if (xgedebug >= (n)) printf x ; } while (0)
int xgedebug = 0;
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif
219
220 int xge_match(struct device *, void *, void *);
221 void xge_attach(struct device *, struct device *, void *);
222 int xge_alloc_txmem(struct xge_softc *);
223 int xge_alloc_rxmem(struct xge_softc *);
224 void xge_start(struct ifnet *);
225 void xge_stop(struct ifnet *, int);
226 void xge_shutdown(void *);
227 int xge_add_rxbuf(struct xge_softc *, int);
228 void xge_setmulti(struct xge_softc *);
229 void xge_setpromisc(struct xge_softc *);
230 int xge_setup_xgxs_xena(struct xge_softc *);
231 int xge_setup_xgxs_herc(struct xge_softc *);
232 int xge_ioctl(struct ifnet *, u_long, caddr_t);
233 int xge_init(struct ifnet *);
234 void xge_ifmedia_status(struct ifnet *, struct ifmediareq *);
235 int xge_xgmii_mediachange(struct ifnet *);
236 void xge_enable(struct xge_softc *);
237 int xge_intr(void *);
238
239
240
241
/* Register access shorthands; a variable "sc" must be in scope. */
#define PIF_WCSR(csr, val) pif_wcsr(sc, csr, val)
#define PIF_RCSR(csr) pif_rcsr(sc, csr)
#define TXP_WCSR(csr, val) txp_wcsr(sc, csr, val)
#define PIF_WKEY(csr, val) pif_wkey(sc, csr, val)
246
247 static inline void
248 pif_wcsr(struct xge_softc *sc, bus_size_t csr, uint64_t val)
249 {
250 uint32_t lval, hval;
251
252 lval = val&0xffffffff;
253 hval = val>>32;
254
255 bus_space_write_4(sc->sc_st, sc->sc_sh, csr, lval);
256 bus_space_write_4(sc->sc_st, sc->sc_sh, csr+4, hval);
257 }
258
259 static inline uint64_t
260 pif_rcsr(struct xge_softc *sc, bus_size_t csr)
261 {
262 uint64_t val, val2;
263
264 val = bus_space_read_4(sc->sc_st, sc->sc_sh, csr);
265 val2 = bus_space_read_4(sc->sc_st, sc->sc_sh, csr+4);
266 val |= (val2 << 32);
267 return (val);
268 }
269
270 static inline void
271 txp_wcsr(struct xge_softc *sc, bus_size_t csr, uint64_t val)
272 {
273 uint32_t lval, hval;
274
275 lval = val&0xffffffff;
276 hval = val>>32;
277
278 bus_space_write_4(sc->sc_txt, sc->sc_txh, csr, lval);
279 bus_space_write_4(sc->sc_txt, sc->sc_txh, csr+4, hval);
280 }
281
282
/*
 * Write a 64-bit key-protected PIF register.  On Xena the unlock key
 * must be written to RMAC_CFG_KEY immediately before EACH 32-bit half
 * of the target register -- the key/data ordering here is
 * hardware-mandated, so do not reorder these writes.
 */
static inline void
pif_wkey(struct xge_softc *sc, bus_size_t csr, uint64_t val)
{
	uint32_t lval, hval;

	lval = val&0xffffffff;
	hval = val>>32;

	if (sc->xge_type == XGE_TYPE_XENA)
		PIF_WCSR(RMAC_CFG_KEY, RMAC_KEY_VALUE);

	bus_space_write_4(sc->sc_st, sc->sc_sh, csr, lval);

	if (sc->xge_type == XGE_TYPE_XENA)
		PIF_WCSR(RMAC_CFG_KEY, RMAC_KEY_VALUE);

	bus_space_write_4(sc->sc_st, sc->sc_sh, csr+4, hval);
}
301
/* Autoconf glue: attach structure and driver metadata. */
struct cfattach xge_ca = {
	sizeof(struct xge_softc), xge_match, xge_attach
};

struct cfdriver xge_cd = {
	0, "xge", DV_IFNET
};
309
#define XNAME sc->sc_dev.dv_xname

/*
 * Sync one RX descriptor within its 4k page; descriptors live
 * NDESC_BUFMODE to a page in the sc_rxmap DMA area.
 */
#define XGE_RXSYNC(desc, what) \
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap, \
	(desc/NDESC_BUFMODE) * XGE_PAGE + sizeof(struct rxdesc) * \
	(desc%NDESC_BUFMODE), sizeof(struct rxdesc), what)
/* KVA of RX descriptor "desc" inside its page. */
#define XGE_RXD(desc) &sc->sc_rxd_4k[desc/NDESC_BUFMODE]-> \
	r4_rxd[desc%NDESC_BUFMODE]

/* Largest frame the hardware handles; MTU derived by stripping headers. */
#define XGE_MAX_FRAMELEN 9622
#define XGE_MAX_MTU (XGE_MAX_FRAMELEN - ETHER_HDR_LEN - \
    ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN)
325
/* PCI IDs handled by this driver: Xframe (Xena) and Xframe II (Herc). */
const struct pci_matchid xge_devices[] = {
	{ PCI_VENDOR_NETERION, PCI_PRODUCT_NETERION_XFRAME },
	{ PCI_VENDOR_NETERION, PCI_PRODUCT_NETERION_XFRAME_2 }
};
330
331 int
332 xge_match(struct device *parent, void *match, void *aux)
333 {
334 return (pci_matchbyid((struct pci_attach_args *)aux, xge_devices,
335 sizeof(xge_devices)/sizeof(xge_devices[0])));
336 }
337
/*
 * Attach: map the register BARs, reset and bring up the chip, read the
 * station address, allocate the TX/RX descriptor memory, program the
 * rings, and attach the network interface.  The register write sequence
 * follows the vendor bring-up order; do not reorder.
 */
void
xge_attach(struct device *parent, struct device *self, void *aux)
{
	struct pci_attach_args *pa = aux;
	struct xge_softc *sc;
	struct ifnet *ifp;
	pcireg_t memtype;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	pci_chipset_tag_t pc = pa->pa_pc;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint64_t val;
	int i;

	sc = (struct xge_softc *)self;

	sc->sc_dmat = pa->pa_dmat;

	if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_NETERION_XFRAME)
		sc->xge_type = XGE_TYPE_XENA;
	else
		sc->xge_type = XGE_TYPE_HERC;

	/* Map the PIF (control) and TXP (doorbell) register spaces. */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, XGE_PIF_BAR);
	if (pci_mapreg_map(pa, XGE_PIF_BAR, memtype, 0,
	    &sc->sc_st, &sc->sc_sh, 0, 0, 0)) {
		printf(": unable to map PIF BAR registers\n");
		return;
	}

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, XGE_TXP_BAR);
	if (pci_mapreg_map(pa, XGE_TXP_BAR, memtype, 0,
	    &sc->sc_txt, &sc->sc_txh, 0, 0, 0)) {
		printf(": unable to map TXP BAR registers\n");
		return;
	}

	/* Save PCI config space; the Xena reset below clobbers it. */
	if (sc->xge_type == XGE_TYPE_XENA) {
		for (i = 0; i < XGE_PCISIZE_XENA; i += 4)
			sc->sc_pciregs[i/4] = pci_conf_read(pa->pa_pc, pa->pa_tag, i);
	}

	/* Configure the byte swapper for little-endian hosts (written twice). */
#if BYTE_ORDER == LITTLE_ENDIAN
	val = (uint64_t)0xFFFFFFFFFFFFFFFFULL;
	val &= ~(TxF_R_SE|RxF_W_SE);
	PIF_WCSR(SWAPPER_CTRL, val);
	PIF_WCSR(SWAPPER_CTRL, val);
#endif

	/* Verify the swapper by reading back a known magic value. */
	if ((val = PIF_RCSR(PIF_RD_SWAPPER_Fb)) != SWAPPER_MAGIC) {
		printf(": failed configuring endian, %llx != %llx!\n",
		    (unsigned long long)val, SWAPPER_MAGIC);
		return;
	}

	if (sc->xge_type == XGE_TYPE_XENA) {
		/*
		 * Play the vendor GPIO fixup sequence; each write is
		 * posted by a read back of GPIO_CONTROL.
		 */
		for (i = 0; i < sizeof(fix_mac)/sizeof(fix_mac[0]); i++) {
			PIF_WCSR(GPIO_CONTROL, fix_mac[i]);
			PIF_RCSR(GPIO_CONTROL);
		}

		/* Reset the chip, then restore the saved config space. */
		PIF_WCSR(SW_RESET, 0xa5a5a50000000000ULL);
		DELAY(500000);
		for (i = 0; i < XGE_PCISIZE_XENA; i += 4)
			pci_conf_write(pa->pa_pc, pa->pa_tag, i, sc->sc_pciregs[i/4]);

		/* Reset cleared the swapper; program and verify it again. */
#if BYTE_ORDER == LITTLE_ENDIAN
		val = (uint64_t)0xFFFFFFFFFFFFFFFFULL;
		val &= ~(TxF_R_SE|RxF_W_SE);
		PIF_WCSR(SWAPPER_CTRL, val);
		PIF_WCSR(SWAPPER_CTRL, val);
#endif

		if ((val = PIF_RCSR(PIF_RD_SWAPPER_Fb)) != SWAPPER_MAGIC) {
			printf(": failed configuring endian2, %llx != %llx!\n",
			    (unsigned long long)val, SWAPPER_MAGIC);
			return;
		}
	}

	/* Herc: clear one byte of SW_RESET and give it a second to settle. */
	if (sc->xge_type == XGE_TYPE_HERC) {
		val = PIF_RCSR(SW_RESET);
		val &= 0xffff00ffffffffffULL;
		PIF_WCSR(SW_RESET,val);
		delay(1000*1000);
	}

	/* Both flavors: clear the next SW_RESET byte and wait. */
	val = PIF_RCSR(SW_RESET);
	val &= 0xffffff00ffffffffULL;
	PIF_WCSR(SW_RESET, val);
	DELAY(500000);

	/* Herc: poll (up to ~1s) until the RIC is done and registers are safe. */
	if (sc->xge_type == XGE_TYPE_HERC){
		for (i = 0; i < 50; i++) {
			val = PIF_RCSR(ADAPTER_STATUS);
			if (!(val & RIC_RUNNING))
				break;
			delay(20*1000);
		}

		if (i == 50) {
			printf(": not safe to access registers\n");
			return;
		}
	}

	/* Bring up the XGXS transceiver block. */
	if (sc->xge_type == XGE_TYPE_XENA)
		xge_setup_xgxs_xena(sc);
	else if(sc->xge_type == XGE_TYPE_HERC)
		xge_setup_xgxs_herc(sc);

	/* Read the station address from RMAC address slot 0. */
	PIF_WCSR(RMAC_ADDR_CMD_MEM,
	    RMAC_ADDR_CMD_MEM_STR|RMAC_ADDR_CMD_MEM_OFF(0));
	while (PIF_RCSR(RMAC_ADDR_CMD_MEM) & RMAC_ADDR_CMD_MEM_STR)
		;
	val = PIF_RCSR(RMAC_ADDR_DATA0_MEM);
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		enaddr[i] = (uint8_t)(val >> (56 - (8*i)));

	/* Allocate the TX descriptor lists. */
	if (xge_alloc_txmem(sc)) {
		printf(": failed allocating txmem.\n");
		return;
	}

	/* Use a single TX FIFO (partition 0); disable the rest. */
	PIF_WCSR(TX_FIFO_P0, TX_FIFO_LEN0(NTXDESCS));
	PIF_WCSR(TX_FIFO_P1, 0ULL);
	PIF_WCSR(TX_FIFO_P2, 0ULL);
	PIF_WCSR(TX_FIFO_P3, 0ULL);

	val = PIF_RCSR(TX_FIFO_P0);
	val |= TX_FIFO_ENABLE;
	PIF_WCSR(TX_FIFO_P0, val);

	PIF_WCSR(TX_PA_CFG,
	    TX_PA_CFG_IFR|TX_PA_CFG_ISO|TX_PA_CFG_ILC|TX_PA_CFG_ILE);

	/* Create one DMA map per TX descriptor list. */
	for (i = 0; i < NTXDESCS; i++) {
		if (bus_dmamap_create(sc->sc_dmat, XGE_MAX_FRAMELEN,
		    NTXFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_txm[i])) {
			printf(": cannot create TX DMA maps\n");
			return;
		}
	}

	sc->sc_lasttx = NTXDESCS-1;

	/* Allocate the RX descriptor pages. */
	if (xge_alloc_rxmem(sc)) {
		printf(": failed allocating rxmem\n");
		return;
	}

	/* Create one DMA map per RX descriptor. */
	for (i = 0; i < NRXREAL; i++) {
		if (bus_dmamap_create(sc->sc_dmat, XGE_MAX_FRAMELEN,
		    NRXFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_rxm[i])) {
			printf(": cannot create RX DMA maps\n");
			return;
		}
	}

	/* Pre-load every RX descriptor with an mbuf. */
	for (i = 0; i < NRXREAL; i++)
		if (xge_add_rxbuf(sc, i))
			panic("out of mbufs too early");

	PIF_WCSR(RX_QUEUE_PRIORITY, 0ULL);

	PIF_WCSR(RX_W_ROUND_ROBIN_0, 0ULL);
	PIF_WCSR(RX_W_ROUND_ROBIN_1, 0ULL);
	PIF_WCSR(RX_W_ROUND_ROBIN_2, 0ULL);
	PIF_WCSR(RX_W_ROUND_ROBIN_3, 0ULL);
	PIF_WCSR(RX_W_ROUND_ROBIN_4, 0ULL);

	/* Point ring 0 at the first RX descriptor page. */
	PIF_WCSR(PRC_RXD0_0, (uint64_t)sc->sc_rxmap->dm_segs[0].ds_addr);

	PIF_WCSR(PRC_ALARM_ACTION, 0ULL);

	/* Select ring mode matching the compiled-in RX buffer mode. */
#if RX_MODE == RX_MODE_1
	val = RING_MODE_1;
#elif RX_MODE == RX_MODE_3
	val = RING_MODE_3;
#else
	val = RING_MODE_5;
#endif
	PIF_WCSR(PRC_CTRL_0, RC_IN_SVC|val);

	/* Give queue 0 all of RX memory (Xena has 64 blocks, Herc 32). */
	if (sc->xge_type == XGE_TYPE_XENA) {
		PIF_WCSR(RX_QUEUE_CFG, MC_QUEUE(0, 64));
	} else {
		PIF_WCSR(RX_QUEUE_CFG, MC_QUEUE(0, 32));
	}

	val = PIF_RCSR(MC_RLDRAM_MRS);
	val |= MC_QUEUE_SIZE_ENABLE|MC_RLDRAM_MRS_ENABLE;
	PIF_WCSR(MC_RLDRAM_MRS, val);
	DELAY(1000);

	/* Program TX interrupt moderation (TTI); strobe and wait. */
	PIF_WCSR(TTI_DATA1_MEM, TX_TIMER_VAL(0x1ff) | TX_TIMER_AC |
	    TX_URNG_A(5) | TX_URNG_B(20) | TX_URNG_C(48));
	PIF_WCSR(TTI_DATA2_MEM,
	    TX_UFC_A(25) | TX_UFC_B(64) | TX_UFC_C(128) | TX_UFC_D(512));
	PIF_WCSR(TTI_COMMAND_MEM, TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE);
	while (PIF_RCSR(TTI_COMMAND_MEM) & TTI_CMD_MEM_STROBE)
		;

	/* Program RX interrupt moderation (RTI); strobe and wait. */
	PIF_WCSR(RTI_DATA1_MEM, RX_TIMER_VAL(0x800) | RX_TIMER_AC |
	    RX_URNG_A(5) | RX_URNG_B(20) | RX_URNG_C(50));
	PIF_WCSR(RTI_DATA2_MEM,
	    RX_UFC_A(64) | RX_UFC_B(128) | RX_UFC_C(256) | RX_UFC_D(512));
	PIF_WCSR(RTI_COMMAND_MEM, RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE);
	while (PIF_RCSR(RTI_COMMAND_MEM) & RTI_CMD_MEM_STROBE)
		;

	/* The only supported media is fixed 10Gbase-SR. */
	ifmedia_init(&sc->xena_media, IFM_IMASK, xge_xgmii_mediachange,
	    xge_ifmedia_status);
	ifmedia_add(&sc->xena_media, IFM_ETHER|IFM_10G_SR, 0, NULL);
	ifmedia_set(&sc->xena_media, IFM_ETHER|IFM_10G_SR);

	/* Set up and attach the network interface. */
	ifp = &sc->sc_arpcom.ac_if;
	strlcpy(ifp->if_xname, XNAME, IFNAMSIZ);
	memcpy(sc->sc_arpcom.ac_enaddr, enaddr, ETHER_ADDR_LEN);
	ifp->if_baudrate = 1000000000;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = xge_ioctl;
	ifp->if_start = xge_start;
#ifdef XGE_JUMBO
	ifp->if_hardmtu = XGE_MAX_MTU;
#endif
	IFQ_SET_MAXLEN(&ifp->if_snd, NTXDESCS - 1);
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_IPv4 |
	    IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;

#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

	if_attach(ifp);
	ether_ifattach(ifp);

	sc->sc_shutdownhook = shutdownhook_establish(xge_shutdown, sc);

	/* Map and establish the interrupt last, once the rings are ready. */
	if (pci_intr_map(pa, &ih)) {
		printf(": unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	if ((sc->sc_ih =
	    pci_intr_establish(pc, ih, IPL_NET, xge_intr, sc, XNAME)) == NULL) {
		printf(": unable to establish interrupt at %s\n",
		    intrstr ? intrstr : "<unknown>");
		return;
	}
	printf(": %s, address %s\n", intrstr, ether_sprintf(enaddr));
}
676
677 void
678 xge_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
679 {
680 struct xge_softc *sc = ifp->if_softc;
681 uint64_t reg;
682
683 ifmr->ifm_status = IFM_AVALID;
684 ifmr->ifm_active = IFM_ETHER|IFM_10G_SR;
685
686 reg = PIF_RCSR(ADAPTER_STATUS);
687 if ((reg & (RMAC_REMOTE_FAULT|RMAC_LOCAL_FAULT)) == 0)
688 ifmr->ifm_status |= IFM_ACTIVE;
689 }
690
/*
 * Media change callback: a no-op, since the only media is fixed
 * 10Gbase-SR.
 */
int
xge_xgmii_mediachange(struct ifnet *ifp)
{
	return (0);
}
696
/*
 * Enable the adapter and light the LED.  Two separate read-modify-write
 * cycles of ADAPTER_CONTROL are performed, matching the original
 * sequence -- do not merge them into one write.
 */
void
xge_enable(struct xge_softc *sc)
{
	uint64_t val;

	/* Set the adapter-enable bit. */
	val = PIF_RCSR(ADAPTER_CONTROL);
	val |= ADAPTER_EN;
	PIF_WCSR(ADAPTER_CONTROL, val);

	/* Turn on the LED in a second RMW cycle. */
	val = PIF_RCSR(ADAPTER_CONTROL);
	val |= LED_ON;
	PIF_WCSR(ADAPTER_CONTROL, val);
#ifdef XGE_DEBUG
	printf("%s: link up\n", XNAME);
#endif
}
715
/*
 * Initialize the interface: stop any pending I/O, enable the MACs,
 * verify the adapter is quiescent, program receive parameters, enable
 * the adapter, and unmask interrupts.  Returns 0 on success, 1 if the
 * adapter refuses to quiesce.
 */
int
xge_init(struct ifnet *ifp)
{
	struct xge_softc *sc = ifp->if_softc;
	uint64_t val;
	int s;

	s = splnet();

	/* Cancel any pending I/O and bring the chip to a known state. */
	xge_stop(ifp, 0);

	/* Enable TX/RX MACs; MAC_CFG needs the unlock key (Xena). */
	PIF_WKEY(MAC_CFG, TMAC_EN|RMAC_EN|TMAC_APPEND_PAD|RMAC_STRIP_FCS|
	    RMAC_BCAST_EN|RMAC_DISCARD_PFRM);

	DELAY(1000);

	/* The adapter must be quiescent before it may be enabled. */
	val = PIF_RCSR(ADAPTER_STATUS);
	if ((val & QUIESCENT) != QUIESCENT) {
#if 0
		char buf[200];
#endif
		printf("%s: adapter not quiescent, aborting\n", XNAME);
		val = (val & QUIESCENT) ^ QUIESCENT;
#if 0
		bitmask_snprintf(val, QUIESCENT_BMSK, buf, sizeof buf);
		printf("%s: ADAPTER_STATUS missing bits %s\n", XNAME, buf);
#endif
		splx(s);
		return (1);
	}

	/* Keep VLAN tags in received frames (no hardware stripping). */
	val = PIF_RCSR(RX_PA_CFG);
	val &= ~STRIP_VLAN_TAG;
	PIF_WCSR(RX_PA_CFG, val);

	/* Set the maximum receive payload length. */
#ifdef XGE_JUMBO
	PIF_WCSR(RMAC_MAX_PYLD_LEN, RMAC_PYLD_LEN(XGE_MAX_FRAMELEN));
#else
	PIF_WCSR(RMAC_MAX_PYLD_LEN, RMAC_PYLD_LEN(ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN));
#endif

	val = PIF_RCSR(ADAPTER_CONTROL);
	val |= EOI_TX_ON;
	PIF_WCSR(ADAPTER_CONTROL, val);

	xge_enable(sc);

	/* Unmask the interrupt sources this driver services. */
	PIF_WCSR(TX_TRAFFIC_MASK, 0);
	PIF_WCSR(RX_TRAFFIC_MASK, 0);
	PIF_WCSR(GENERAL_INT_MASK, 0);
	PIF_WCSR(TXPIC_INT_MASK, 0);
	PIF_WCSR(RXPIC_INT_MASK, 0);

	/* Of the MAC interrupts, only link state change is wanted. */
	PIF_WCSR(MAC_INT_MASK, MAC_TMAC_INT);
	PIF_WCSR(MAC_RMAC_ERR_MASK, ~RMAC_LINK_STATE_CHANGE_INT);

	xge_setpromisc(sc);

	xge_setmulti(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	splx(s);

	return (0);
}
795
/*
 * Stop the interface: clear the adapter-enable bit and busy-wait until
 * the hardware reports quiescence.  The "disable" argument is accepted
 * for the if_stop convention but is unused here.
 */
void
xge_stop(struct ifnet *ifp, int disable)
{
	struct xge_softc *sc = ifp->if_softc;
	uint64_t val;

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	val = PIF_RCSR(ADAPTER_CONTROL);
	val &= ~ADAPTER_EN;
	PIF_WCSR(ADAPTER_CONTROL, val);

	/* Busy-wait (no timeout) for all QUIESCENT bits to assert. */
	while ((PIF_RCSR(ADAPTER_STATUS) & QUIESCENT) != QUIESCENT)
		;
}
811
812 void
813 xge_shutdown(void *pv)
814 {
815 struct xge_softc *sc = (struct xge_softc *)pv;
816 struct ifnet *ifp = &sc->sc_arpcom.ac_if;
817
818 xge_stop(ifp, 1);
819 }
820
/*
 * Interrupt handler: acknowledge the general interrupt, handle link
 * state changes, reclaim completed TX descriptors, and drain the RX
 * ring.  Returns 1 if the interrupt was ours, 0 otherwise.  The
 * bus_dmamap_sync ordering around descriptor ownership checks is
 * required for correctness -- do not reorder.
 */
int
xge_intr(void *pv)
{
	struct xge_softc *sc = pv;
	struct txd *txd;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_dmamap_t dmp;
	uint64_t val;
	int i, lasttx, plen;

	val = PIF_RCSR(GENERAL_INT_STATUS);
	if (val == 0)
		return (0);	/* not our interrupt */

	PIF_WCSR(GENERAL_INT_STATUS, val);	/* ack */

	/* Link state change: wait for quiescence, ack, re-enable if up. */
	if ((val = PIF_RCSR(MAC_RMAC_ERR_REG)) & RMAC_LINK_STATE_CHANGE_INT) {
#ifdef XGE_DEBUG
		printf("%s: link down\n", XNAME);
#endif
		while ((PIF_RCSR(ADAPTER_STATUS) & QUIESCENT) != QUIESCENT)
			;
		PIF_WCSR(MAC_RMAC_ERR_REG, RMAC_LINK_STATE_CHANGE_INT);

		val = PIF_RCSR(ADAPTER_STATUS);
		if ((val & (RMAC_REMOTE_FAULT|RMAC_LOCAL_FAULT)) == 0)
			xge_enable(sc);	/* link is back */
	}

	if ((val = PIF_RCSR(TX_TRAFFIC_INT)))
		PIF_WCSR(TX_TRAFFIC_INT, val);	/* ack */

	/*
	 * Reclaim transmitted descriptors: walk from the last reclaimed
	 * index until we hit one the hardware still owns.
	 */
	lasttx = sc->sc_lasttx;
	while ((i = NEXTTX(sc->sc_lasttx)) != sc->sc_nexttx) {
		txd = sc->sc_txd[i];
		dmp = sc->sc_txm[i];

		bus_dmamap_sync(sc->sc_dmat, dmp, 0,
		    dmp->dm_mapsize,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		if (txd->txd_control1 & TXD_CTL1_OWN) {
			/* Still owned by hardware; hand it back. */
			bus_dmamap_sync(sc->sc_dmat, dmp, 0,
			    dmp->dm_mapsize, BUS_DMASYNC_PREREAD);
			break;
		}
		bus_dmamap_unload(sc->sc_dmat, dmp);
		m_freem(sc->sc_txb[i]);
		ifp->if_opackets++;
		sc->sc_lasttx = i;
	}

	if (sc->sc_lasttx != lasttx)
		ifp->if_flags &= ~IFF_OACTIVE;	/* freed some slots */

	/* Try to queue more packets now that slots may be free. */
	xge_start(ifp);

	if ((val = PIF_RCSR(RX_TRAFFIC_INT)))
		PIF_WCSR(RX_TRAFFIC_INT, val);	/* ack */

	/* Drain the RX ring until a hardware-owned descriptor is found. */
	for (;;) {
		struct rxdesc *rxd;
		struct mbuf *m;

		XGE_RXSYNC(sc->sc_nextrx,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		rxd = XGE_RXD(sc->sc_nextrx);
		if (rxd->rxd_control1 & RXD_CTL1_OWN) {
			XGE_RXSYNC(sc->sc_nextrx, BUS_DMASYNC_PREREAD);
			break;
		}

		/* Got a packet; collect the buffer lengths per RX mode. */
		m = sc->sc_rxb[sc->sc_nextrx];
#if RX_MODE == RX_MODE_1
		plen = m->m_len = RXD_CTL2_BUF0SIZ(rxd->rxd_control2);
#elif RX_MODE == RX_MODE_3
#error Fix rxmodes in xge_intr
#elif RX_MODE == RX_MODE_5
		plen = m->m_len = RXD_CTL2_BUF0SIZ(rxd->rxd_control2);
		plen += m->m_next->m_len = RXD_CTL2_BUF1SIZ(rxd->rxd_control2);
		plen += m->m_next->m_next->m_len =
		    RXD_CTL2_BUF2SIZ(rxd->rxd_control2);
		plen += m->m_next->m_next->m_next->m_len =
		    RXD_CTL3_BUF3SIZ(rxd->rxd_control3);
		plen += m->m_next->m_next->m_next->m_next->m_len =
		    RXD_CTL3_BUF4SIZ(rxd->rxd_control3);
#endif
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = plen;

		val = rxd->rxd_control1;	/* save protocol flags */

		if (xge_add_rxbuf(sc, sc->sc_nextrx)) {
			/* No new mbuf: recycle the descriptor and drop. */
#if RX_MODE == RX_MODE_1
			rxd->rxd_control2 = RXD_MKCTL2(MCLBYTES, 0, 0);
			rxd->rxd_control1 = RXD_CTL1_OWN;
#elif RX_MODE == RX_MODE_3
#elif RX_MODE == RX_MODE_5
#endif
			XGE_RXSYNC(sc->sc_nextrx,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			ifp->if_ierrors++;
			break;
		}

		ifp->if_ipackets++;

		/* Propagate hardware checksum results to the mbuf. */
		if (RXD_CTL1_PROTOS(val) & RXD_CTL1_P_IPv4)
			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
		if (RXD_CTL1_PROTOS(val) & RXD_CTL1_P_TCP)
			m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
		if (RXD_CTL1_PROTOS(val) & RXD_CTL1_P_UDP)
			m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif

		ether_input_mbuf(ifp, m);

		if (++sc->sc_nextrx == NRXREAL)	/* wrap */
			sc->sc_nextrx = 0;
	}

	return (1);
}
956
/*
 * Ioctl handler: standard ethernet ioctls plus MTU, flags (promiscuous
 * toggling without full reinit), multicast, and media.  Runs at splnet.
 */
int
xge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct xge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	/* Let the common ethernet layer have first crack. */
	if ((error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data)) > 0) {
		splx(s);
		return (error);
	}

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_up;
		if (!(ifp->if_flags & IFF_RUNNING))
			xge_init(ifp);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_arpcom, ifa);
#endif
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ifp->if_hardmtu)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu)
			ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the promiscuous bit changed while
			 * running, update the filter without a reinit.
			 */
			if (ifp->if_flags & IFF_RUNNING &&
			    (ifp->if_flags ^ sc->xge_if_flags) &
			     IFF_PROMISC) {
				xge_setpromisc(sc);
			} else {
				if (!(ifp->if_flags & IFF_RUNNING))
					xge_init(ifp);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				xge_stop(ifp, 1);
		}
		sc->xge_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI)
		    ? ether_addmulti(ifr, &sc->sc_arpcom)
		    : ether_delmulti(ifr, &sc->sc_arpcom);

		/* Reprogram the hardware filter if the list changed. */
		if (error == ENETRESET) {
			if (ifp->if_flags & IFF_RUNNING)
				xge_setmulti(sc);
			error = 0;
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->xena_media, cmd);
		break;
	default:
		error = ENOTTY;
	}

	splx(s);

	return (error);
}
1028
/*
 * Program the hardware multicast filter from the interface's multicast
 * list.  Falls back to all-multicast if a range is present or the list
 * exceeds the hardware's MAX_MCAST_ADDR slots.  Slot 0 holds the
 * station address; multicast entries start at slot 1.
 */
void
xge_setmulti(struct xge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct arpcom *ac = &sc->sc_arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;
	int i, numaddr = 1;	/* slot 0 is the station address */
	uint64_t val;

	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/* Address ranges cannot be filtered exactly. */
			goto allmulti;
		}
		if (numaddr == MAX_MCAST_ADDR)
			goto allmulti;
		/* Pack the 6 address bytes into the top of a 64-bit word. */
		for (val = 0, i = 0; i < ETHER_ADDR_LEN; i++) {
			val <<= 8;
			val |= enm->enm_addrlo[i];
		}
		PIF_WCSR(RMAC_ADDR_DATA0_MEM, val << 16);
		PIF_WCSR(RMAC_ADDR_DATA1_MEM, 0xFFFFFFFFFFFFFFFFULL);
		PIF_WCSR(RMAC_ADDR_CMD_MEM, RMAC_ADDR_CMD_MEM_WE|
		    RMAC_ADDR_CMD_MEM_STR|RMAC_ADDR_CMD_MEM_OFF(numaddr));
		while (PIF_RCSR(RMAC_ADDR_CMD_MEM) & RMAC_ADDR_CMD_MEM_STR)
			;
		numaddr++;
		ETHER_NEXT_MULTI(step, enm);
	}

	/* Disable the remaining (unused) filter slots. */
	for (i = numaddr; i < MAX_MCAST_ADDR; i++) {
		PIF_WCSR(RMAC_ADDR_DATA0_MEM, 0xffffffffffff0000ULL);
		PIF_WCSR(RMAC_ADDR_DATA1_MEM, 0xFFFFFFFFFFFFFFFFULL);
		PIF_WCSR(RMAC_ADDR_CMD_MEM, RMAC_ADDR_CMD_MEM_WE|
		    RMAC_ADDR_CMD_MEM_STR|RMAC_ADDR_CMD_MEM_OFF(i));
		while (PIF_RCSR(RMAC_ADDR_CMD_MEM) & RMAC_ADDR_CMD_MEM_STR)
			;
	}
	ifp->if_flags &= ~IFF_ALLMULTI;
	return;

allmulti:
	/* Accept all multicast frames via slot 1. */
	ifp->if_flags |= IFF_ALLMULTI;
	PIF_WCSR(RMAC_ADDR_DATA0_MEM, 0x8000000000000000ULL);
	PIF_WCSR(RMAC_ADDR_DATA1_MEM, 0xF000000000000000ULL);
	PIF_WCSR(RMAC_ADDR_CMD_MEM, RMAC_ADDR_CMD_MEM_WE|
	    RMAC_ADDR_CMD_MEM_STR|RMAC_ADDR_CMD_MEM_OFF(1));
	while (PIF_RCSR(RMAC_ADDR_CMD_MEM) & RMAC_ADDR_CMD_MEM_STR)
		;
}
1082
1083 void
1084 xge_setpromisc(struct xge_softc *sc)
1085 {
1086 struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1087 uint64_t val;
1088
1089 val = PIF_RCSR(MAC_CFG);
1090
1091 if (ifp->if_flags & IFF_PROMISC)
1092 val |= RMAC_PROM_EN;
1093 else
1094 val &= ~RMAC_PROM_EN;
1095
1096 PIF_WCSR(MAC_CFG, val);
1097 }
1098
/*
 * Start transmission: dequeue packets, load each into a TX descriptor
 * list, and ring the TXP doorbell.  Stops when the queue is empty, the
 * ring is full, or a DMA load fails.  Descriptor ownership (TXD_CTL1_OWN)
 * is set before the doorbell write -- order matters.
 */
void
xge_start(struct ifnet *ifp)
{
	struct xge_softc *sc = ifp->if_softc;
	struct txd *txd = NULL;
	bus_dmamap_t dmp;
	struct mbuf *m;
	uint64_t par, lcr;
	int nexttx = 0, ntxd, error, i;
#if NVLAN > 0
	struct ifvlan *ifv = NULL;
#endif

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	par = lcr = 0;
	for (;;) {
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;	/* out of packets */

		if (sc->sc_nexttx == sc->sc_lasttx)
			break;	/* ring is full */

		nexttx = sc->sc_nexttx;
		dmp = sc->sc_txm[nexttx];

		if ((error = bus_dmamap_load_mbuf(sc->sc_dmat, dmp, m,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT)) != 0) {
			printf("%s: bus_dmamap_load_mbuf error %d\n",
			    XNAME, error);
			break;
		}
		/* Committed now: take the packet off the queue. */
		IFQ_DEQUEUE(&ifp->if_snd, m);

		bus_dmamap_sync(sc->sc_dmat, dmp, 0, dmp->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/* Fill one descriptor per non-empty DMA segment. */
		txd = sc->sc_txd[nexttx];
		sc->sc_txb[nexttx] = m;
		for (i = 0; i < dmp->dm_nsegs; i++) {
			if (dmp->dm_segs[i].ds_len == 0)
				continue;
			txd->txd_control1 = dmp->dm_segs[i].ds_len;
			txd->txd_control2 = 0;
			txd->txd_bufaddr = dmp->dm_segs[i].ds_addr;
			txd++;
		}
		ntxd = txd - sc->sc_txd[nexttx] - 1;	/* index of last desc */
		/* Mark the first descriptor: owned by hw, first of chain. */
		txd = sc->sc_txd[nexttx];
		txd->txd_control1 |= TXD_CTL1_OWN|TXD_CTL1_GCF;
		txd->txd_control2 = TXD_CTL2_UTIL;

#if NVLAN > 0
		/* Outgoing VLAN frame: ask hardware to insert the tag. */
		if ((m->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) &&
		    m->m_pkthdr.rcvif != NULL) {
			ifv = m->m_pkthdr.rcvif->if_softc;

			txd->txd_control2 |= TXD_CTL2_VLANE;
			txd->txd_control2 |= TXD_CTL2_VLANT(ifv->ifv_tag);
		}
#endif

		/* Request hardware checksum offload as flagged. */
		if (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
			txd->txd_control2 |= TXD_CTL2_CIPv4;
		if (m->m_pkthdr.csum_flags & M_TCPV4_CSUM_OUT)
			txd->txd_control2 |= TXD_CTL2_CTCP;
		if (m->m_pkthdr.csum_flags & M_UDPV4_CSUM_OUT)
			txd->txd_control2 |= TXD_CTL2_CUDP;

		/* Mark the last descriptor of the chain. */
		txd[ntxd].txd_control1 |= TXD_CTL1_GCL;

		bus_dmamap_sync(sc->sc_dmat, dmp, 0, dmp->dm_mapsize,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Ring the doorbell: descriptor list address + control. */
		par = sc->sc_txdp[nexttx];
		lcr = TXDL_NUMTXD(ntxd) | TXDL_LGC_FIRST | TXDL_LGC_LAST;
		TXP_WCSR(TXDL_PAR, par);
		TXP_WCSR(TXDL_LCR, lcr);

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		sc->sc_nexttx = NEXTTX(nexttx);
	}
}
1188
1189
1190
1191
1192
/*
 * Allocate and map the contiguous DMA area holding all TX descriptor
 * lists (NTXDESCS lists of NTXFRAGS descriptors each), and record the
 * KVA and bus address of each list.  Returns 0 on success, ENOBUFS on
 * failure; "state" tracks how far setup got so the error path unwinds
 * exactly what was acquired.
 */
int
xge_alloc_txmem(struct xge_softc *sc)
{
	struct txd *txp;
	bus_dma_segment_t seg;
	bus_addr_t txdp;
	caddr_t kva;
	int i, rseg, state;

#define TXMAPSZ (NTXDESCS*NTXFRAGS*sizeof(struct txd))
	state = 0;
	if (bus_dmamem_alloc(sc->sc_dmat, TXMAPSZ, PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT))
		goto err;
	state++;
	if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, TXMAPSZ, &kva,
	    BUS_DMA_NOWAIT))
		goto err;

	state++;
	if (bus_dmamap_create(sc->sc_dmat, TXMAPSZ, 1, TXMAPSZ, 0,
	    BUS_DMA_NOWAIT, &sc->sc_txmap))
		goto err;
	state++;
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_txmap,
	    kva, TXMAPSZ, NULL, BUS_DMA_NOWAIT))
		goto err;

	/* Record the KVA and bus address of each descriptor list. */
	txp = (struct txd *)kva;
	txdp = seg.ds_addr;
	for (txp = (struct txd *)kva, i = 0; i < NTXDESCS; i++) {
		sc->sc_txd[i] = txp;
		sc->sc_txdp[i] = txdp;
		txp += NTXFRAGS;
		txdp += (NTXFRAGS * sizeof(struct txd));
	}

	return (0);

err:
	/* Unwind in reverse order of acquisition. */
	if (state > 2)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_txmap);
	if (state > 1)
		bus_dmamem_unmap(sc->sc_dmat, kva, TXMAPSZ);
	if (state > 0)
		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
	return (ENOBUFS);
}
1242
1243
1244
1245
1246
1247
/*
 * Allocate and map the RX descriptor pages.  Each 4k page holds
 * NDESC_BUFMODE descriptors plus a link to the next page's bus address;
 * the last page links back to the first, forming a ring.  Returns 0 on
 * success, EINVAL if struct rxd_4k is mis-sized, ENOBUFS on allocation
 * failure (unwound via "state" as in xge_alloc_txmem).
 */
int
xge_alloc_rxmem(struct xge_softc *sc)
{
	struct rxd_4k *rxpp;
	bus_dma_segment_t seg;
	caddr_t kva;
	int i, rseg, state;

	/* The page struct must be exactly one hardware page. */
	if (sizeof(struct rxd_4k) != XGE_PAGE) {
		printf("bad compiler struct alignment, %d != %d\n",
		    (int)sizeof(struct rxd_4k), XGE_PAGE);
		return (EINVAL);
	}

	state = 0;
	if (bus_dmamem_alloc(sc->sc_dmat, RXMAPSZ, PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT))
		goto err;
	state++;
	if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, RXMAPSZ, &kva,
	    BUS_DMA_NOWAIT))
		goto err;

	state++;
	if (bus_dmamap_create(sc->sc_dmat, RXMAPSZ, 1, RXMAPSZ, 0,
	    BUS_DMA_NOWAIT, &sc->sc_rxmap))
		goto err;
	state++;
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_rxmap,
	    kva, RXMAPSZ, NULL, BUS_DMA_NOWAIT))
		goto err;

	/* Link each page to the next; close the ring at the end. */
	for (rxpp = (struct rxd_4k *)kva, i = 0; i < NRXPAGES; i++, rxpp++) {
		sc->sc_rxd_4k[i] = rxpp;
		rxpp->r4_next = (uint64_t)sc->sc_rxmap->dm_segs[0].ds_addr +
		    (i*sizeof(struct rxd_4k)) + sizeof(struct rxd_4k);
	}
	sc->sc_rxd_4k[NRXPAGES-1]->r4_next =
	    (uint64_t)sc->sc_rxmap->dm_segs[0].ds_addr;

	return (0);

err:
	/* Unwind in reverse order of acquisition. */
	if (state > 2)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxmap);
	if (state > 1)
		bus_dmamem_unmap(sc->sc_dmat, kva, RXMAPSZ);
	if (state > 0)
		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
	return (ENOBUFS);
}
1301
1302
1303
1304
1305
/*
 * Load a fresh mbuf (chain) into RX descriptor "id" and hand the
 * descriptor to the hardware.  The mbuf layout depends on the compiled
 * RX buffer mode.  Returns 0 on success or ENOBUFS/DMA error.
 */
int
xge_add_rxbuf(struct xge_softc *sc, int id)
{
	struct rxdesc *rxd;
	struct mbuf *m[5];
	int page, desc, error;
#if RX_MODE == RX_MODE_5
	int i;
#endif

	/* Locate the descriptor within its 4k page. */
	page = id/NDESC_BUFMODE;
	desc = id%NDESC_BUFMODE;

	rxd = &sc->sc_rxd_4k[page]->r4_rxd[desc];

	/* Allocate the mbuf (chain) for this buffer mode. */
#if RX_MODE == RX_MODE_1
	MGETHDR(m[0], M_DONTWAIT, MT_DATA);
	if (m[0] == NULL)
		return (ENOBUFS);
	MCLGET(m[0], M_DONTWAIT);
	if ((m[0]->m_flags & M_EXT) == 0) {
		m_freem(m[0]);
		return (ENOBUFS);
	}
	m[0]->m_len = m[0]->m_pkthdr.len = m[0]->m_ext.ext_size;
#elif RX_MODE == RX_MODE_3
#error missing rxmode 3.
#elif RX_MODE == RX_MODE_5
	/* Five buffers: header mbuf, three plain, clusters on 3 and 4. */
	MGETHDR(m[0], M_DONTWAIT, MT_DATA);
	for (i = 1; i < 5; i++) {
		MGET(m[i], M_DONTWAIT, MT_DATA);
	}
	if (m[3])
		MCLGET(m[3], M_DONTWAIT);
	if (m[4])
		MCLGET(m[4], M_DONTWAIT);
	if (!m[0] || !m[1] || !m[2] || !m[3] || !m[4] ||
	    ((m[3]->m_flags & M_EXT) == 0) || ((m[4]->m_flags & M_EXT) == 0)) {
		/* Allocation failed somewhere; free what we got. */
		for (i = 0; i < 5; i++)
			if (m[i] != NULL)
				m_free(m[i]);
		return (ENOBUFS);
	}

	m[0]->m_next = m[1];
	m[1]->m_next = m[2];
	m[2]->m_next = m[3];
	m[3]->m_next = m[4];
#else
#error bad mode RX_MODE
#endif

	/* Release the previous buffer's DMA map, if any. */
	if (sc->sc_rxb[id])
		bus_dmamap_unload(sc->sc_dmat, sc->sc_rxm[id]);
	sc->sc_rxb[id] = m[0];

	error = bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_rxm[id], m[0],
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error)
		return (error);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxm[id], 0,
	    sc->sc_rxm[id]->dm_mapsize, BUS_DMASYNC_PREREAD);

	/* Fill the descriptor; set OWN last so hardware sees it complete. */
#if RX_MODE == RX_MODE_1
	rxd->rxd_control2 = RXD_MKCTL2(m[0]->m_len, 0, 0);
	rxd->rxd_buf0 = (uint64_t)sc->sc_rxm[id]->dm_segs[0].ds_addr;
	rxd->rxd_control1 = RXD_CTL1_OWN;
#elif RX_MODE == RX_MODE_3
#elif RX_MODE == RX_MODE_5
	rxd->rxd_control3 = RXD_MKCTL3(0, m[3]->m_len, m[4]->m_len);
	rxd->rxd_control2 = RXD_MKCTL2(m[0]->m_len, m[1]->m_len, m[2]->m_len);
	rxd->rxd_buf0 = (uint64_t)sc->sc_rxm[id]->dm_segs[0].ds_addr;
	rxd->rxd_buf1 = (uint64_t)sc->sc_rxm[id]->dm_segs[1].ds_addr;
	rxd->rxd_buf2 = (uint64_t)sc->sc_rxm[id]->dm_segs[2].ds_addr;
	rxd->rxd_buf3 = (uint64_t)sc->sc_rxm[id]->dm_segs[3].ds_addr;
	rxd->rxd_buf4 = (uint64_t)sc->sc_rxm[id]->dm_segs[4].ds_addr;
	rxd->rxd_control1 = RXD_CTL1_OWN;
#endif

	XGE_RXSYNC(id, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	return (0);
}
1395
1396
1397
1398
/*
 * Bring up the XGXS (10G serdes) block on Xena-class (Xframe I) chips.
 *
 * This is a fixed sequence of magic writes to DTX_CONTROL/MDIO_CONTROL
 * with mandatory 50us settling delays between them.  The values and
 * their order come from the vendor initialization sequence — do not
 * reorder or coalesce these writes.  Always returns 0.
 */
int
xge_setup_xgxs_xena(struct xge_softc *sc)
{
	/* Serdes tuning writes, lane by lane (three DTX targets). */
	PIF_WCSR(DTX_CONTROL, 0x8000051500000000ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x80000515000000E0ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x80000515D93500E4ULL); DELAY(50);

	PIF_WCSR(DTX_CONTROL, 0x8001051500000000ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x80010515000000e0ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x80010515001e00e4ULL); DELAY(50);

	PIF_WCSR(DTX_CONTROL, 0x8002051500000000ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x80020515000000E0ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x80020515F21000E4ULL); DELAY(50);

	/* Second pass over the same three targets with the 0xec commit op. */
	PIF_WCSR(DTX_CONTROL, 0x8000051500000000ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x80000515000000e0ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x80000515000000ecULL); DELAY(50);

	PIF_WCSR(DTX_CONTROL, 0x8001051500000000ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x80010515000000e0ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x80010515000000ecULL); DELAY(50);

	PIF_WCSR(DTX_CONTROL, 0x8002051500000000ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x80020515000000e0ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x80020515000000ecULL); DELAY(50);

	/* 0x0018xxxx sequence (presumably an MDIO-side target — see below). */
	PIF_WCSR(DTX_CONTROL, 0x0018040000000000ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x00180400000000e0ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x00180400000000ecULL); DELAY(50);

	/*
	 * Disabled verification readback.
	 * NOTE(review): `val' is not declared in this function, so
	 * enabling this block would need a local uint64_t added first.
	 */
#if 0
	val = PIF_RCSR(MDIO_CONTROL);
	if (val != 0x1804001c0F001cULL) {
		printf("%s: MDIO_CONTROL: %llx != %llx\n",
		    XNAME, val, 0x1804001c0F001cULL);
		return (1);
	}
#endif

	/* Final DTX sequence: 0x6040/0x2040 values with e0/e4/ec ops. */
	PIF_WCSR(DTX_CONTROL, 0x0000051500000000ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x00000515604000e0ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x00000515604000e4ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x00000515204000e4ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x00000515204000ecULL); DELAY(50);

	/* Disabled verification readback (same `val' caveat as above). */
#if 0
	val = PIF_RCSR(DTX_CONTROL);
	if (val != 0x5152040001cULL) {
		printf("%s: DTX_CONTROL: %llx != %llx\n",
		    XNAME, val, 0x5152040001cULL);
		return (1);
	}
#endif

	/* MDIO-side init with the same 0x00180400 value sequence. */
	PIF_WCSR(MDIO_CONTROL, 0x0018040000000000ULL); DELAY(50);
	PIF_WCSR(MDIO_CONTROL, 0x00180400000000e0ULL); DELAY(50);
	PIF_WCSR(MDIO_CONTROL, 0x00180400000000ecULL); DELAY(50);

	/* Disabled verification readback (same `val' caveat as above). */
#if 0
	val = PIF_RCSR(MDIO_CONTROL);
	if (val != 0x1804001c0f001cULL) {
		printf("%s: MDIO_CONTROL2: %llx != %llx\n",
		    XNAME, val, 0x1804001c0f001cULL);
		return (1);
	}
#endif
	return (0);
}
1486
1487 int
1488 xge_setup_xgxs_herc(struct xge_softc *sc)
1489 {
1490 int dtx_cnt = 0;
1491
1492 while (herc_dtx_cfg[dtx_cnt] != END_SIGN) {
1493 PIF_WCSR(DTX_CONTROL, herc_dtx_cfg[dtx_cnt]);
1494 DELAY(100);
1495 dtx_cnt++;
1496 }
1497
1498 return (0);
1499 }