This source file includes the following definitions.
- bnx_probe
- nswaph
- bnx_read_firmware
- bnx_attach
- bnx_attachhook
- bnx_detach
- bnx_shutdown
- bnx_reg_rd_ind
- bnx_reg_wr_ind
- bnx_ctx_wr
- bnx_miibus_read_reg
- bnx_miibus_write_reg
- bnx_miibus_statchg
- bnx_acquire_nvram_lock
- bnx_release_nvram_lock
- bnx_enable_nvram_write
- bnx_disable_nvram_write
- bnx_enable_nvram_access
- bnx_disable_nvram_access
- bnx_nvram_erase_page
- bnx_nvram_read_dword
- bnx_nvram_write_dword
- bnx_init_nvram
- bnx_nvram_read
- bnx_nvram_write
- bnx_nvram_test
- bnx_dma_free
- bnx_dma_alloc
- bnx_release_resources
- bnx_fw_sync
- bnx_load_rv2p_fw
- bnx_load_cpu_fw
- bnx_init_cpus
- bnx_init_context
- bnx_get_mac_addr
- bnx_set_mac_addr
- bnx_stop
- bnx_reset
- bnx_chipinit
- bnx_blockinit
- bnx_get_buf
- bnx_init_tx_chain
- bnx_free_tx_chain
- bnx_init_rx_chain
- bnx_free_rx_chain
- bnx_ifmedia_upd
- bnx_ifmedia_sts
- bnx_phy_intr
- bnx_rx_intr
- bnx_tx_intr
- bnx_disable_intr
- bnx_enable_intr
- bnx_init
- bnx_mgmt_init
- bnx_tx_encap
- bnx_start
- bnx_ioctl
- bnx_watchdog
- bnx_intr
- bnx_set_rx_mode
- bnx_stats_update
- bnx_tick
- bnx_dump_mbuf
- bnx_dump_tx_mbuf_chain
- bnx_dump_rx_mbuf_chain
- bnx_dump_txbd
- bnx_dump_rxbd
- bnx_dump_l2fhdr
- bnx_dump_tx_chain
- bnx_dump_rx_chain
- bnx_dump_status_block
- bnx_dump_stats_block
- bnx_dump_driver_state
- bnx_dump_hw_state
- bnx_breakpoint
33 #if 0
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD: src/sys/dev/bce/if_bce.c,v 1.3 2006/04/13 14:12:26 ru Exp $");
36 #endif
52 #include <dev/pci/if_bnxreg.h>
53
54 int bnx_COM_b06FwReleaseMajor;
55 int bnx_COM_b06FwReleaseMinor;
56 int bnx_COM_b06FwReleaseFix;
57 u_int32_t bnx_COM_b06FwStartAddr;
58 u_int32_t bnx_COM_b06FwTextAddr;
59 int bnx_COM_b06FwTextLen;
60 u_int32_t bnx_COM_b06FwDataAddr;
61 int bnx_COM_b06FwDataLen;
62 u_int32_t bnx_COM_b06FwRodataAddr;
63 int bnx_COM_b06FwRodataLen;
64 u_int32_t bnx_COM_b06FwBssAddr;
65 int bnx_COM_b06FwBssLen;
66 u_int32_t bnx_COM_b06FwSbssAddr;
67 int bnx_COM_b06FwSbssLen;
68
69 int bnx_RXP_b06FwReleaseMajor;
70 int bnx_RXP_b06FwReleaseMinor;
71 int bnx_RXP_b06FwReleaseFix;
72 u_int32_t bnx_RXP_b06FwStartAddr;
73 u_int32_t bnx_RXP_b06FwTextAddr;
74 int bnx_RXP_b06FwTextLen;
75 u_int32_t bnx_RXP_b06FwDataAddr;
76 int bnx_RXP_b06FwDataLen;
77 u_int32_t bnx_RXP_b06FwRodataAddr;
78 int bnx_RXP_b06FwRodataLen;
79 u_int32_t bnx_RXP_b06FwBssAddr;
80 int bnx_RXP_b06FwBssLen;
81 u_int32_t bnx_RXP_b06FwSbssAddr;
82 int bnx_RXP_b06FwSbssLen;
83
84 int bnx_TPAT_b06FwReleaseMajor;
85 int bnx_TPAT_b06FwReleaseMinor;
86 int bnx_TPAT_b06FwReleaseFix;
87 u_int32_t bnx_TPAT_b06FwStartAddr;
88 u_int32_t bnx_TPAT_b06FwTextAddr;
89 int bnx_TPAT_b06FwTextLen;
90 u_int32_t bnx_TPAT_b06FwDataAddr;
91 int bnx_TPAT_b06FwDataLen;
92 u_int32_t bnx_TPAT_b06FwRodataAddr;
93 int bnx_TPAT_b06FwRodataLen;
94 u_int32_t bnx_TPAT_b06FwBssAddr;
95 int bnx_TPAT_b06FwBssLen;
96 u_int32_t bnx_TPAT_b06FwSbssAddr;
97 int bnx_TPAT_b06FwSbssLen;
98
99 int bnx_TXP_b06FwReleaseMajor;
100 int bnx_TXP_b06FwReleaseMinor;
101 int bnx_TXP_b06FwReleaseFix;
102 u_int32_t bnx_TXP_b06FwStartAddr;
103 u_int32_t bnx_TXP_b06FwTextAddr;
104 int bnx_TXP_b06FwTextLen;
105 u_int32_t bnx_TXP_b06FwDataAddr;
106 int bnx_TXP_b06FwDataLen;
107 u_int32_t bnx_TXP_b06FwRodataAddr;
108 int bnx_TXP_b06FwRodataLen;
109 u_int32_t bnx_TXP_b06FwBssAddr;
110 int bnx_TXP_b06FwBssLen;
111 u_int32_t bnx_TXP_b06FwSbssAddr;
112 int bnx_TXP_b06FwSbssLen;
113
114 int bnx_rv2p_proc1len;
115 int bnx_rv2p_proc2len;
116
117 u_int32_t *bnx_COM_b06FwText;
118 u_int32_t *bnx_COM_b06FwData;
119 u_int32_t *bnx_COM_b06FwRodata;
120 u_int32_t *bnx_COM_b06FwBss;
121 u_int32_t *bnx_COM_b06FwSbss;
122
123 u_int32_t *bnx_RXP_b06FwText;
124 u_int32_t *bnx_RXP_b06FwData;
125 u_int32_t *bnx_RXP_b06FwRodata;
126 u_int32_t *bnx_RXP_b06FwBss;
127 u_int32_t *bnx_RXP_b06FwSbss;
128
129 u_int32_t *bnx_TPAT_b06FwText;
130 u_int32_t *bnx_TPAT_b06FwData;
131 u_int32_t *bnx_TPAT_b06FwRodata;
132 u_int32_t *bnx_TPAT_b06FwBss;
133 u_int32_t *bnx_TPAT_b06FwSbss;
134
135 u_int32_t *bnx_TXP_b06FwText;
136 u_int32_t *bnx_TXP_b06FwData;
137 u_int32_t *bnx_TXP_b06FwRodata;
138 u_int32_t *bnx_TXP_b06FwBss;
139 u_int32_t *bnx_TXP_b06FwSbss;
140
141 u_int32_t *bnx_rv2p_proc1;
142 u_int32_t *bnx_rv2p_proc2;
143
144 void nswaph(u_int32_t *p, int wcount);
145
146
147
148
149 char bnx_driver_version[] = "v0.9.6";
150
151
152
153
154 #ifdef BNX_DEBUG
155 u_int32_t bnx_debug = BNX_WARN;
156
157
158
159
160
161
162
163
164
165
166
167
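/*
 * Debug counters and failure-simulation controls, compiled in only
 * when BNX_DEBUG is defined.
 */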
168 int bnx_debug_l2fhdr_status_check = 0;
169
170
171 int bnx_debug_unexpected_attention = 0;
172
173
174 int bnx_debug_mbuf_allocation_failure = 0;
175
176
177 int bnx_debug_dma_map_addr_failure = 0;
178
179
180 int bnx_debug_bootcode_running_failure = 0;
181 #endif
182
183
184
185
186
187
188 const struct pci_matchid bnx_devices[] = {
189 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706 },
190 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706S },
191 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5708 },
192 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5708S }
193 #if 0
194 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5709 },
195 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5709S }
196 #endif
197 };
198
199
200
201
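/*
 * Supported NVRAM/flash parts.  bnx_init_nvram() matches the strapping
 * value read from BNX_NVM_CFG1 against this table to select the entry
 * describing the attached device.
 */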
202 static struct flash_spec flash_table[] =
203 {
204
205 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
206 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
207 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
208 "EEPROM - slow"},
209
210 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
211 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
212 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
213 "Entry 0001"},
214
215
216 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
217 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
218 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
219 "Non-buffered flash (128kB)"},
220
221
222 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
223 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
224 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
225 "Non-buffered flash (256kB)"},
226
227 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
228 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
229 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
230 "Entry 0100"},
231
232 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
233 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
234 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
235 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
236
237 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
238 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
239 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
240 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
241
242
243 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
244 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
245 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
246 "Non-buffered flash (64kB)"},
247
248 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
249 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
250 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
251 "EEPROM - fast"},
252
253 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
254 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
255 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
256 "Entry 1001"},
257
258 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
259 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
260 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
261 "Entry 1010"},
262
263 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
264 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
265 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
266 "Buffered flash (128kB)"},
267
268 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
269 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
270 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
271 "Entry 1100"},
272
273 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
274 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
275 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
276 "Entry 1101"},
277
278 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
279 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
280 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
281 "Entry 1110 (Atmel)"},
282
283 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
284 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
285 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
286 "Buffered flash (256kB)"},
287 };
288
289
290
291
292 int bnx_probe(struct device *, void *, void *);
293 void bnx_attach(struct device *, struct device *, void *);
294 void bnx_attachhook(void *);
295 int bnx_read_firmware(struct bnx_softc *sc);
296 #if 0
297 void bnx_detach(void *);
298 #endif
299 void bnx_shutdown(void *);
300
301
302
303
304 #ifdef BNX_DEBUG
305 void bnx_dump_mbuf(struct bnx_softc *, struct mbuf *);
306 void bnx_dump_tx_mbuf_chain(struct bnx_softc *, int, int);
307 void bnx_dump_rx_mbuf_chain(struct bnx_softc *, int, int);
308 void bnx_dump_txbd(struct bnx_softc *, int, struct tx_bd *);
309 void bnx_dump_rxbd(struct bnx_softc *, int, struct rx_bd *);
310 void bnx_dump_l2fhdr(struct bnx_softc *, int, struct l2_fhdr *);
311 void bnx_dump_tx_chain(struct bnx_softc *, int, int);
312 void bnx_dump_rx_chain(struct bnx_softc *, int, int);
313 void bnx_dump_status_block(struct bnx_softc *);
314 void bnx_dump_stats_block(struct bnx_softc *);
315 void bnx_dump_driver_state(struct bnx_softc *);
316 void bnx_dump_hw_state(struct bnx_softc *);
317 void bnx_breakpoint(struct bnx_softc *);
318 #endif
319
320
321
322
323 u_int32_t bnx_reg_rd_ind(struct bnx_softc *, u_int32_t);
324 void bnx_reg_wr_ind(struct bnx_softc *, u_int32_t, u_int32_t);
325 void bnx_ctx_wr(struct bnx_softc *, u_int32_t, u_int32_t, u_int32_t);
326 int bnx_miibus_read_reg(struct device *, int, int);
327 void bnx_miibus_write_reg(struct device *, int, int, int);
328 void bnx_miibus_statchg(struct device *);
329
330
331
332
333 int bnx_acquire_nvram_lock(struct bnx_softc *);
334 int bnx_release_nvram_lock(struct bnx_softc *);
335 void bnx_enable_nvram_access(struct bnx_softc *);
336 void bnx_disable_nvram_access(struct bnx_softc *);
337 int bnx_nvram_read_dword(struct bnx_softc *, u_int32_t, u_int8_t *,
338 u_int32_t);
339 int bnx_init_nvram(struct bnx_softc *);
340 int bnx_nvram_read(struct bnx_softc *, u_int32_t, u_int8_t *, int);
341 int bnx_nvram_test(struct bnx_softc *);
342 #ifdef BNX_NVRAM_WRITE_SUPPORT
343 int bnx_enable_nvram_write(struct bnx_softc *);
344 void bnx_disable_nvram_write(struct bnx_softc *);
345 int bnx_nvram_erase_page(struct bnx_softc *, u_int32_t);
346 int bnx_nvram_write_dword(struct bnx_softc *, u_int32_t, u_int8_t *,
347 u_int32_t);
348 int bnx_nvram_write(struct bnx_softc *, u_int32_t, u_int8_t *, int);
349 #endif
350
351
352
353
354 int bnx_dma_alloc(struct bnx_softc *);
355 void bnx_dma_free(struct bnx_softc *);
356 void bnx_release_resources(struct bnx_softc *);
357
358
359
360
361 int bnx_fw_sync(struct bnx_softc *, u_int32_t);
362 void bnx_load_rv2p_fw(struct bnx_softc *, u_int32_t *, u_int32_t,
363 u_int32_t);
364 void bnx_load_cpu_fw(struct bnx_softc *, struct cpu_reg *,
365 struct fw_info *);
366 void bnx_init_cpus(struct bnx_softc *);
367
368 void bnx_stop(struct bnx_softc *);
369 int bnx_reset(struct bnx_softc *, u_int32_t);
370 int bnx_chipinit(struct bnx_softc *);
371 int bnx_blockinit(struct bnx_softc *);
372 int bnx_get_buf(struct bnx_softc *, struct mbuf *, u_int16_t *,
373 u_int16_t *, u_int32_t *);
374
375 int bnx_init_tx_chain(struct bnx_softc *);
376 int bnx_init_rx_chain(struct bnx_softc *);
377 void bnx_free_rx_chain(struct bnx_softc *);
378 void bnx_free_tx_chain(struct bnx_softc *);
379
380 int bnx_tx_encap(struct bnx_softc *, struct mbuf **);
381 void bnx_start(struct ifnet *);
382 int bnx_ioctl(struct ifnet *, u_long, caddr_t);
383 void bnx_watchdog(struct ifnet *);
384 int bnx_ifmedia_upd(struct ifnet *);
385 void bnx_ifmedia_sts(struct ifnet *, struct ifmediareq *);
386 void bnx_init(void *);
387 void bnx_mgmt_init(struct bnx_softc *sc);
388
389 void bnx_init_context(struct bnx_softc *);
390 void bnx_get_mac_addr(struct bnx_softc *);
391 void bnx_set_mac_addr(struct bnx_softc *);
392 void bnx_phy_intr(struct bnx_softc *);
393 void bnx_rx_intr(struct bnx_softc *);
394 void bnx_tx_intr(struct bnx_softc *);
395 void bnx_disable_intr(struct bnx_softc *);
396 void bnx_enable_intr(struct bnx_softc *);
397
398 int bnx_intr(void *);
399 void bnx_set_rx_mode(struct bnx_softc *);
400 void bnx_stats_update(struct bnx_softc *);
401 void bnx_tick(void *);
402
403
404
405
406 struct cfattach bnx_ca = {
407 sizeof(struct bnx_softc), bnx_probe, bnx_attach
408 };
409
410 struct cfdriver bnx_cd = {
411 0, "bnx", DV_IFNET
412 };
413
414
415
416
417
418
419
420
421
422
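/*
 * Probe for a supported controller by matching the PCI vendor/device ID
 * against the bnx_devices table.
 */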
423 int
424 bnx_probe(struct device *parent, void *match, void *aux)
425 {
426 return (pci_matchbyid((struct pci_attach_args *)aux, bnx_devices,
427 sizeof(bnx_devices)/sizeof(bnx_devices[0])));
428 }
429
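/*
 * Convert a firmware image section from network to host byte order.
 * Despite the parameter name, wcount is a length in bytes; one 32-bit
 * word is converted per iteration.
 */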
430 void
431 nswaph(u_int32_t *p, int wcount)
432 {
433 for (; wcount; wcount -= 4) {
434 *p = ntohl(*p);
435 p++;
436 }
437 }
438
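/*
 * Load the "bnx" firmware image with loadfirmware(9), parse its header
 * into the global per-CPU section address/length variables, and byte-swap
 * the section contents in place.  The image is parsed only once;
 * subsequent calls return immediately.
 */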
439 int
440 bnx_read_firmware(struct bnx_softc *sc)
441 {
442 static struct bnx_firmware_header *hdr;
443 u_char *p, *q;
444 size_t size;
445 int error;
446
447 if (hdr)
448 return (0);
449
450 if ((error = loadfirmware("bnx", &p, &size)) != 0)
451 return error;
452
453 if (size < sizeof (struct bnx_firmware_header)) {
454 free(p, M_DEVBUF);
455 return EINVAL;
456 }
457
458 hdr = (struct bnx_firmware_header *)p;
459
460 bnx_COM_b06FwReleaseMajor = ntohl(hdr->bnx_COM_b06FwReleaseMajor);
461 bnx_COM_b06FwReleaseMinor = ntohl(hdr->bnx_COM_b06FwReleaseMinor);
462 bnx_COM_b06FwReleaseFix = ntohl(hdr->bnx_COM_b06FwReleaseFix);
463 bnx_COM_b06FwStartAddr = ntohl(hdr->bnx_COM_b06FwStartAddr);
464 bnx_COM_b06FwTextAddr = ntohl(hdr->bnx_COM_b06FwTextAddr);
465 bnx_COM_b06FwTextLen = ntohl(hdr->bnx_COM_b06FwTextLen);
466 bnx_COM_b06FwDataAddr = ntohl(hdr->bnx_COM_b06FwDataAddr);
467 bnx_COM_b06FwDataLen = ntohl(hdr->bnx_COM_b06FwDataLen);
468 bnx_COM_b06FwRodataAddr = ntohl(hdr->bnx_COM_b06FwRodataAddr);
469 bnx_COM_b06FwRodataLen = ntohl(hdr->bnx_COM_b06FwRodataLen);
470 bnx_COM_b06FwBssAddr = ntohl(hdr->bnx_COM_b06FwBssAddr);
471 bnx_COM_b06FwBssLen = ntohl(hdr->bnx_COM_b06FwBssLen);
472 bnx_COM_b06FwSbssAddr = ntohl(hdr->bnx_COM_b06FwSbssAddr);
473 bnx_COM_b06FwSbssLen = ntohl(hdr->bnx_COM_b06FwSbssLen);
474
475 bnx_RXP_b06FwReleaseMajor = ntohl(hdr->bnx_RXP_b06FwReleaseMajor);
476 bnx_RXP_b06FwReleaseMinor = ntohl(hdr->bnx_RXP_b06FwReleaseMinor);
477 bnx_RXP_b06FwReleaseFix = ntohl(hdr->bnx_RXP_b06FwReleaseFix);
478 bnx_RXP_b06FwStartAddr = ntohl(hdr->bnx_RXP_b06FwStartAddr);
479 bnx_RXP_b06FwTextAddr = ntohl(hdr->bnx_RXP_b06FwTextAddr);
480 bnx_RXP_b06FwTextLen = ntohl(hdr->bnx_RXP_b06FwTextLen);
481 bnx_RXP_b06FwDataAddr = ntohl(hdr->bnx_RXP_b06FwDataAddr);
482 bnx_RXP_b06FwDataLen = ntohl(hdr->bnx_RXP_b06FwDataLen);
483 bnx_RXP_b06FwRodataAddr = ntohl(hdr->bnx_RXP_b06FwRodataAddr);
484 bnx_RXP_b06FwRodataLen = ntohl(hdr->bnx_RXP_b06FwRodataLen);
485 bnx_RXP_b06FwBssAddr = ntohl(hdr->bnx_RXP_b06FwBssAddr);
486 bnx_RXP_b06FwBssLen = ntohl(hdr->bnx_RXP_b06FwBssLen);
487 bnx_RXP_b06FwSbssAddr = ntohl(hdr->bnx_RXP_b06FwSbssAddr);
488 bnx_RXP_b06FwSbssLen = ntohl(hdr->bnx_RXP_b06FwSbssLen);
489
490 bnx_TPAT_b06FwReleaseMajor = ntohl(hdr->bnx_TPAT_b06FwReleaseMajor);
491 bnx_TPAT_b06FwReleaseMinor = ntohl(hdr->bnx_TPAT_b06FwReleaseMinor);
492 bnx_TPAT_b06FwReleaseFix = ntohl(hdr->bnx_TPAT_b06FwReleaseFix);
493 bnx_TPAT_b06FwStartAddr = ntohl(hdr->bnx_TPAT_b06FwStartAddr);
494 bnx_TPAT_b06FwTextAddr = ntohl(hdr->bnx_TPAT_b06FwTextAddr);
495 bnx_TPAT_b06FwTextLen = ntohl(hdr->bnx_TPAT_b06FwTextLen);
496 bnx_TPAT_b06FwDataAddr = ntohl(hdr->bnx_TPAT_b06FwDataAddr);
497 bnx_TPAT_b06FwDataLen = ntohl(hdr->bnx_TPAT_b06FwDataLen);
498 bnx_TPAT_b06FwRodataAddr = ntohl(hdr->bnx_TPAT_b06FwRodataAddr);
499 bnx_TPAT_b06FwRodataLen = ntohl(hdr->bnx_TPAT_b06FwRodataLen);
500 bnx_TPAT_b06FwBssAddr = ntohl(hdr->bnx_TPAT_b06FwBssAddr);
501 bnx_TPAT_b06FwBssLen = ntohl(hdr->bnx_TPAT_b06FwBssLen);
502 bnx_TPAT_b06FwSbssAddr = ntohl(hdr->bnx_TPAT_b06FwSbssAddr);
503 bnx_TPAT_b06FwSbssLen = ntohl(hdr->bnx_TPAT_b06FwSbssLen);
504
505 bnx_TXP_b06FwReleaseMajor = ntohl(hdr->bnx_TXP_b06FwReleaseMajor);
506 bnx_TXP_b06FwReleaseMinor = ntohl(hdr->bnx_TXP_b06FwReleaseMinor);
507 bnx_TXP_b06FwReleaseFix = ntohl(hdr->bnx_TXP_b06FwReleaseFix);
508 bnx_TXP_b06FwStartAddr = ntohl(hdr->bnx_TXP_b06FwStartAddr);
509 bnx_TXP_b06FwTextAddr = ntohl(hdr->bnx_TXP_b06FwTextAddr);
510 bnx_TXP_b06FwTextLen = ntohl(hdr->bnx_TXP_b06FwTextLen);
511 bnx_TXP_b06FwDataAddr = ntohl(hdr->bnx_TXP_b06FwDataAddr);
512 bnx_TXP_b06FwDataLen = ntohl(hdr->bnx_TXP_b06FwDataLen);
513 bnx_TXP_b06FwRodataAddr = ntohl(hdr->bnx_TXP_b06FwRodataAddr);
514 bnx_TXP_b06FwRodataLen = ntohl(hdr->bnx_TXP_b06FwRodataLen);
515 bnx_TXP_b06FwBssAddr = ntohl(hdr->bnx_TXP_b06FwBssAddr);
516 bnx_TXP_b06FwBssLen = ntohl(hdr->bnx_TXP_b06FwBssLen);
517 bnx_TXP_b06FwSbssAddr = ntohl(hdr->bnx_TXP_b06FwSbssAddr);
518 bnx_TXP_b06FwSbssLen = ntohl(hdr->bnx_TXP_b06FwSbssLen);
519
520 bnx_rv2p_proc1len = ntohl(hdr->bnx_rv2p_proc1len);
521 bnx_rv2p_proc2len = ntohl(hdr->bnx_rv2p_proc2len);
522
523 q = p + sizeof(*hdr);
524
525 bnx_COM_b06FwText = (u_int32_t *)q;
526 q += bnx_COM_b06FwTextLen;
527 nswaph(bnx_COM_b06FwText, bnx_COM_b06FwTextLen);
528 bnx_COM_b06FwData = (u_int32_t *)q;
529 q += bnx_COM_b06FwDataLen;
530 nswaph(bnx_COM_b06FwData, bnx_COM_b06FwDataLen);
531 bnx_COM_b06FwRodata = (u_int32_t *)q;
532 q += bnx_COM_b06FwRodataLen;
533 nswaph(bnx_COM_b06FwRodata, bnx_COM_b06FwRodataLen);
534 bnx_COM_b06FwBss = (u_int32_t *)q;
535 q += bnx_COM_b06FwBssLen;
536 nswaph(bnx_COM_b06FwBss, bnx_COM_b06FwBssLen);
537 bnx_COM_b06FwSbss = (u_int32_t *)q;
538 q += bnx_COM_b06FwSbssLen;
539 nswaph(bnx_COM_b06FwSbss, bnx_COM_b06FwSbssLen);
540
541 bnx_RXP_b06FwText = (u_int32_t *)q;
542 q += bnx_RXP_b06FwTextLen;
543 nswaph(bnx_RXP_b06FwText, bnx_RXP_b06FwTextLen);
544 bnx_RXP_b06FwData = (u_int32_t *)q;
545 q += bnx_RXP_b06FwDataLen;
546 nswaph(bnx_RXP_b06FwData, bnx_RXP_b06FwDataLen);
547 bnx_RXP_b06FwRodata = (u_int32_t *)q;
548 q += bnx_RXP_b06FwRodataLen;
549 nswaph(bnx_RXP_b06FwRodata, bnx_RXP_b06FwRodataLen);
550 bnx_RXP_b06FwBss = (u_int32_t *)q;
551 q += bnx_RXP_b06FwBssLen;
552 nswaph(bnx_RXP_b06FwBss, bnx_RXP_b06FwBssLen);
553 bnx_RXP_b06FwSbss = (u_int32_t *)q;
554 q += bnx_RXP_b06FwSbssLen;
555 nswaph(bnx_RXP_b06FwSbss, bnx_RXP_b06FwSbssLen);
556
557 bnx_TPAT_b06FwText = (u_int32_t *)q;
558 q += bnx_TPAT_b06FwTextLen;
559 nswaph(bnx_TPAT_b06FwText, bnx_TPAT_b06FwTextLen);
560 bnx_TPAT_b06FwData = (u_int32_t *)q;
561 q += bnx_TPAT_b06FwDataLen;
562 nswaph(bnx_TPAT_b06FwData, bnx_TPAT_b06FwDataLen);
563 bnx_TPAT_b06FwRodata = (u_int32_t *)q;
564 q += bnx_TPAT_b06FwRodataLen;
565 nswaph(bnx_TPAT_b06FwRodata, bnx_TPAT_b06FwRodataLen);
566 bnx_TPAT_b06FwBss = (u_int32_t *)q;
567 q += bnx_TPAT_b06FwBssLen;
568 nswaph(bnx_TPAT_b06FwBss, bnx_TPAT_b06FwBssLen);
569 bnx_TPAT_b06FwSbss = (u_int32_t *)q;
570 q += bnx_TPAT_b06FwSbssLen;
571 nswaph(bnx_TPAT_b06FwSbss, bnx_TPAT_b06FwSbssLen);
572
573 bnx_TXP_b06FwText = (u_int32_t *)q;
574 q += bnx_TXP_b06FwTextLen;
575 nswaph(bnx_TXP_b06FwText, bnx_TXP_b06FwTextLen);
576 bnx_TXP_b06FwData = (u_int32_t *)q;
577 q += bnx_TXP_b06FwDataLen;
578 nswaph(bnx_TXP_b06FwData, bnx_TXP_b06FwDataLen);
579 bnx_TXP_b06FwRodata = (u_int32_t *)q;
580 q += bnx_TXP_b06FwRodataLen;
581 nswaph(bnx_TXP_b06FwRodata, bnx_TXP_b06FwRodataLen);
582 bnx_TXP_b06FwBss = (u_int32_t *)q;
583 q += bnx_TXP_b06FwBssLen;
584 nswaph(bnx_TXP_b06FwBss, bnx_TXP_b06FwBssLen);
585 bnx_TXP_b06FwSbss = (u_int32_t *)q;
586 q += bnx_TXP_b06FwSbssLen;
587 nswaph(bnx_TXP_b06FwSbss, bnx_TXP_b06FwSbssLen);
588
589 bnx_rv2p_proc1 = (u_int32_t *)q;
590 q += bnx_rv2p_proc1len;
591 nswaph(bnx_rv2p_proc1, bnx_rv2p_proc1len);
592 bnx_rv2p_proc2 = (u_int32_t *)q;
593 q += bnx_rv2p_proc2len;
594 nswaph(bnx_rv2p_proc2, bnx_rv2p_proc2len);
595
596 if (q - p != size) {
597 free(p, M_DEVBUF);
598 hdr = NULL;
599 return EINVAL;
600 }
601
602 return (0);
603 }
604
605
606
607
608
609
610
611
612
613
614
615
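/*
 * Device attach: map BAR0, map and establish the interrupt, reject
 * unsupported chip revisions and SerDes bondings, locate the shared
 * memory area, and defer the remainder of initialization to
 * bnx_attachhook() once the root filesystem (and thus the firmware
 * file) is available.
 */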
616 void
617 bnx_attach(struct device *parent, struct device *self, void *aux)
618 {
619 struct bnx_softc *sc = (struct bnx_softc *)self;
620 struct pci_attach_args *pa = aux;
621 pci_chipset_tag_t pc = pa->pa_pc;
622 u_int32_t val;
623 pcireg_t memtype;
624 const char *intrstr = NULL;
625
626 sc->bnx_pa = *pa;
627
628
629
630
631 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BNX_PCI_BAR0);
632 switch (memtype) {
633 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
634 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
635 if (pci_mapreg_map(pa, BNX_PCI_BAR0,
636 memtype, 0, &sc->bnx_btag, &sc->bnx_bhandle,
637 NULL, &sc->bnx_size, 0) == 0)
638 break;
639 default:
640 printf(": can't find mem space\n");
641 return;
642 }
643
644 if (pci_intr_map(pa, &sc->bnx_ih)) {
645 printf(": couldn't map interrupt\n");
646 goto bnx_attach_fail;
647 }
648 intrstr = pci_intr_string(pc, sc->bnx_ih);
649
650
651
652
653
654
655
656 pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_MISC_CONFIG,
657 BNX_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
658 BNX_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
659
660
661 sc->bnx_chipid = REG_RD(sc, BNX_MISC_ID);
662
663
664 switch(BNX_CHIP_ID(sc)) {
665 case BNX_CHIP_ID_5706_A0:
666 case BNX_CHIP_ID_5706_A1:
667 case BNX_CHIP_ID_5708_A0:
668 case BNX_CHIP_ID_5708_B0:
669 printf(": unsupported controller revision (%c%d)!\n",
670 (((pci_conf_read(pa->pa_pc, pa->pa_tag, 0x08) & 0xf0) >> 4)
671 + 'A'), (pci_conf_read(pa->pa_pc, pa->pa_tag, 0x08) & 0xf));
672 goto bnx_attach_fail;
673 }
674
675 if (BNX_CHIP_BOND_ID(sc) & BNX_CHIP_BOND_ID_SERDES_BIT) {
676 printf(": SerDes controllers are not supported!\n");
677 goto bnx_attach_fail;
678 }
679
680
681
682
683
684
685 val = REG_RD_IND(sc, BNX_SHM_HDR_SIGNATURE);
686 if ((val & BNX_SHM_HDR_SIGNATURE_SIG_MASK) == BNX_SHM_HDR_SIGNATURE_SIG)
687 sc->bnx_shmem_base = REG_RD_IND(sc, BNX_SHM_HDR_ADDR_0);
688 else
689 sc->bnx_shmem_base = HOST_VIEW_SHMEM_BASE;
690
691 DBPRINT(sc, BNX_INFO, "bnx_shmem_base = 0x%08X\n", sc->bnx_shmem_base);
692
693
694 sc->bnx_flags = 0;
695 sc->bnx_phy_flags = 0;
696
697
698 val = REG_RD(sc, BNX_PCICFG_MISC_STATUS);
699 if (val & BNX_PCICFG_MISC_STATUS_PCIX_DET) {
700 u_int32_t clkreg;
701
702 sc->bnx_flags |= BNX_PCIX_FLAG;
703
704 clkreg = REG_RD(sc, BNX_PCICFG_PCI_CLOCK_CONTROL_BITS);
705
706 clkreg &= BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
707 switch (clkreg) {
708 case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
709 sc->bus_speed_mhz = 133;
710 break;
711
712 case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
713 sc->bus_speed_mhz = 100;
714 break;
715
716 case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
717 case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
718 sc->bus_speed_mhz = 66;
719 break;
720
721 case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
722 case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
723 sc->bus_speed_mhz = 50;
724 break;
725
726 case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
727 case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
728 case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
729 sc->bus_speed_mhz = 33;
730 break;
731 }
732 } else if (val & BNX_PCICFG_MISC_STATUS_M66EN)
733 sc->bus_speed_mhz = 66;
734 else
735 sc->bus_speed_mhz = 33;
736
737 if (val & BNX_PCICFG_MISC_STATUS_32BIT_DET)
738 sc->bnx_flags |= BNX_PCI_32BIT_FLAG;
739
740 printf(": %s\n", intrstr);
741
742
743 sc->bnx_intrhand = pci_intr_establish(pc, sc->bnx_ih, IPL_NET,
744 bnx_intr, sc, sc->bnx_dev.dv_xname);
745 if (sc->bnx_intrhand == NULL) {
746 printf("%s: couldn't establish interrupt\n",
747 sc->bnx_dev.dv_xname);
748 goto bnx_attach_fail;
749 }
750
751 mountroothook_establish(bnx_attachhook, sc);
752 return;
753
754 bnx_attach_fail:
755 bnx_release_resources(sc);
756 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
757 }
758
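/*
 * Deferred attach: read the firmware, reset and initialize the
 * controller, verify NVRAM, allocate DMA memory, and attach the
 * network interface and MII/PHY layer.
 */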
759 void
760 bnx_attachhook(void *xsc)
761 {
762 struct bnx_softc *sc = xsc;
763 struct pci_attach_args *pa = &sc->bnx_pa;
764 struct ifnet *ifp;
765 u_int32_t val;
766 int error;
767
768 if ((error = bnx_read_firmware(sc)) != 0) {
769 printf("%s: could not read firmware (error=%d)\n",
770 sc->bnx_dev.dv_xname, error);
771 return;
772 }
773
774
775 if (bnx_reset(sc, BNX_DRV_MSG_CODE_RESET))
776 goto bnx_attach_fail;
777
778
779 if (bnx_chipinit(sc)) {
780 printf("%s: Controller initialization failed!\n",
781 sc->bnx_dev.dv_xname);
782 goto bnx_attach_fail;
783 }
784
785
786 if (bnx_nvram_test(sc)) {
787 printf("%s: NVRAM test failed!\n",
788 sc->bnx_dev.dv_xname);
789 goto bnx_attach_fail;
790 }
791
792
793 bnx_get_mac_addr(sc);
794
795
796
797
798
799
800
801
802
803
804 #ifdef BNX_DEBUG
805
806 sc->bnx_tx_quick_cons_trip_int = 1;
807 sc->bnx_tx_quick_cons_trip = 1;
808 sc->bnx_tx_ticks_int = 0;
809 sc->bnx_tx_ticks = 0;
810
811 sc->bnx_rx_quick_cons_trip_int = 1;
812 sc->bnx_rx_quick_cons_trip = 1;
813 sc->bnx_rx_ticks_int = 0;
814 sc->bnx_rx_ticks = 0;
815 #else
816 sc->bnx_tx_quick_cons_trip_int = 20;
817 sc->bnx_tx_quick_cons_trip = 20;
818 sc->bnx_tx_ticks_int = 80;
819 sc->bnx_tx_ticks = 80;
820
821 sc->bnx_rx_quick_cons_trip_int = 6;
822 sc->bnx_rx_quick_cons_trip = 6;
823 sc->bnx_rx_ticks_int = 18;
824 sc->bnx_rx_ticks = 18;
825 #endif
826
827
828 sc->bnx_stats_ticks = 1000000 & 0xffff00;
829
830
831
832
833
834
835
836 sc->bnx_phy_addr = 1;
837
838 if (BNX_CHIP_BOND_ID(sc) & BNX_CHIP_BOND_ID_SERDES_BIT) {
839 sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG;
840 sc->bnx_flags |= BNX_NO_WOL_FLAG;
841 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5708) {
842 sc->bnx_phy_addr = 2;
843 val = REG_RD_IND(sc, sc->bnx_shmem_base +
844 BNX_SHARED_HW_CFG_CONFIG);
845 if (val & BNX_SHARED_HW_CFG_PHY_2_5G)
846 sc->bnx_phy_flags |= BNX_PHY_2_5G_CAPABLE_FLAG;
847 }
848 }
849
850 if (sc->bnx_phy_flags & BNX_PHY_SERDES_FLAG) {
851 printf(": SerDes is not supported by this driver!\n");
852 goto bnx_attach_fail;
853 }
854
855
856 sc->bnx_dmatag = pa->pa_dmat;
857 if (bnx_dma_alloc(sc)) {
858 printf("%s: DMA resource allocation failed!\n",
859 sc->bnx_dev.dv_xname);
860 goto bnx_attach_fail;
861 }
862
863
864 ifp = &sc->arpcom.ac_if;
865 ifp->if_softc = sc;
866 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
867 ifp->if_ioctl = bnx_ioctl;
868 ifp->if_start = bnx_start;
869 ifp->if_watchdog = bnx_watchdog;
870 if (sc->bnx_phy_flags & BNX_PHY_2_5G_CAPABLE_FLAG)
871 ifp->if_baudrate = IF_Gbps(2.5);
872 else
873 ifp->if_baudrate = IF_Gbps(1);
874 IFQ_SET_MAXLEN(&ifp->if_snd, USABLE_TX_BD - 1);
875 IFQ_SET_READY(&ifp->if_snd);
876 bcopy(sc->eaddr, sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);
877 bcopy(sc->bnx_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
878
879 ifp->if_capabilities = IFCAP_VLAN_MTU;
880
881 #ifdef BNX_CSUM
882 ifp->if_capabilities |= IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
883 #endif
884
885 #if NVLAN > 0
886 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
887 #endif
888
889 sc->mbuf_alloc_size = BNX_MAX_MRU;
890
891 printf("%s: address %s\n", sc->bnx_dev.dv_xname,
892 ether_sprintf(sc->arpcom.ac_enaddr));
893
894 sc->bnx_mii.mii_ifp = ifp;
895 sc->bnx_mii.mii_readreg = bnx_miibus_read_reg;
896 sc->bnx_mii.mii_writereg = bnx_miibus_write_reg;
897 sc->bnx_mii.mii_statchg = bnx_miibus_statchg;
898
899
900 ifmedia_init(&sc->bnx_mii.mii_media, 0, bnx_ifmedia_upd,
901 bnx_ifmedia_sts);
902 mii_attach(&sc->bnx_dev, &sc->bnx_mii, 0xffffffff,
903 MII_PHY_ANY, MII_OFFSET_ANY, 0);
904
905 if (LIST_FIRST(&sc->bnx_mii.mii_phys) == NULL) {
906 printf("%s: no PHY found!\n", sc->bnx_dev.dv_xname);
907 ifmedia_add(&sc->bnx_mii.mii_media,
908 IFM_ETHER|IFM_MANUAL, 0, NULL);
909 ifmedia_set(&sc->bnx_mii.mii_media,
910 IFM_ETHER|IFM_MANUAL);
911 } else {
912 ifmedia_set(&sc->bnx_mii.mii_media,
913 IFM_ETHER|IFM_AUTO);
914 }
915
916
917 if_attach(ifp);
918 ether_ifattach(ifp);
919
920 timeout_set(&sc->bnx_timeout, bnx_tick, sc);
921
922
923 DBRUN(BNX_INFO, bnx_dump_driver_state(sc));
924
925
926 bnx_mgmt_init(sc);
927
928
929 sc->bnx_flags |= BNX_ACTIVE_FLAG;
930
931 goto bnx_attach_exit;
932
933 bnx_attach_fail:
934 bnx_release_resources(sc);
935
936 bnx_attach_exit:
937 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
938 }
939
940
941
942
943
944
945
946
947
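/*
 * bnx_detach() is disabled: it still uses the FreeBSD detach interfaces
 * (device_get_softc(), bus_generic_detach()) and is not called on OpenBSD.
 */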
948 #if 0
949 void
950 bnx_detach(void *xsc)
951 {
952 struct bnx_softc *sc;
953 struct ifnet *ifp = &sc->arpcom.ac_if;
954
955 sc = device_get_softc(dev);
956
957 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
958
959
960 bnx_stop(sc);
961 bnx_reset(sc, BNX_DRV_MSG_CODE_RESET);
962
963 ether_ifdetach(ifp);
964
965
966 if (sc->bnx_phy_flags & BNX_PHY_SERDES_FLAG) {
967 ifmedia_removeall(&sc->bnx_ifmedia);
968 } else {
969 bus_generic_detach(dev);
970 device_delete_child(dev, sc->bnx_mii);
971 }
972
973
974 bnx_release_resources(sc);
975
976 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
977
978 return(0);
979 }
980 #endif
981
982
983
984
985
986
987
988
989
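/*
 * Stop the controller and issue a firmware reset at system shutdown.
 */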
990 void
991 bnx_shutdown(void *xsc)
992 {
993 struct bnx_softc *sc = (struct bnx_softc *)xsc;
994
995 bnx_stop(sc);
996 bnx_reset(sc, BNX_DRV_MSG_CODE_RESET);
997 }
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
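/*
 * Indirect register read: select the register through the PCI
 * configuration-space window (BNX_PCICFG_REG_WINDOW_ADDRESS) and read
 * the value back through BNX_PCICFG_REG_WINDOW.
 */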
1009 u_int32_t
1010 bnx_reg_rd_ind(struct bnx_softc *sc, u_int32_t offset)
1011 {
1012 struct pci_attach_args *pa = &(sc->bnx_pa);
1013
1014 pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW_ADDRESS,
1015 offset);
1016 #ifdef BNX_DEBUG
1017 {
1018 u_int32_t val;
1019 val = pci_conf_read(pa->pa_pc, pa->pa_tag,
1020 BNX_PCICFG_REG_WINDOW);
1021 DBPRINT(sc, BNX_EXCESSIVE, "%s(); offset = 0x%08X, "
1022 "val = 0x%08X\n", __FUNCTION__, offset, val);
1023 return (val);
1024 }
1025 #else
1026 return pci_conf_read(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW);
1027 #endif
1028 }
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
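/*
 * Indirect register write through the PCI configuration-space window.
 */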
1040 void
1041 bnx_reg_wr_ind(struct bnx_softc *sc, u_int32_t offset, u_int32_t val)
1042 {
1043 struct pci_attach_args *pa = &(sc->bnx_pa);
1044
1045 DBPRINT(sc, BNX_EXCESSIVE, "%s(); offset = 0x%08X, val = 0x%08X\n",
1046 __FUNCTION__, offset, val);
1047
1048 pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW_ADDRESS,
1049 offset);
1050 pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW, val);
1051 }
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
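/*
 * Write a value into the controller's context memory at the given
 * CID address and offset.
 */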
1062 void
1063 bnx_ctx_wr(struct bnx_softc *sc, u_int32_t cid_addr, u_int32_t offset,
1064 u_int32_t val)
1065 {
1066
1067 DBPRINT(sc, BNX_EXCESSIVE, "%s(); cid_addr = 0x%08X, offset = 0x%08X, "
1068 "val = 0x%08X\n", __FUNCTION__, cid_addr, offset, val);
1069
1070 offset += cid_addr;
1071 REG_WR(sc, BNX_CTX_DATA_ADR, offset);
1072 REG_WR(sc, BNX_CTX_DATA, val);
1073 }
1074
1075
1076
1077
1078
1079
1080
1081
1082
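/*
 * MII register read: temporarily disable auto-polling if it is enabled,
 * issue the read command through the EMAC MDIO interface, and poll for
 * completion.  Returns the 16-bit register value, or 0 on timeout or if
 * the PHY address does not match the one discovered at attach time.
 */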
1083 int
1084 bnx_miibus_read_reg(struct device *dev, int phy, int reg)
1085 {
1086 struct bnx_softc *sc = (struct bnx_softc *)dev;
1087 u_int32_t val;
1088 int i;
1089
1090
1091 if (phy != sc->bnx_phy_addr) {
1092 DBPRINT(sc, BNX_VERBOSE,
1093 "Invalid PHY address %d for PHY read!\n", phy);
1094 return(0);
1095 }
1096
1097 if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1098 val = REG_RD(sc, BNX_EMAC_MDIO_MODE);
1099 val &= ~BNX_EMAC_MDIO_MODE_AUTO_POLL;
1100
1101 REG_WR(sc, BNX_EMAC_MDIO_MODE, val);
1102 REG_RD(sc, BNX_EMAC_MDIO_MODE);
1103
1104 DELAY(40);
1105 }
1106
1107 val = BNX_MIPHY(phy) | BNX_MIREG(reg) |
1108 BNX_EMAC_MDIO_COMM_COMMAND_READ | BNX_EMAC_MDIO_COMM_DISEXT |
1109 BNX_EMAC_MDIO_COMM_START_BUSY;
1110 REG_WR(sc, BNX_EMAC_MDIO_COMM, val);
1111
1112 for (i = 0; i < BNX_PHY_TIMEOUT; i++) {
1113 DELAY(10);
1114
1115 val = REG_RD(sc, BNX_EMAC_MDIO_COMM);
1116 if (!(val & BNX_EMAC_MDIO_COMM_START_BUSY)) {
1117 DELAY(5);
1118
1119 val = REG_RD(sc, BNX_EMAC_MDIO_COMM);
1120 val &= BNX_EMAC_MDIO_COMM_DATA;
1121
1122 break;
1123 }
1124 }
1125
1126 if (val & BNX_EMAC_MDIO_COMM_START_BUSY) {
1127 BNX_PRINTF(sc, "%s(%d): Error: PHY read timeout! phy = %d, "
1128 "reg = 0x%04X\n", __FILE__, __LINE__, phy, reg);
1129 val = 0x0;
1130 } else
1131 val = REG_RD(sc, BNX_EMAC_MDIO_COMM);
1132
1133 DBPRINT(sc, BNX_EXCESSIVE,
1134 "%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n", __FUNCTION__, phy,
1135 (u_int16_t) reg & 0xffff, (u_int16_t) val & 0xffff);
1136
1137 if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1138 val = REG_RD(sc, BNX_EMAC_MDIO_MODE);
1139 val |= BNX_EMAC_MDIO_MODE_AUTO_POLL;
1140
1141 REG_WR(sc, BNX_EMAC_MDIO_MODE, val);
1142 REG_RD(sc, BNX_EMAC_MDIO_MODE);
1143
1144 DELAY(40);
1145 }
1146
1147 return (val & 0xffff);
1148 }
1149
1150
1151
1152
1153
1154
1155
1156
1157
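/*
 * MII register write through the EMAC MDIO interface; the same
 * auto-polling handling as bnx_miibus_read_reg() applies.
 */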
1158 void
1159 bnx_miibus_write_reg(struct device *dev, int phy, int reg, int val)
1160 {
1161 struct bnx_softc *sc = (struct bnx_softc *)dev;
1162 u_int32_t val1;
1163 int i;
1164
1165
1166 if (phy != sc->bnx_phy_addr) {
1167 DBPRINT(sc, BNX_WARN, "Invalid PHY address %d for PHY write!\n",
1168 phy);
1169 return;
1170 }
1171
1172 DBPRINT(sc, BNX_EXCESSIVE, "%s(): phy = %d, reg = 0x%04X, "
1173 "val = 0x%04X\n", __FUNCTION__,
1174 phy, (u_int16_t) reg & 0xffff, (u_int16_t) val & 0xffff);
1175
1176 if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1177 val1 = REG_RD(sc, BNX_EMAC_MDIO_MODE);
1178 val1 &= ~BNX_EMAC_MDIO_MODE_AUTO_POLL;
1179
1180 REG_WR(sc, BNX_EMAC_MDIO_MODE, val1);
1181 REG_RD(sc, BNX_EMAC_MDIO_MODE);
1182
1183 DELAY(40);
1184 }
1185
1186 val1 = BNX_MIPHY(phy) | BNX_MIREG(reg) | val |
1187 BNX_EMAC_MDIO_COMM_COMMAND_WRITE |
1188 BNX_EMAC_MDIO_COMM_START_BUSY | BNX_EMAC_MDIO_COMM_DISEXT;
1189 REG_WR(sc, BNX_EMAC_MDIO_COMM, val1);
1190
1191 for (i = 0; i < BNX_PHY_TIMEOUT; i++) {
1192 DELAY(10);
1193
1194 val1 = REG_RD(sc, BNX_EMAC_MDIO_COMM);
1195 if (!(val1 & BNX_EMAC_MDIO_COMM_START_BUSY)) {
1196 DELAY(5);
1197 break;
1198 }
1199 }
1200
1201 if (val1 & BNX_EMAC_MDIO_COMM_START_BUSY) {
1202 BNX_PRINTF(sc, "%s(%d): PHY write timeout!\n", __FILE__,
1203 __LINE__);
1204 }
1205
1206 if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1207 val1 = REG_RD(sc, BNX_EMAC_MDIO_MODE);
1208 val1 |= BNX_EMAC_MDIO_MODE_AUTO_POLL;
1209
1210 REG_WR(sc, BNX_EMAC_MDIO_MODE, val1);
1211 REG_RD(sc, BNX_EMAC_MDIO_MODE);
1212
1213 DELAY(40);
1214 }
1215 }
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
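/*
 * MII status change callback: program the EMAC port mode (MII/GMII)
 * and duplex setting to match the negotiated media.
 */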
1226 void
1227 bnx_miibus_statchg(struct device *dev)
1228 {
1229 struct bnx_softc *sc = (struct bnx_softc *)dev;
1230 struct mii_data *mii = &sc->bnx_mii;
1231
1232 BNX_CLRBIT(sc, BNX_EMAC_MODE, BNX_EMAC_MODE_PORT);
1233
1234
1235 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
1236 DBPRINT(sc, BNX_INFO, "Setting GMII interface.\n");
1237 BNX_SETBIT(sc, BNX_EMAC_MODE, BNX_EMAC_MODE_PORT_GMII);
1238 } else {
1239 DBPRINT(sc, BNX_INFO, "Setting MII interface.\n");
1240 BNX_SETBIT(sc, BNX_EMAC_MODE, BNX_EMAC_MODE_PORT_MII);
1241 }
1242
1243
1244
1245
1246 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
1247 DBPRINT(sc, BNX_INFO, "Setting Full-Duplex interface.\n");
1248 BNX_CLRBIT(sc, BNX_EMAC_MODE, BNX_EMAC_MODE_HALF_DUPLEX);
1249 } else {
1250 DBPRINT(sc, BNX_INFO, "Setting Half-Duplex interface.\n");
1251 BNX_SETBIT(sc, BNX_EMAC_MODE, BNX_EMAC_MODE_HALF_DUPLEX);
1252 }
1253 }
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
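/*
 * Request the NVRAM arbitration lock and poll until it is granted.
 * Returns 0 on success, EBUSY on timeout.
 */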
1265 int
1266 bnx_acquire_nvram_lock(struct bnx_softc *sc)
1267 {
1268 u_int32_t val;
1269 int j;
1270
1271 DBPRINT(sc, BNX_VERBOSE, "Acquiring NVRAM lock.\n");
1272
1273
1274 REG_WR(sc, BNX_NVM_SW_ARB, BNX_NVM_SW_ARB_ARB_REQ_SET2);
1275 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1276 val = REG_RD(sc, BNX_NVM_SW_ARB);
1277 if (val & BNX_NVM_SW_ARB_ARB_ARB2)
1278 break;
1279
1280 DELAY(5);
1281 }
1282
1283 if (j >= NVRAM_TIMEOUT_COUNT) {
1284 DBPRINT(sc, BNX_WARN, "Timeout acquiring NVRAM lock!\n");
1285 return (EBUSY);
1286 }
1287
1288 return (0);
1289 }
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
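/*
 * Relinquish the NVRAM arbitration lock and wait for the grant bit
 * to clear.  Returns 0 on success, EBUSY on timeout.
 */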
1301 int
1302 bnx_release_nvram_lock(struct bnx_softc *sc)
1303 {
1304 int j;
1305 u_int32_t val;
1306
1307 DBPRINT(sc, BNX_VERBOSE, "Releasing NVRAM lock.\n");
1308
1309
1310 REG_WR(sc, BNX_NVM_SW_ARB, BNX_NVM_SW_ARB_ARB_REQ_CLR2);
1311
1312 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1313 val = REG_RD(sc, BNX_NVM_SW_ARB);
1314 if (!(val & BNX_NVM_SW_ARB_ARB_ARB2))
1315 break;
1316
1317 DELAY(5);
1318 }
1319
1320 if (j >= NVRAM_TIMEOUT_COUNT) {
1321 DBPRINT(sc, BNX_WARN, "Timeout reeasing NVRAM lock!\n");
1322 return (EBUSY);
1323 }
1324
1325 return (0);
1326 }
1327
1328 #ifdef BNX_NVRAM_WRITE_SUPPORT
1329
1330
1331
1332
1333
1334
1335
1336
1337 int
1338 bnx_enable_nvram_write(struct bnx_softc *sc)
1339 {
1340 u_int32_t val;
1341
1342 DBPRINT(sc, BNX_VERBOSE, "Enabling NVRAM write.\n");
1343
1344 val = REG_RD(sc, BNX_MISC_CFG);
1345 REG_WR(sc, BNX_MISC_CFG, val | BNX_MISC_CFG_NVM_WR_EN_PCI);
1346
1347 if (!sc->bnx_flash_info->buffered) {
1348 int j;
1349
1350 REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
1351 REG_WR(sc, BNX_NVM_COMMAND,
1352 BNX_NVM_COMMAND_WREN | BNX_NVM_COMMAND_DOIT);
1353
1354 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1355 DELAY(5);
1356
1357 val = REG_RD(sc, BNX_NVM_COMMAND);
1358 if (val & BNX_NVM_COMMAND_DONE)
1359 break;
1360 }
1361
1362 if (j >= NVRAM_TIMEOUT_COUNT) {
1363 DBPRINT(sc, BNX_WARN, "Timeout writing NVRAM!\n");
1364 return (EBUSY);
1365 }
1366 }
1367
1368 return (0);
1369 }
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380 void
1381 bnx_disable_nvram_write(struct bnx_softc *sc)
1382 {
1383 u_int32_t val;
1384
1385 DBPRINT(sc, BNX_VERBOSE, "Disabling NVRAM write.\n");
1386
1387 val = REG_RD(sc, BNX_MISC_CFG);
1388 REG_WR(sc, BNX_MISC_CFG, val & ~BNX_MISC_CFG_NVM_WR_EN);
1389 }
1390 #endif
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401 void
1402 bnx_enable_nvram_access(struct bnx_softc *sc)
1403 {
1404 u_int32_t val;
1405
1406 DBPRINT(sc, BNX_VERBOSE, "Enabling NVRAM access.\n");
1407
1408 val = REG_RD(sc, BNX_NVM_ACCESS_ENABLE);
1409
1410 REG_WR(sc, BNX_NVM_ACCESS_ENABLE,
1411 val | BNX_NVM_ACCESS_ENABLE_EN | BNX_NVM_ACCESS_ENABLE_WR_EN);
1412 }
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422 void
1423 bnx_disable_nvram_access(struct bnx_softc *sc)
1424 {
1425 u_int32_t val;
1426
1427 DBPRINT(sc, BNX_VERBOSE, "Disabling NVRAM access.\n");
1428
1429 val = REG_RD(sc, BNX_NVM_ACCESS_ENABLE);
1430
1431
1432 REG_WR(sc, BNX_NVM_ACCESS_ENABLE,
1433 val & ~(BNX_NVM_ACCESS_ENABLE_EN | BNX_NVM_ACCESS_ENABLE_WR_EN));
1434 }
1435
1436 #ifdef BNX_NVRAM_WRITE_SUPPORT
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446 int
1447 bnx_nvram_erase_page(struct bnx_softc *sc, u_int32_t offset)
1448 {
1449 u_int32_t cmd;
1450 int j;
1451
1452
1453 if (sc->bnx_flash_info->buffered)
1454 return (0);
1455
1456 DBPRINT(sc, BNX_VERBOSE, "Erasing NVRAM page.\n");
1457
1458
1459 cmd = BNX_NVM_COMMAND_ERASE | BNX_NVM_COMMAND_WR |
1460 BNX_NVM_COMMAND_DOIT;
1461
1462
1463
1464
1465
1466 REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
1467 REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE);
1468 REG_WR(sc, BNX_NVM_COMMAND, cmd);
1469
1470
1471 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1472 u_int32_t val;
1473
1474 DELAY(5);
1475
1476 val = REG_RD(sc, BNX_NVM_COMMAND);
1477 if (val & BNX_NVM_COMMAND_DONE)
1478 break;
1479 }
1480
1481 if (j >= NVRAM_TIMEOUT_COUNT) {
1482 DBPRINT(sc, BNX_WARN, "Timeout erasing NVRAM.\n");
1483 return (EBUSY);
1484 }
1485
1486 return (0);
1487 }
1488 #endif
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
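/*
 * Read a single 32-bit word from NVRAM.  For buffered flash parts the
 * linear offset is first translated into a page/offset address.
 * Returns 0 on success, EBUSY on timeout.
 */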
1499 int
1500 bnx_nvram_read_dword(struct bnx_softc *sc, u_int32_t offset,
1501 u_int8_t *ret_val, u_int32_t cmd_flags)
1502 {
1503 u_int32_t cmd;
1504 int i, rc = 0;
1505
1506
1507 cmd = BNX_NVM_COMMAND_DOIT | cmd_flags;
1508
1509
1510 if (sc->bnx_flash_info->buffered)
1511 offset = ((offset / sc->bnx_flash_info->page_size) <<
1512 sc->bnx_flash_info->page_bits) +
1513 (offset % sc->bnx_flash_info->page_size);
1514
1515
1516
1517
1518
1519 REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
1520 REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE);
1521 REG_WR(sc, BNX_NVM_COMMAND, cmd);
1522
1523
1524 for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
1525 u_int32_t val;
1526
1527 DELAY(5);
1528
1529 val = REG_RD(sc, BNX_NVM_COMMAND);
1530 if (val & BNX_NVM_COMMAND_DONE) {
1531 val = REG_RD(sc, BNX_NVM_READ);
1532
1533 val = bnx_be32toh(val);
1534 memcpy(ret_val, &val, 4);
1535 break;
1536 }
1537 }
1538
1539
1540 if (i >= NVRAM_TIMEOUT_COUNT) {
1541 BNX_PRINTF(sc, "%s(%d): Timeout error reading NVRAM at "
1542 "offset 0x%08X!\n", __FILE__, __LINE__, offset);
1543 rc = EBUSY;
1544 }
1545
1546 return(rc);
1547 }
1548
1549 #ifdef BNX_NVRAM_WRITE_SUPPORT
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560 int
1561 bnx_nvram_write_dword(struct bnx_softc *sc, u_int32_t offset, u_int8_t *val,
1562 u_int32_t cmd_flags)
1563 {
1564 u_int32_t cmd, val32;
1565 int j;
1566
1567
1568 cmd = BNX_NVM_COMMAND_DOIT | BNX_NVM_COMMAND_WR | cmd_flags;
1569
1570
1571 if (sc->bnx_flash_info->buffered)
1572 offset = ((offset / sc->bnx_flash_info->page_size) <<
1573 sc->bnx_flash_info->page_bits) +
1574 (offset % sc->bnx_flash_info->page_size);
1575
1576
1577
1578
1579
1580 REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
1581 memcpy(&val32, val, 4);
1582 val32 = htobe32(val32);
1583 REG_WR(sc, BNX_NVM_WRITE, val32);
1584 REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE);
1585 REG_WR(sc, BNX_NVM_COMMAND, cmd);
1586
1587
1588 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1589 DELAY(5);
1590
1591 if (REG_RD(sc, BNX_NVM_COMMAND) & BNX_NVM_COMMAND_DONE)
1592 break;
1593 }
1594 if (j >= NVRAM_TIMEOUT_COUNT) {
1595 BNX_PRINTF(sc, "%s(%d): Timeout error writing NVRAM at "
1596 "offset 0x%08X\n", __FILE__, __LINE__, offset);
1597 return (EBUSY);
1598 }
1599
1600 return (0);
1601 }
1602 #endif
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
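/*
 * Identify the attached NVRAM device.  The strapping value from
 * BNX_NVM_CFG1 is matched against flash_table[]; if the flash has not
 * been reconfigured by the bootcode, the selected access parameters are
 * also programmed into the NVM configuration registers.  Returns 0 on
 * success, ENODEV if no table entry matches.
 */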
1613 int
1614 bnx_init_nvram(struct bnx_softc *sc)
1615 {
1616 u_int32_t val;
1617 int j, entry_count, rc;
1618 struct flash_spec *flash;
1619
1620 DBPRINT(sc,BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
1621
1622
1623 val = REG_RD(sc, BNX_NVM_CFG1);
1624
1625 entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
1626
1627 rc = 0;
1628
1629
1630
1631
1632
1633
1634
1635
1636 if (val & 0x40000000) {
1637
1638
1639 DBPRINT(sc,BNX_INFO_LOAD,
1640 "bnx_init_nvram(): Flash WAS reconfigured.\n");
1641
1642 for (j = 0, flash = &flash_table[0]; j < entry_count;
1643 j++, flash++) {
1644 if ((val & FLASH_BACKUP_STRAP_MASK) ==
1645 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
1646 sc->bnx_flash_info = flash;
1647 break;
1648 }
1649 }
1650 } else {
1651
1652 u_int32_t mask;
1653
1654 DBPRINT(sc,BNX_INFO_LOAD,
1655 "bnx_init_nvram(): Flash was NOT reconfigured.\n");
1656
1657 if (val & (1 << 23))
1658 mask = FLASH_BACKUP_STRAP_MASK;
1659 else
1660 mask = FLASH_STRAP_MASK;
1661
1662
1663 for (j = 0, flash = &flash_table[0]; j < entry_count;
1664 j++, flash++) {
1665
1666 if ((val & mask) == (flash->strapping & mask)) {
1667
1668 sc->bnx_flash_info = flash;
1669
1670
1671 if ((rc = bnx_acquire_nvram_lock(sc)) != 0)
1672 return (rc);
1673
1674
1675 bnx_enable_nvram_access(sc);
1676 REG_WR(sc, BNX_NVM_CFG1, flash->config1);
1677 REG_WR(sc, BNX_NVM_CFG2, flash->config2);
1678 REG_WR(sc, BNX_NVM_CFG3, flash->config3);
1679 REG_WR(sc, BNX_NVM_WRITE1, flash->write1);
1680 bnx_disable_nvram_access(sc);
1681 bnx_release_nvram_lock(sc);
1682
1683 break;
1684 }
1685 }
1686 }
1687
1688
1689 if (j == entry_count) {
1690 sc->bnx_flash_info = NULL;
1691 BNX_PRINTF(sc, "%s(%d): Unknown Flash NVRAM found!\n",
1692 __FILE__, __LINE__);
1693 rc = ENODEV;
1694 }
1695
1696
1697 val = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_SHARED_HW_CFG_CONFIG2);
1698 val &= BNX_SHARED_HW_CFG2_NVM_SIZE_MASK;
1699 if (val)
1700 sc->bnx_flash_size = val;
1701 else
1702 sc->bnx_flash_size = sc->bnx_flash_info->total_size;
1703
1704 DBPRINT(sc, BNX_INFO_LOAD, "bnx_init_nvram() flash->total_size = "
1705 "0x%08X\n", sc->bnx_flash_info->total_size);
1706
1707 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
1708
1709 return (rc);
1710 }
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
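/*
 * Read an arbitrary byte range from NVRAM.  Leading and trailing bytes
 * that are not 32-bit aligned are handled by reading whole words and
 * copying out the requested portion.  Returns 0 on success, non-zero
 * on failure.
 */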
1721 int
1722 bnx_nvram_read(struct bnx_softc *sc, u_int32_t offset, u_int8_t *ret_buf,
1723 int buf_size)
1724 {
1725 int rc = 0;
1726 u_int32_t cmd_flags, offset32, len32, extra;
1727
1728 if (buf_size == 0)
1729 return (0);
1730
1731
1732 if ((rc = bnx_acquire_nvram_lock(sc)) != 0)
1733 return (rc);
1734
1735
1736 bnx_enable_nvram_access(sc);
1737
1738 len32 = buf_size;
1739 offset32 = offset;
1740 extra = 0;
1741
1742 cmd_flags = 0;
1743
1744 if (offset32 & 3) {
1745 u_int8_t buf[4];
1746 u_int32_t pre_len;
1747
1748 offset32 &= ~3;
1749 pre_len = 4 - (offset & 3);
1750
1751 if (pre_len >= len32) {
1752 pre_len = len32;
1753 cmd_flags =
1754 BNX_NVM_COMMAND_FIRST | BNX_NVM_COMMAND_LAST;
1755 } else
1756 cmd_flags = BNX_NVM_COMMAND_FIRST;
1757
1758 rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags);
1759
1760 if (rc)
1761 return (rc);
1762
1763 memcpy(ret_buf, buf + (offset & 3), pre_len);
1764
1765 offset32 += 4;
1766 ret_buf += pre_len;
1767 len32 -= pre_len;
1768 }
1769
1770 if (len32 & 3) {
1771 extra = 4 - (len32 & 3);
1772 len32 = (len32 + 4) & ~3;
1773 }
1774
1775 if (len32 == 4) {
1776 u_int8_t buf[4];
1777
1778 if (cmd_flags)
1779 cmd_flags = BNX_NVM_COMMAND_LAST;
1780 else
1781 cmd_flags =
1782 BNX_NVM_COMMAND_FIRST | BNX_NVM_COMMAND_LAST;
1783
1784 rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags);
1785
1786 memcpy(ret_buf, buf, 4 - extra);
1787 } else if (len32 > 0) {
1788 u_int8_t buf[4];
1789
1790
1791 if (cmd_flags)
1792 cmd_flags = 0;
1793 else
1794 cmd_flags = BNX_NVM_COMMAND_FIRST;
1795
1796 rc = bnx_nvram_read_dword(sc, offset32, ret_buf, cmd_flags);
1797
1798
1799 offset32 += 4;
1800 ret_buf += 4;
1801 len32 -= 4;
1802
1803 while (len32 > 4 && rc == 0) {
1804 rc = bnx_nvram_read_dword(sc, offset32, ret_buf, 0);
1805
1806
1807 offset32 += 4;
1808 ret_buf += 4;
1809 len32 -= 4;
1810 }
1811
1812 if (rc)
1813 return (rc);
1814
1815 cmd_flags = BNX_NVM_COMMAND_LAST;
1816 rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags);
1817
1818 memcpy(ret_buf, buf, 4 - extra);
1819 }
1820
1821
1822 bnx_disable_nvram_access(sc);
1823 bnx_release_nvram_lock(sc);
1824
1825 return (rc);
1826 }
1827
1828 #ifdef BNX_NVRAM_WRITE_SUPPORT
1829
1830
1831
1832
1833
1834
1835
1836
1837
1838
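/*
 * Write an arbitrary byte range to NVRAM (only built with
 * BNX_NVRAM_WRITE_SUPPORT).  Unaligned edges are read-modified-written,
 * and non-buffered parts are erased and rewritten a page at a time.
 */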
1839 int
1840 bnx_nvram_write(struct bnx_softc *sc, u_int32_t offset, u_int8_t *data_buf,
1841 int buf_size)
1842 {
1843 u_int32_t written, offset32, len32;
1844 u_int8_t *buf, start[4], end[4];
1845 int rc = 0;
1846 int align_start, align_end;
1847
1848 buf = data_buf;
1849 offset32 = offset;
1850 len32 = buf_size;
1851 align_start = align_end = 0;
1852
1853 if ((align_start = (offset32 & 3))) {
1854 offset32 &= ~3;
1855 len32 += align_start;
1856 if ((rc = bnx_nvram_read(sc, offset32, start, 4)))
1857 return (rc);
1858 }
1859
1860 if (len32 & 3) {
1861 if ((len32 > 4) || !align_start) {
1862 align_end = 4 - (len32 & 3);
1863 len32 += align_end;
1864 if ((rc = bnx_nvram_read(sc, offset32 + len32 - 4,
1865 end, 4))) {
1866 return (rc);
1867 }
1868 }
1869 }
1870
1871 if (align_start || align_end) {
1872 buf = malloc(len32, M_DEVBUF, M_NOWAIT);
1873 if (buf == 0)
1874 return (ENOMEM);
1875
1876 if (align_start)
1877 memcpy(buf, start, 4);
1878
1879 if (align_end)
1880 memcpy(buf + len32 - 4, end, 4);
1881
1882 memcpy(buf + align_start, data_buf, buf_size);
1883 }
1884
1885 written = 0;
1886 while ((written < len32) && (rc == 0)) {
1887 u_int32_t page_start, page_end, data_start, data_end;
1888 u_int32_t addr, cmd_flags;
1889 int i;
1890 u_int8_t flash_buffer[264];
1891
1892
1893 page_start = offset32 + written;
1894 page_start -= (page_start % sc->bnx_flash_info->page_size);
1895
1896 page_end = page_start + sc->bnx_flash_info->page_size;
1897
1898 data_start = (written == 0) ? offset32 : page_start;
1899
1900 data_end = (page_end > offset32 + len32) ?
1901 (offset32 + len32) : page_end;
1902
1903
1904 if ((rc = bnx_acquire_nvram_lock(sc)) != 0)
1905 goto nvram_write_end;
1906
1907
1908 bnx_enable_nvram_access(sc);
1909
1910 cmd_flags = BNX_NVM_COMMAND_FIRST;
1911 if (sc->bnx_flash_info->buffered == 0) {
1912 int j;
1913
1914
1915
1916 for (j = 0; j < sc->bnx_flash_info->page_size; j += 4) {
1917 if (j == (sc->bnx_flash_info->page_size - 4))
1918 cmd_flags |= BNX_NVM_COMMAND_LAST;
1919
1920 rc = bnx_nvram_read_dword(sc,
1921 page_start + j,
1922 &flash_buffer[j],
1923 cmd_flags);
1924
1925 if (rc)
1926 goto nvram_write_end;
1927
1928 cmd_flags = 0;
1929 }
1930 }
1931
1932
1933 if ((rc = bnx_enable_nvram_write(sc)) != 0)
1934 goto nvram_write_end;
1935
1936
1937 if ((rc = bnx_nvram_erase_page(sc, page_start)) != 0)
1938 goto nvram_write_end;
1939
1940
1941 bnx_enable_nvram_write(sc);
1942
1943
1944
1945 i = 0;
1946 if (sc->bnx_flash_info->buffered == 0) {
1947 for (addr = page_start; addr < data_start;
1948 addr += 4, i += 4) {
1949
1950 rc = bnx_nvram_write_dword(sc, addr,
1951 &flash_buffer[i], cmd_flags);
1952
1953 if (rc != 0)
1954 goto nvram_write_end;
1955
1956 cmd_flags = 0;
1957 }
1958 }
1959
1960
1961 for (addr = data_start; addr < data_end; addr += 4, i++) {
1962 if ((addr == page_end - 4) ||
1963 ((sc->bnx_flash_info->buffered) &&
1964 (addr == data_end - 4))) {
1965
1966 cmd_flags |= BNX_NVM_COMMAND_LAST;
1967 }
1968
1969 rc = bnx_nvram_write_dword(sc, addr, buf, cmd_flags);
1970
1971 if (rc != 0)
1972 goto nvram_write_end;
1973
1974 cmd_flags = 0;
1975 buf += 4;
1976 }
1977
1978
1979
1980 if (sc->bnx_flash_info->buffered == 0) {
1981 for (addr = data_end; addr < page_end;
1982 addr += 4, i += 4) {
1983
1984 if (addr == page_end-4)
1985 cmd_flags = BNX_NVM_COMMAND_LAST;
1986
1987 rc = bnx_nvram_write_dword(sc, addr,
1988 &flash_buffer[i], cmd_flags);
1989
1990 if (rc != 0)
1991 goto nvram_write_end;
1992
1993 cmd_flags = 0;
1994 }
1995 }
1996
1997
1998 bnx_disable_nvram_write(sc);
1999
2000
2001 bnx_disable_nvram_access(sc);
2002 bnx_release_nvram_lock(sc);
2003
2004
2005 written += data_end - data_start;
2006 }
2007
2008 nvram_write_end:
2009 if (align_start || align_end)
2010 free(buf, M_DEVBUF);
2011
2012 return (rc);
2013 }
2014 #endif
2015
2016
2017
2018
2019
2020
2021
2022
2023
2024
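/*
 * Sanity-check NVRAM contents: verify the magic value in the first word
 * and the CRC32 of the manufacturing and feature configuration areas.
 * Returns 0 on success, non-zero on failure.
 */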
2025 int
2026 bnx_nvram_test(struct bnx_softc *sc)
2027 {
2028 u_int32_t buf[BNX_NVRAM_SIZE / 4];
2029 u_int8_t *data = (u_int8_t *) buf;
2030 int rc = 0;
2031 u_int32_t magic, csum;
2032
2033
2034
2035
2036
2037 if ((rc = bnx_nvram_read(sc, 0, data, 4)) != 0)
2038 goto bnx_nvram_test_done;
2039
2040 magic = bnx_be32toh(buf[0]);
2041 if (magic != BNX_NVRAM_MAGIC) {
2042 rc = ENODEV;
2043 BNX_PRINTF(sc, "%s(%d): Invalid NVRAM magic value! "
2044 "Expected: 0x%08X, Found: 0x%08X\n",
2045 __FILE__, __LINE__, BNX_NVRAM_MAGIC, magic);
2046 goto bnx_nvram_test_done;
2047 }
2048
2049
2050
2051
2052
2053 if ((rc = bnx_nvram_read(sc, 0x100, data, BNX_NVRAM_SIZE)) != 0)
2054 goto bnx_nvram_test_done;
2055
2056 csum = ether_crc32_le(data, 0x100);
2057 if (csum != BNX_CRC32_RESIDUAL) {
2058 rc = ENODEV;
2059 BNX_PRINTF(sc, "%s(%d): Invalid Manufacturing Information "
2060 "NVRAM CRC! Expected: 0x%08X, Found: 0x%08X\n",
2061 __FILE__, __LINE__, BNX_CRC32_RESIDUAL, csum);
2062 goto bnx_nvram_test_done;
2063 }
2064
2065 csum = ether_crc32_le(data + 0x100, 0x100);
2066 if (csum != BNX_CRC32_RESIDUAL) {
2067 BNX_PRINTF(sc, "%s(%d): Invalid Feature Configuration "
2068 "Information NVRAM CRC! Expected: 0x%08X, Found: 08%08X\n",
2069 __FILE__, __LINE__, BNX_CRC32_RESIDUAL, csum);
2070 rc = ENODEV;
2071 }
2072
2073 bnx_nvram_test_done:
2074 return (rc);
2075 }
2076
2077
2078
2079
2080
2081
2082
2083
2084
2085
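/*
 * Release all DMA memory and maps set up by bnx_dma_alloc(): the status
 * and statistics blocks, the TX and RX buffer descriptor chain pages,
 * and the per-buffer mbuf maps.
 */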
2086 void
2087 bnx_dma_free(struct bnx_softc *sc)
2088 {
2089 int i;
2090
2091 DBPRINT(sc,BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
2092
2093
2094 if (sc->status_block != NULL && sc->status_map != NULL) {
2095 bus_dmamap_unload(sc->bnx_dmatag, sc->status_map);
2096 bus_dmamem_unmap(sc->bnx_dmatag, (caddr_t)sc->status_block,
2097 BNX_STATUS_BLK_SZ);
2098 bus_dmamem_free(sc->bnx_dmatag, &sc->status_seg,
2099 sc->status_rseg);
2100 bus_dmamap_destroy(sc->bnx_dmatag, sc->status_map);
2101 sc->status_block = NULL;
2102 sc->status_map = NULL;
2103 }
2104
2105
2106 if (sc->stats_block != NULL && sc->stats_map != NULL) {
2107 bus_dmamap_unload(sc->bnx_dmatag, sc->stats_map);
2108 bus_dmamem_unmap(sc->bnx_dmatag, (caddr_t)sc->stats_block,
2109 BNX_STATS_BLK_SZ);
2110 bus_dmamem_free(sc->bnx_dmatag, &sc->stats_seg,
2111 sc->stats_rseg);
2112 bus_dmamap_destroy(sc->bnx_dmatag, sc->stats_map);
2113 sc->stats_block = NULL;
2114 sc->stats_map = NULL;
2115 }
2116
2117
2118 for (i = 0; i < TX_PAGES; i++ ) {
2119 if (sc->tx_bd_chain[i] != NULL &&
2120 sc->tx_bd_chain_map[i] != NULL) {
2121 bus_dmamap_unload(sc->bnx_dmatag,
2122 sc->tx_bd_chain_map[i]);
2123 bus_dmamem_unmap(sc->bnx_dmatag,
2124 (caddr_t)sc->tx_bd_chain[i], BNX_TX_CHAIN_PAGE_SZ);
2125 bus_dmamem_free(sc->bnx_dmatag, &sc->tx_bd_chain_seg[i],
2126 sc->tx_bd_chain_rseg[i]);
2127 bus_dmamap_destroy(sc->bnx_dmatag,
2128 sc->tx_bd_chain_map[i]);
2129 sc->tx_bd_chain[i] = NULL;
2130 sc->tx_bd_chain_map[i] = NULL;
2131 }
2132 }
2133
2134
2135 for (i = 0; i < TOTAL_TX_BD; i++) {
2136 if (sc->tx_mbuf_map[i] != NULL) {
2137 bus_dmamap_unload(sc->bnx_dmatag, sc->tx_mbuf_map[i]);
2138 bus_dmamap_destroy(sc->bnx_dmatag, sc->tx_mbuf_map[i]);
2139 }
2140 }
2141
2142
2143 for (i = 0; i < RX_PAGES; i++ ) {
2144 if (sc->rx_bd_chain[i] != NULL &&
2145 sc->rx_bd_chain_map[i] != NULL) {
2146 bus_dmamap_unload(sc->bnx_dmatag,
2147 sc->rx_bd_chain_map[i]);
2148 bus_dmamem_unmap(sc->bnx_dmatag,
2149 (caddr_t)sc->rx_bd_chain[i], BNX_RX_CHAIN_PAGE_SZ);
2150 bus_dmamem_free(sc->bnx_dmatag, &sc->rx_bd_chain_seg[i],
2151 sc->rx_bd_chain_rseg[i]);
2152
2153 bus_dmamap_destroy(sc->bnx_dmatag,
2154 sc->rx_bd_chain_map[i]);
2155 sc->rx_bd_chain[i] = NULL;
2156 sc->rx_bd_chain_map[i] = NULL;
2157 }
2158 }
2159
2160
2161 for (i = 0; i < TOTAL_RX_BD; i++) {
2162 if (sc->rx_mbuf_map[i] != NULL) {
2163 bus_dmamap_unload(sc->bnx_dmatag, sc->rx_mbuf_map[i]);
2164 bus_dmamap_destroy(sc->bnx_dmatag, sc->rx_mbuf_map[i]);
2165 }
2166 }
2167
2168 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
2169 }
2170
2171
2172
2173
2174
2175
2176
2177
2178
2179
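/*
 * Allocate, map, and load DMA-able memory for the status block, the
 * statistics block, and the TX and RX buffer descriptor chain pages,
 * and create DMA maps for the TX and RX mbufs.  Returns 0 on success,
 * ENOMEM on failure.
 */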
2180 int
2181 bnx_dma_alloc(struct bnx_softc *sc)
2182 {
2183 int i, rc = 0;
2184
2185 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
2186
2187
2188
2189
2190
2191 if (bus_dmamap_create(sc->bnx_dmatag, BNX_STATUS_BLK_SZ, 1,
2192 BNX_STATUS_BLK_SZ, 0, BUS_DMA_NOWAIT, &sc->status_map)) {
2193 printf(": Could not create status block DMA map!\n");
2194 rc = ENOMEM;
2195 goto bnx_dma_alloc_exit;
2196 }
2197
2198 if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_STATUS_BLK_SZ,
2199 BNX_DMA_ALIGN, BNX_DMA_BOUNDARY, &sc->status_seg, 1,
2200 &sc->status_rseg, BUS_DMA_NOWAIT)) {
2201 printf(": Could not allocate status block DMA memory!\n");
2202 rc = ENOMEM;
2203 goto bnx_dma_alloc_exit;
2204 }
2205
2206 if (bus_dmamem_map(sc->bnx_dmatag, &sc->status_seg, sc->status_rseg,
2207 BNX_STATUS_BLK_SZ, (caddr_t *)&sc->status_block, BUS_DMA_NOWAIT)) {
2208 printf(": Could not map status block DMA memory!\n");
2209 rc = ENOMEM;
2210 goto bnx_dma_alloc_exit;
2211 }
2212
2213 if (bus_dmamap_load(sc->bnx_dmatag, sc->status_map,
2214 sc->status_block, BNX_STATUS_BLK_SZ, NULL, BUS_DMA_NOWAIT)) {
2215 printf(": Could not load status block DMA memory!\n");
2216 rc = ENOMEM;
2217 goto bnx_dma_alloc_exit;
2218 }
2219
2220 sc->status_block_paddr = sc->status_map->dm_segs[0].ds_addr;
2221 bzero(sc->status_block, BNX_STATUS_BLK_SZ);
2222
2223
2224 DBPRINT(sc, BNX_INFO, "status_block_paddr = 0x%08X\n",
2225 (u_int32_t) sc->status_block_paddr);
2226
2227
2228
2229
2230
2231 if (bus_dmamap_create(sc->bnx_dmatag, BNX_STATS_BLK_SZ, 1,
2232 BNX_STATS_BLK_SZ, 0, BUS_DMA_NOWAIT, &sc->stats_map)) {
2233 printf(": Could not create stats block DMA map!\n");
2234 rc = ENOMEM;
2235 goto bnx_dma_alloc_exit;
2236 }
2237
2238 if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_STATS_BLK_SZ,
2239 BNX_DMA_ALIGN, BNX_DMA_BOUNDARY, &sc->stats_seg, 1,
2240 &sc->stats_rseg, BUS_DMA_NOWAIT)) {
2241 printf(": Could not allocate stats block DMA memory!\n");
2242 rc = ENOMEM;
2243 goto bnx_dma_alloc_exit;
2244 }
2245
2246 if (bus_dmamem_map(sc->bnx_dmatag, &sc->stats_seg, sc->stats_rseg,
2247 BNX_STATS_BLK_SZ, (caddr_t *)&sc->stats_block, BUS_DMA_NOWAIT)) {
2248 printf(": Could not map stats block DMA memory!\n");
2249 rc = ENOMEM;
2250 goto bnx_dma_alloc_exit;
2251 }
2252
2253 if (bus_dmamap_load(sc->bnx_dmatag, sc->stats_map,
2254 sc->stats_block, BNX_STATS_BLK_SZ, NULL, BUS_DMA_NOWAIT)) {
2255 printf(": Could not load status block DMA memory!\n");
2256 rc = ENOMEM;
2257 goto bnx_dma_alloc_exit;
2258 }
2259
2260 sc->stats_block_paddr = sc->stats_map->dm_segs[0].ds_addr;
2261 bzero(sc->stats_block, BNX_STATS_BLK_SZ);
2262
2263
2264 DBPRINT(sc, BNX_INFO, "stats_block_paddr = 0x%08X\n",
2265 (u_int32_t) sc->stats_block_paddr);
2266
2267
2268
2269
2270
2271 for (i = 0; i < TX_PAGES; i++) {
2272 if (bus_dmamap_create(sc->bnx_dmatag, BNX_TX_CHAIN_PAGE_SZ, 1,
2273 BNX_TX_CHAIN_PAGE_SZ, 0, BUS_DMA_NOWAIT,
2274 &sc->tx_bd_chain_map[i])) {
2275 printf(": Could not create Tx desc %d DMA map!\n", i);
2276 rc = ENOMEM;
2277 goto bnx_dma_alloc_exit;
2278 }
2279
2280 if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_TX_CHAIN_PAGE_SZ,
2281 BCM_PAGE_SIZE, BNX_DMA_BOUNDARY, &sc->tx_bd_chain_seg[i], 1,
2282 &sc->tx_bd_chain_rseg[i], BUS_DMA_NOWAIT)) {
2283 printf(": Could not allocate TX desc %d DMA memory!\n",
2284 i);
2285 rc = ENOMEM;
2286 goto bnx_dma_alloc_exit;
2287 }
2288
2289 if (bus_dmamem_map(sc->bnx_dmatag, &sc->tx_bd_chain_seg[i],
2290 sc->tx_bd_chain_rseg[i], BNX_TX_CHAIN_PAGE_SZ,
2291 (caddr_t *)&sc->tx_bd_chain[i], BUS_DMA_NOWAIT)) {
2292 printf(": Could not map TX desc %d DMA memory!\n", i);
2293 rc = ENOMEM;
2294 goto bnx_dma_alloc_exit;
2295 }
2296
2297 if (bus_dmamap_load(sc->bnx_dmatag, sc->tx_bd_chain_map[i],
2298 (caddr_t)sc->tx_bd_chain[i], BNX_TX_CHAIN_PAGE_SZ, NULL,
2299 BUS_DMA_NOWAIT)) {
2300 printf(": Could not load TX desc %d DMA memory!\n", i);
2301 rc = ENOMEM;
2302 goto bnx_dma_alloc_exit;
2303 }
2304
2305 sc->tx_bd_chain_paddr[i] =
2306 sc->tx_bd_chain_map[i]->dm_segs[0].ds_addr;
2307
2308
2309 DBPRINT(sc, BNX_INFO, "tx_bd_chain_paddr[%d] = 0x%08X\n",
2310 i, (u_int32_t) sc->tx_bd_chain_paddr[i]);
2311 }
2312
2313
2314
2315
2316 for (i = 0; i < TOTAL_TX_BD; i++) {
2317 if (bus_dmamap_create(sc->bnx_dmatag,
2318 MCLBYTES * BNX_MAX_SEGMENTS,
2319 USABLE_TX_BD - BNX_TX_SLACK_SPACE,
2320 MCLBYTES, 0, BUS_DMA_NOWAIT,
2321 &sc->tx_mbuf_map[i])) {
2322 printf(": Could not create Tx mbuf %d DMA map!\n", i);
2323 rc = ENOMEM;
2324 goto bnx_dma_alloc_exit;
2325 }
2326 }
2327
2328
2329
2330
2331
2332 for (i = 0; i < RX_PAGES; i++) {
2333 if (bus_dmamap_create(sc->bnx_dmatag, BNX_RX_CHAIN_PAGE_SZ, 1,
2334 BNX_RX_CHAIN_PAGE_SZ, 0, BUS_DMA_NOWAIT,
2335 &sc->rx_bd_chain_map[i])) {
2336 printf(": Could not create Rx desc %d DMA map!\n", i);
2337 rc = ENOMEM;
2338 goto bnx_dma_alloc_exit;
2339 }
2340
2341 if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_RX_CHAIN_PAGE_SZ,
2342 BCM_PAGE_SIZE, BNX_DMA_BOUNDARY, &sc->rx_bd_chain_seg[i], 1,
2343 &sc->rx_bd_chain_rseg[i], BUS_DMA_NOWAIT)) {
2344 printf(": Could not allocate Rx desc %d DMA memory!\n",
2345 i);
2346 rc = ENOMEM;
2347 goto bnx_dma_alloc_exit;
2348 }
2349
2350 if (bus_dmamem_map(sc->bnx_dmatag, &sc->rx_bd_chain_seg[i],
2351 sc->rx_bd_chain_rseg[i], BNX_RX_CHAIN_PAGE_SZ,
2352 (caddr_t *)&sc->rx_bd_chain[i], BUS_DMA_NOWAIT)) {
2353 printf(": Could not map Rx desc %d DMA memory!\n", i);
2354 rc = ENOMEM;
2355 goto bnx_dma_alloc_exit;
2356 }
2357
2358 if (bus_dmamap_load(sc->bnx_dmatag, sc->rx_bd_chain_map[i],
2359 (caddr_t)sc->rx_bd_chain[i], BNX_RX_CHAIN_PAGE_SZ, NULL,
2360 BUS_DMA_NOWAIT)) {
2361 printf(": Could not load Rx desc %d DMA memory!\n", i);
2362 rc = ENOMEM;
2363 goto bnx_dma_alloc_exit;
2364 }
2365
2366 bzero(sc->rx_bd_chain[i], BNX_RX_CHAIN_PAGE_SZ);
2367 sc->rx_bd_chain_paddr[i] =
2368 sc->rx_bd_chain_map[i]->dm_segs[0].ds_addr;
2369
2370
2371 DBPRINT(sc, BNX_INFO, "rx_bd_chain_paddr[%d] = 0x%08X\n",
2372 i, (u_int32_t) sc->rx_bd_chain_paddr[i]);
2373 }
2374
2375
2376
2377
2378 for (i = 0; i < TOTAL_RX_BD; i++) {
2379 if (bus_dmamap_create(sc->bnx_dmatag, BNX_MAX_MRU,
2380 BNX_MAX_SEGMENTS, BNX_MAX_MRU, 0, BUS_DMA_NOWAIT,
2381 &sc->rx_mbuf_map[i])) {
2382 printf(": Could not create Rx mbuf %d DMA map!\n", i);
2383 rc = ENOMEM;
2384 goto bnx_dma_alloc_exit;
2385 }
2386 }
2387
2388 bnx_dma_alloc_exit:
2389 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
2390
2391 return(rc);
2392 }
2393
2394
2395
2396
2397
2398
2399
2400
2401
2402
2403 void
2404 bnx_release_resources(struct bnx_softc *sc)
2405 {
2406 struct pci_attach_args *pa = &(sc->bnx_pa);
2407
2408 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
2409
2410 bnx_dma_free(sc);
2411
2412 if (sc->bnx_intrhand != NULL)
2413 pci_intr_disestablish(pa->pa_pc, sc->bnx_intrhand);
2414
2415 if (sc->bnx_size)
2416 bus_space_unmap(sc->bnx_btag, sc->bnx_bhandle, sc->bnx_size);
2417
2418 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
2419 }
2420
2421
2422
2423
2424
2425
2426
2427
2428
2429
2430 int
2431 bnx_fw_sync(struct bnx_softc *sc, u_int32_t msg_data)
2432 {
2433 int i, rc = 0;
2434 u_int32_t val;
2435
2436
2437 if (sc->bnx_fw_timed_out) {
2438 rc = EBUSY;
2439 goto bnx_fw_sync_exit;
2440 }
2441
2442
2443 sc->bnx_fw_wr_seq++;
2444 msg_data |= sc->bnx_fw_wr_seq;
2445
2446 DBPRINT(sc, BNX_VERBOSE, "bnx_fw_sync(): msg_data = 0x%08X\n",
2447 msg_data);
2448
2449
2450 REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_MB, msg_data);
2451
2452
2453 for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) {
2454
2455 val = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_FW_MB);
2456 if ((val & BNX_FW_MSG_ACK) == (msg_data & BNX_DRV_MSG_SEQ))
2457 break;
2458 DELAY(1000);
2459 }
2460
2461
2462 if (((val & BNX_FW_MSG_ACK) != (msg_data & BNX_DRV_MSG_SEQ)) &&
2463 ((msg_data & BNX_DRV_MSG_DATA) != BNX_DRV_MSG_DATA_WAIT0)) {
2464 BNX_PRINTF(sc, "%s(%d): Firmware synchronization timeout! "
2465 "msg_data = 0x%08X\n", __FILE__, __LINE__, msg_data);
2466
2467 msg_data &= ~BNX_DRV_MSG_CODE;
2468 msg_data |= BNX_DRV_MSG_CODE_FW_TIMEOUT;
2469
2470 REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_MB, msg_data);
2471
2472 sc->bnx_fw_timed_out = 1;
2473 rc = EBUSY;
2474 }
2475
2476 bnx_fw_sync_exit:
2477 return (rc);
2478 }
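
The function above is the driver/bootcode handshake: each request is tagged with an incrementing sequence number, written to the BNX_DRV_MB shared-memory mailbox, and the driver then polls BNX_FW_MB until the firmware echoes that sequence back or the FW_ACK_TIME_OUT_MS budget is exhausted. Below is a minimal standalone sketch of the same pattern in userland C; the mailbox is simulated and the mask/timeout names are illustrative, not the chip's register layout.

/* Standalone sketch of a sequence-numbered mailbox handshake (simulated). */
#include <stdio.h>
#include <stdint.h>

#define SEQ_MASK   0x0000ffffu   /* assumed: low bits carry the sequence */
#define TIMEOUT_MS 100

static uint32_t drv_mb, fw_mb;   /* stand-ins for the shared-memory mailboxes */

/* Simulated firmware: acknowledge whatever sequence the driver posted. */
static void fake_firmware_poll(void) { fw_mb = drv_mb & SEQ_MASK; }

static int fw_sync(uint32_t *seq, uint32_t msg)
{
    int i;

    msg |= ++(*seq) & SEQ_MASK;       /* tag the request with a new sequence */
    drv_mb = msg;                     /* post it to the firmware mailbox     */

    for (i = 0; i < TIMEOUT_MS; i++) {
        fake_firmware_poll();         /* stands in for DELAY(1000) + re-read */
        if ((fw_mb & SEQ_MASK) == (msg & SEQ_MASK))
            return 0;                 /* firmware echoed our sequence number */
    }
    return -1;                        /* timed out; caller marks firmware dead */
}

int main(void)
{
    uint32_t seq = 0;

    printf("sync #1: %s\n", fw_sync(&seq, 0x10000) ? "timeout" : "ok");
    printf("sync #2: %s\n", fw_sync(&seq, 0x20000) ? "timeout" : "ok");
    return 0;
}
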
2479
2480
2481
2482
2483
2484
2485
2486 void
2487 bnx_load_rv2p_fw(struct bnx_softc *sc, u_int32_t *rv2p_code,
2488 u_int32_t rv2p_code_len, u_int32_t rv2p_proc)
2489 {
2490 int i;
2491 u_int32_t val;
2492
2493 for (i = 0; i < rv2p_code_len; i += 8) {
2494 REG_WR(sc, BNX_RV2P_INSTR_HIGH, *rv2p_code);
2495 rv2p_code++;
2496 REG_WR(sc, BNX_RV2P_INSTR_LOW, *rv2p_code);
2497 rv2p_code++;
2498
2499 if (rv2p_proc == RV2P_PROC1) {
2500 val = (i / 8) | BNX_RV2P_PROC1_ADDR_CMD_RDWR;
2501 REG_WR(sc, BNX_RV2P_PROC1_ADDR_CMD, val);
2502 }
2503 else {
2504 val = (i / 8) | BNX_RV2P_PROC2_ADDR_CMD_RDWR;
2505 REG_WR(sc, BNX_RV2P_PROC2_ADDR_CMD, val);
2506 }
2507 }
2508
2509
2510 if (rv2p_proc == RV2P_PROC1)
2511 REG_WR(sc, BNX_RV2P_COMMAND, BNX_RV2P_COMMAND_PROC1_RESET);
2512 else
2513 REG_WR(sc, BNX_RV2P_COMMAND, BNX_RV2P_COMMAND_PROC2_RESET);
2514 }
2515
2516
2517
2518
2519
2520
2521
2522
2523
2524
2525 void
2526 bnx_load_cpu_fw(struct bnx_softc *sc, struct cpu_reg *cpu_reg,
2527 struct fw_info *fw)
2528 {
2529 u_int32_t offset;
2530 u_int32_t val;
2531
2532
2533 val = REG_RD_IND(sc, cpu_reg->mode);
2534 val |= cpu_reg->mode_value_halt;
2535 REG_WR_IND(sc, cpu_reg->mode, val);
2536 REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2537
2538
2539 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2540 if (fw->text) {
2541 int j;
2542
2543 for (j = 0; j < (fw->text_len / 4); j++, offset += 4)
2544 REG_WR_IND(sc, offset, fw->text[j]);
2545 }
2546
2547
2548 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2549 if (fw->data) {
2550 int j;
2551
2552 for (j = 0; j < (fw->data_len / 4); j++, offset += 4)
2553 REG_WR_IND(sc, offset, fw->data[j]);
2554 }
2555
2556
2557 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2558 if (fw->sbss) {
2559 int j;
2560
2561 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4)
2562 REG_WR_IND(sc, offset, fw->sbss[j]);
2563 }
2564
2565
2566 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2567 if (fw->bss) {
2568 int j;
2569
2570 for (j = 0; j < (fw->bss_len/4); j++, offset += 4)
2571 REG_WR_IND(sc, offset, fw->bss[j]);
2572 }
2573
2574
2575 offset = cpu_reg->spad_base +
2576 (fw->rodata_addr - cpu_reg->mips_view_base);
2577 if (fw->rodata) {
2578 int j;
2579
2580 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4)
2581 REG_WR_IND(sc, offset, fw->rodata[j]);
2582 }
2583
2584
2585 REG_WR_IND(sc, cpu_reg->inst, 0);
2586 REG_WR_IND(sc, cpu_reg->pc, fw->start_addr);
2587
2588
2589 val = REG_RD_IND(sc, cpu_reg->mode);
2590 val &= ~cpu_reg->mode_value_halt;
2591 REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2592 REG_WR_IND(sc, cpu_reg->mode, val);
2593 }
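
bnx_load_cpu_fw() above halts the on-chip RISC processor, copies every firmware section (text, data, sbss, bss, rodata) word by word into the processor's scratchpad at spad_base + (section_addr - mips_view_base), loads the program counter with the start address, and finally clears the halt bit. A standalone sketch of the address translation and word copy follows; the scratchpad is a plain array here and the section words are arbitrary example values, not real firmware.

/* Standalone sketch: copy a firmware section into a scratchpad window. */
#include <stdio.h>
#include <stdint.h>

#define VIEW_BASE  0x08000000u    /* address the firmware was linked against */
#define SPAD_BASE  0x00000000u    /* where the scratchpad window begins      */
#define SPAD_WORDS 256

static uint32_t scratchpad[SPAD_WORDS];    /* simulated on-chip memory */

static void load_section(uint32_t sect_addr, const uint32_t *words, int len)
{
    uint32_t offset = SPAD_BASE + (sect_addr - VIEW_BASE);
    int i;

    /* The driver does one indirect register write per 32-bit word. */
    for (i = 0; i < len / 4; i++, offset += 4)
        scratchpad[offset / 4] = words[i];
}

int main(void)
{
    const uint32_t text[] = { 0x3c020800, 0x24420010, 0x03e00008 };

    load_section(VIEW_BASE + 0x20, text, (int)sizeof(text));
    printf("scratchpad[8] = 0x%08x\n", scratchpad[8]);
    return 0;
}
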
2594
2595
2596
2597
2598
2599
2600
2601
2602
2603 void
2604 bnx_init_cpus(struct bnx_softc *sc)
2605 {
2606 struct cpu_reg cpu_reg;
2607 struct fw_info fw;
2608
2609
2610 bnx_load_rv2p_fw(sc, bnx_rv2p_proc1, bnx_rv2p_proc1len,
2611 RV2P_PROC1);
2612 bnx_load_rv2p_fw(sc, bnx_rv2p_proc2, bnx_rv2p_proc2len,
2613 RV2P_PROC2);
2614
2615
2616 cpu_reg.mode = BNX_RXP_CPU_MODE;
2617 cpu_reg.mode_value_halt = BNX_RXP_CPU_MODE_SOFT_HALT;
2618 cpu_reg.mode_value_sstep = BNX_RXP_CPU_MODE_STEP_ENA;
2619 cpu_reg.state = BNX_RXP_CPU_STATE;
2620 cpu_reg.state_value_clear = 0xffffff;
2621 cpu_reg.gpr0 = BNX_RXP_CPU_REG_FILE;
2622 cpu_reg.evmask = BNX_RXP_CPU_EVENT_MASK;
2623 cpu_reg.pc = BNX_RXP_CPU_PROGRAM_COUNTER;
2624 cpu_reg.inst = BNX_RXP_CPU_INSTRUCTION;
2625 cpu_reg.bp = BNX_RXP_CPU_HW_BREAKPOINT;
2626 cpu_reg.spad_base = BNX_RXP_SCRATCH;
2627 cpu_reg.mips_view_base = 0x8000000;
2628
2629 fw.ver_major = bnx_RXP_b06FwReleaseMajor;
2630 fw.ver_minor = bnx_RXP_b06FwReleaseMinor;
2631 fw.ver_fix = bnx_RXP_b06FwReleaseFix;
2632 fw.start_addr = bnx_RXP_b06FwStartAddr;
2633
2634 fw.text_addr = bnx_RXP_b06FwTextAddr;
2635 fw.text_len = bnx_RXP_b06FwTextLen;
2636 fw.text_index = 0;
2637 fw.text = bnx_RXP_b06FwText;
2638
2639 fw.data_addr = bnx_RXP_b06FwDataAddr;
2640 fw.data_len = bnx_RXP_b06FwDataLen;
2641 fw.data_index = 0;
2642 fw.data = bnx_RXP_b06FwData;
2643
2644 fw.sbss_addr = bnx_RXP_b06FwSbssAddr;
2645 fw.sbss_len = bnx_RXP_b06FwSbssLen;
2646 fw.sbss_index = 0;
2647 fw.sbss = bnx_RXP_b06FwSbss;
2648
2649 fw.bss_addr = bnx_RXP_b06FwBssAddr;
2650 fw.bss_len = bnx_RXP_b06FwBssLen;
2651 fw.bss_index = 0;
2652 fw.bss = bnx_RXP_b06FwBss;
2653
2654 fw.rodata_addr = bnx_RXP_b06FwRodataAddr;
2655 fw.rodata_len = bnx_RXP_b06FwRodataLen;
2656 fw.rodata_index = 0;
2657 fw.rodata = bnx_RXP_b06FwRodata;
2658
2659 DBPRINT(sc, BNX_INFO_RESET, "Loading RX firmware.\n");
2660 bnx_load_cpu_fw(sc, &cpu_reg, &fw);
2661
2662
2663 cpu_reg.mode = BNX_TXP_CPU_MODE;
2664 cpu_reg.mode_value_halt = BNX_TXP_CPU_MODE_SOFT_HALT;
2665 cpu_reg.mode_value_sstep = BNX_TXP_CPU_MODE_STEP_ENA;
2666 cpu_reg.state = BNX_TXP_CPU_STATE;
2667 cpu_reg.state_value_clear = 0xffffff;
2668 cpu_reg.gpr0 = BNX_TXP_CPU_REG_FILE;
2669 cpu_reg.evmask = BNX_TXP_CPU_EVENT_MASK;
2670 cpu_reg.pc = BNX_TXP_CPU_PROGRAM_COUNTER;
2671 cpu_reg.inst = BNX_TXP_CPU_INSTRUCTION;
2672 cpu_reg.bp = BNX_TXP_CPU_HW_BREAKPOINT;
2673 cpu_reg.spad_base = BNX_TXP_SCRATCH;
2674 cpu_reg.mips_view_base = 0x8000000;
2675
2676 fw.ver_major = bnx_TXP_b06FwReleaseMajor;
2677 fw.ver_minor = bnx_TXP_b06FwReleaseMinor;
2678 fw.ver_fix = bnx_TXP_b06FwReleaseFix;
2679 fw.start_addr = bnx_TXP_b06FwStartAddr;
2680
2681 fw.text_addr = bnx_TXP_b06FwTextAddr;
2682 fw.text_len = bnx_TXP_b06FwTextLen;
2683 fw.text_index = 0;
2684 fw.text = bnx_TXP_b06FwText;
2685
2686 fw.data_addr = bnx_TXP_b06FwDataAddr;
2687 fw.data_len = bnx_TXP_b06FwDataLen;
2688 fw.data_index = 0;
2689 fw.data = bnx_TXP_b06FwData;
2690
2691 fw.sbss_addr = bnx_TXP_b06FwSbssAddr;
2692 fw.sbss_len = bnx_TXP_b06FwSbssLen;
2693 fw.sbss_index = 0;
2694 fw.sbss = bnx_TXP_b06FwSbss;
2695
2696 fw.bss_addr = bnx_TXP_b06FwBssAddr;
2697 fw.bss_len = bnx_TXP_b06FwBssLen;
2698 fw.bss_index = 0;
2699 fw.bss = bnx_TXP_b06FwBss;
2700
2701 fw.rodata_addr = bnx_TXP_b06FwRodataAddr;
2702 fw.rodata_len = bnx_TXP_b06FwRodataLen;
2703 fw.rodata_index = 0;
2704 fw.rodata = bnx_TXP_b06FwRodata;
2705
2706 DBPRINT(sc, BNX_INFO_RESET, "Loading TX firmware.\n");
2707 bnx_load_cpu_fw(sc, &cpu_reg, &fw);
2708
2709
2710 cpu_reg.mode = BNX_TPAT_CPU_MODE;
2711 cpu_reg.mode_value_halt = BNX_TPAT_CPU_MODE_SOFT_HALT;
2712 cpu_reg.mode_value_sstep = BNX_TPAT_CPU_MODE_STEP_ENA;
2713 cpu_reg.state = BNX_TPAT_CPU_STATE;
2714 cpu_reg.state_value_clear = 0xffffff;
2715 cpu_reg.gpr0 = BNX_TPAT_CPU_REG_FILE;
2716 cpu_reg.evmask = BNX_TPAT_CPU_EVENT_MASK;
2717 cpu_reg.pc = BNX_TPAT_CPU_PROGRAM_COUNTER;
2718 cpu_reg.inst = BNX_TPAT_CPU_INSTRUCTION;
2719 cpu_reg.bp = BNX_TPAT_CPU_HW_BREAKPOINT;
2720 cpu_reg.spad_base = BNX_TPAT_SCRATCH;
2721 cpu_reg.mips_view_base = 0x8000000;
2722
2723 fw.ver_major = bnx_TPAT_b06FwReleaseMajor;
2724 fw.ver_minor = bnx_TPAT_b06FwReleaseMinor;
2725 fw.ver_fix = bnx_TPAT_b06FwReleaseFix;
2726 fw.start_addr = bnx_TPAT_b06FwStartAddr;
2727
2728 fw.text_addr = bnx_TPAT_b06FwTextAddr;
2729 fw.text_len = bnx_TPAT_b06FwTextLen;
2730 fw.text_index = 0;
2731 fw.text = bnx_TPAT_b06FwText;
2732
2733 fw.data_addr = bnx_TPAT_b06FwDataAddr;
2734 fw.data_len = bnx_TPAT_b06FwDataLen;
2735 fw.data_index = 0;
2736 fw.data = bnx_TPAT_b06FwData;
2737
2738 fw.sbss_addr = bnx_TPAT_b06FwSbssAddr;
2739 fw.sbss_len = bnx_TPAT_b06FwSbssLen;
2740 fw.sbss_index = 0;
2741 fw.sbss = bnx_TPAT_b06FwSbss;
2742
2743 fw.bss_addr = bnx_TPAT_b06FwBssAddr;
2744 fw.bss_len = bnx_TPAT_b06FwBssLen;
2745 fw.bss_index = 0;
2746 fw.bss = bnx_TPAT_b06FwBss;
2747
2748 fw.rodata_addr = bnx_TPAT_b06FwRodataAddr;
2749 fw.rodata_len = bnx_TPAT_b06FwRodataLen;
2750 fw.rodata_index = 0;
2751 fw.rodata = bnx_TPAT_b06FwRodata;
2752
2753 DBPRINT(sc, BNX_INFO_RESET, "Loading TPAT firmware.\n");
2754 bnx_load_cpu_fw(sc, &cpu_reg, &fw);
2755
2756
2757 cpu_reg.mode = BNX_COM_CPU_MODE;
2758 cpu_reg.mode_value_halt = BNX_COM_CPU_MODE_SOFT_HALT;
2759 cpu_reg.mode_value_sstep = BNX_COM_CPU_MODE_STEP_ENA;
2760 cpu_reg.state = BNX_COM_CPU_STATE;
2761 cpu_reg.state_value_clear = 0xffffff;
2762 cpu_reg.gpr0 = BNX_COM_CPU_REG_FILE;
2763 cpu_reg.evmask = BNX_COM_CPU_EVENT_MASK;
2764 cpu_reg.pc = BNX_COM_CPU_PROGRAM_COUNTER;
2765 cpu_reg.inst = BNX_COM_CPU_INSTRUCTION;
2766 cpu_reg.bp = BNX_COM_CPU_HW_BREAKPOINT;
2767 cpu_reg.spad_base = BNX_COM_SCRATCH;
2768 cpu_reg.mips_view_base = 0x8000000;
2769
2770 fw.ver_major = bnx_COM_b06FwReleaseMajor;
2771 fw.ver_minor = bnx_COM_b06FwReleaseMinor;
2772 fw.ver_fix = bnx_COM_b06FwReleaseFix;
2773 fw.start_addr = bnx_COM_b06FwStartAddr;
2774
2775 fw.text_addr = bnx_COM_b06FwTextAddr;
2776 fw.text_len = bnx_COM_b06FwTextLen;
2777 fw.text_index = 0;
2778 fw.text = bnx_COM_b06FwText;
2779
2780 fw.data_addr = bnx_COM_b06FwDataAddr;
2781 fw.data_len = bnx_COM_b06FwDataLen;
2782 fw.data_index = 0;
2783 fw.data = bnx_COM_b06FwData;
2784
2785 fw.sbss_addr = bnx_COM_b06FwSbssAddr;
2786 fw.sbss_len = bnx_COM_b06FwSbssLen;
2787 fw.sbss_index = 0;
2788 fw.sbss = bnx_COM_b06FwSbss;
2789
2790 fw.bss_addr = bnx_COM_b06FwBssAddr;
2791 fw.bss_len = bnx_COM_b06FwBssLen;
2792 fw.bss_index = 0;
2793 fw.bss = bnx_COM_b06FwBss;
2794
2795 fw.rodata_addr = bnx_COM_b06FwRodataAddr;
2796 fw.rodata_len = bnx_COM_b06FwRodataLen;
2797 fw.rodata_index = 0;
2798 fw.rodata = bnx_COM_b06FwRodata;
2799
2800 DBPRINT(sc, BNX_INFO_RESET, "Loading COM firmware.\n");
2801 bnx_load_cpu_fw(sc, &cpu_reg, &fw);
2802 }
2803
2804
2805
2806
2807
2808
2809
2810
2811
2812 void
2813 bnx_init_context(struct bnx_softc *sc)
2814 {
2815 u_int32_t vcid;
2816
2817 vcid = 96;
2818 while (vcid) {
2819 u_int32_t vcid_addr, pcid_addr, offset;
2820
2821 vcid--;
2822
2823 vcid_addr = GET_CID_ADDR(vcid);
2824 pcid_addr = vcid_addr;
2825
2826 REG_WR(sc, BNX_CTX_VIRT_ADDR, 0x00);
2827 REG_WR(sc, BNX_CTX_PAGE_TBL, pcid_addr);
2828
2829
2830 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2831 CTX_WR(sc, 0x00, offset, 0);
2832
2833 REG_WR(sc, BNX_CTX_VIRT_ADDR, vcid_addr);
2834 REG_WR(sc, BNX_CTX_PAGE_TBL, pcid_addr);
2835 }
2836 }
2837
2838
2839
2840
2841
2842
2843
2844 void
2845 bnx_get_mac_addr(struct bnx_softc *sc)
2846 {
2847 u_int32_t mac_lo = 0, mac_hi = 0;
2848
2849
2850
2851
2852
2853
2854
2855
2856
2857
2858 mac_hi = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_PORT_HW_CFG_MAC_UPPER);
2859 mac_lo = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_PORT_HW_CFG_MAC_LOWER);
2860
2861 if ((mac_lo == 0) && (mac_hi == 0)) {
2862 BNX_PRINTF(sc, "%s(%d): Invalid Ethernet address!\n",
2863 __FILE__, __LINE__);
2864 } else {
2865 sc->eaddr[0] = (u_char)(mac_hi >> 8);
2866 sc->eaddr[1] = (u_char)(mac_hi >> 0);
2867 sc->eaddr[2] = (u_char)(mac_lo >> 24);
2868 sc->eaddr[3] = (u_char)(mac_lo >> 16);
2869 sc->eaddr[4] = (u_char)(mac_lo >> 8);
2870 sc->eaddr[5] = (u_char)(mac_lo >> 0);
2871 }
2872
2873 DBPRINT(sc, BNX_INFO, "Permanent Ethernet address = "
2874 "%6D\n", sc->eaddr, ":");
2875 }
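
The permanent MAC address is stored in two 32-bit shared-memory words: the upper word carries bytes 0 and 1, the lower word bytes 2 through 5. A standalone sketch of that unpacking with hardcoded example values (nothing is read from hardware here):

/* Standalone sketch: unpack a MAC address from two 32-bit words. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t mac_hi = 0x00000102;    /* example: bytes 01:02       */
    uint32_t mac_lo = 0x03040506;    /* example: bytes 03:04:05:06 */
    uint8_t  eaddr[6];

    eaddr[0] = (uint8_t)(mac_hi >> 8);
    eaddr[1] = (uint8_t)(mac_hi >> 0);
    eaddr[2] = (uint8_t)(mac_lo >> 24);
    eaddr[3] = (uint8_t)(mac_lo >> 16);
    eaddr[4] = (uint8_t)(mac_lo >> 8);
    eaddr[5] = (uint8_t)(mac_lo >> 0);

    printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
        eaddr[0], eaddr[1], eaddr[2], eaddr[3], eaddr[4], eaddr[5]);
    return 0;
}

bnx_set_mac_addr() below performs the inverse packing when programming the EMAC match registers.
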
2876
2877
2878
2879
2880
2881
2882
2883 void
2884 bnx_set_mac_addr(struct bnx_softc *sc)
2885 {
2886 u_int32_t val;
2887 u_int8_t *mac_addr = sc->eaddr;
2888
2889 DBPRINT(sc, BNX_INFO, "Setting Ethernet address = "
2890 "%6D\n", sc->eaddr, ":");
2891
2892 val = (mac_addr[0] << 8) | mac_addr[1];
2893
2894 REG_WR(sc, BNX_EMAC_MAC_MATCH0, val);
2895
2896 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2897 (mac_addr[4] << 8) | mac_addr[5];
2898
2899 REG_WR(sc, BNX_EMAC_MAC_MATCH1, val);
2900 }
2901
2902
2903
2904
2905
2906
2907
2908 void
2909 bnx_stop(struct bnx_softc *sc)
2910 {
2911 struct ifnet *ifp = &sc->arpcom.ac_if;
2912 struct mii_data *mii = NULL;
2913
2914 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
2915
2916 mii = &sc->bnx_mii;
2917
2918 timeout_del(&sc->bnx_timeout);
2919
2920 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2921
2922
2923 REG_WR(sc, BNX_MISC_ENABLE_CLR_BITS, 0x5ffffff);
2924 REG_RD(sc, BNX_MISC_ENABLE_CLR_BITS);
2925 DELAY(20);
2926
2927 bnx_disable_intr(sc);
2928
2929
2930 bnx_reset(sc, BNX_DRV_MSG_CODE_SUSPEND_NO_WOL);
2931
2932
2933 bnx_free_rx_chain(sc);
2934
2935
2936 bnx_free_tx_chain(sc);
2937
2938 ifp->if_timer = 0;
2939
2940 sc->bnx_link = 0;
2941
2942 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
2943
2944 bnx_mgmt_init(sc);
2945 }
2946
2947 int
2948 bnx_reset(struct bnx_softc *sc, u_int32_t reset_code)
2949 {
2950 u_int32_t val;
2951 int i, rc = 0;
2952
2953 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
2954
2955
2956 REG_WR(sc, BNX_MISC_ENABLE_CLR_BITS,
2957 BNX_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
2958 BNX_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
2959 BNX_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
2960 BNX_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
2961 val = REG_RD(sc, BNX_MISC_ENABLE_CLR_BITS);
2962 DELAY(5);
2963
2964
2965 sc->bnx_fw_timed_out = 0;
2966
2967
2968 rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT0 | reset_code);
2969 if (rc)
2970 goto bnx_reset_exit;
2971
2972
2973 REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_RESET_SIGNATURE,
2974 BNX_DRV_RESET_SIGNATURE_MAGIC);
2975
2976
2977 val = REG_RD(sc, BNX_MISC_ID);
2978
2979
2980 val = BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ |
2981 BNX_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
2982 BNX_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
2983 REG_WR(sc, BNX_PCICFG_MISC_CONFIG, val);
2984
2985
2986 for (i = 0; i < 10; i++) {
2987 val = REG_RD(sc, BNX_PCICFG_MISC_CONFIG);
2988 if ((val & (BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ |
2989 BNX_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
2990 break;
2991
2992 DELAY(10);
2993 }
2994
2995
2996 if (val & (BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ |
2997 BNX_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
2998 BNX_PRINTF(sc, "%s(%d): Reset failed!\n", __FILE__, __LINE__);
2999 rc = EBUSY;
3000 goto bnx_reset_exit;
3001 }
3002
3003
3004 val = REG_RD(sc, BNX_PCI_SWAP_DIAG0);
3005 if (val != 0x01020304) {
3006 BNX_PRINTF(sc, "%s(%d): Byte swap is incorrect!\n",
3007 __FILE__, __LINE__);
3008 rc = ENODEV;
3009 goto bnx_reset_exit;
3010 }
3011
3012
3013 sc->bnx_fw_timed_out = 0;
3014
3015
3016 rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT1 | reset_code);
3017 if (rc)
3018 BNX_PRINTF(sc, "%s(%d): Firmware did not complete "
3019 "initialization!\n", __FILE__, __LINE__);
3020
3021 bnx_reset_exit:
3022 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3023
3024 return (rc);
3025 }
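
After requesting the core reset, the code polls BNX_PCICFG_MISC_CONFIG until the request/busy bits clear and then reads BNX_PCI_SWAP_DIAG0, which must return the fixed pattern 0x01020304 once the byte-swap configuration is correct. The standalone sketch below shows the idea behind such a known-pattern check in plain C: reinterpreting a fixed byte sequence as a 32-bit word exposes how the host orders bytes. It only illustrates the sanity check, not the chip's mechanism.

/* Standalone sketch: detect byte ordering from a known byte pattern. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
    uint8_t  bytes[4] = { 0x01, 0x02, 0x03, 0x04 };
    uint32_t word;

    memcpy(&word, bytes, sizeof(word));

    if (word == 0x01020304)
        printf("pattern reads as-is (big-endian view)\n");
    else if (word == 0x04030201)
        printf("pattern reads reversed (little-endian view)\n");
    else
        printf("unexpected value 0x%08x\n", word);
    return 0;
}
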
3026
3027 int
3028 bnx_chipinit(struct bnx_softc *sc)
3029 {
3030 struct pci_attach_args *pa = &(sc->bnx_pa);
3031 u_int32_t val;
3032 int rc = 0;
3033
3034 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3035
3036
3037 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_MASK_INT);
3038
3039
3040
3041 val = BNX_DMA_CONFIG_DATA_BYTE_SWAP |
3042 BNX_DMA_CONFIG_DATA_WORD_SWAP |
3043 #if BYTE_ORDER == BIG_ENDIAN
3044 BNX_DMA_CONFIG_CNTL_BYTE_SWAP |
3045 #endif
3046 BNX_DMA_CONFIG_CNTL_WORD_SWAP |
3047 DMA_READ_CHANS << 12 |
3048 DMA_WRITE_CHANS << 16;
3049
3050 val |= (0x2 << 20) | BNX_DMA_CONFIG_CNTL_PCI_COMP_DLY;
3051
3052 if ((sc->bnx_flags & BNX_PCIX_FLAG) && (sc->bus_speed_mhz == 133))
3053 val |= BNX_DMA_CONFIG_PCI_FAST_CLK_CMP;
3054
3055
3056
3057
3058
3059
3060 if ((BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5706) &&
3061 (BNX_CHIP_ID(sc) != BNX_CHIP_ID_5706_A0) &&
3062 !(sc->bnx_flags & BNX_PCIX_FLAG))
3063 val |= BNX_DMA_CONFIG_CNTL_PING_PONG_DMA;
3064
3065 REG_WR(sc, BNX_DMA_CONFIG, val);
3066
3067
3068 if (sc->bnx_flags & BNX_PCIX_FLAG) {
3069 u_int16_t val;
3070
3071 val = pci_conf_read(pa->pa_pc, pa->pa_tag, BNX_PCI_PCIX_CMD);
3072 pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCI_PCIX_CMD,
3073 val & ~0x2);
3074 }
3075
3076
3077 REG_WR(sc, BNX_MISC_ENABLE_SET_BITS,
3078 BNX_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3079 BNX_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3080 BNX_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3081
3082
3083 bnx_init_context(sc);
3084
3085
3086 bnx_init_cpus(sc);
3087
3088
3089 if (bnx_init_nvram(sc)) {
3090 rc = ENODEV;
3091 goto bnx_chipinit_exit;
3092 }
3093
3094
3095 val = REG_RD(sc, BNX_MQ_CONFIG);
3096 val &= ~BNX_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3097 val |= BNX_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3098 REG_WR(sc, BNX_MQ_CONFIG, val);
3099
3100 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3101 REG_WR(sc, BNX_MQ_KNL_BYP_WIND_START, val);
3102 REG_WR(sc, BNX_MQ_KNL_WIND_END, val);
3103
3104 val = (BCM_PAGE_BITS - 8) << 24;
3105 REG_WR(sc, BNX_RV2P_CONFIG, val);
3106
3107
3108 val = REG_RD(sc, BNX_TBDR_CONFIG);
3109 val &= ~BNX_TBDR_CONFIG_PAGE_SIZE;
3110 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3111 REG_WR(sc, BNX_TBDR_CONFIG, val);
3112
3113 bnx_chipinit_exit:
3114 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3115
3116 return(rc);
3117 }
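
The BNX_DMA_CONFIG value assembled in bnx_chipinit() packs several independent fields into one 32-bit register: the swap-control flag bits, the DMA read channel count at bit 12, the write channel count at bit 16, and a small delay field at bit 20. A standalone sketch of this style of register composition is below; the flag bit positions are illustrative, and only the 12/16/20 shifts are taken from the code above.

/* Standalone sketch: compose a 32-bit configuration word from fields. */
#include <stdio.h>
#include <stdint.h>

#define CFG_DATA_BYTE_SWAP  (1u << 0)    /* illustrative flag positions */
#define CFG_DATA_WORD_SWAP  (1u << 1)
#define CFG_CNTL_WORD_SWAP  (1u << 3)

int main(void)
{
    uint32_t read_chans = 6, write_chans = 6;    /* example channel counts */
    uint32_t val;

    val = CFG_DATA_BYTE_SWAP | CFG_DATA_WORD_SWAP | CFG_CNTL_WORD_SWAP |
        (read_chans << 12) | (write_chans << 16) | (0x2u << 20);

    printf("config word = 0x%08x\n", val);
    return 0;
}
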
3118
3119
3120
3121
3122
3123
3124
3125 int
3126 bnx_blockinit(struct bnx_softc *sc)
3127 {
3128 u_int32_t reg, val;
3129 int rc = 0;
3130
3131 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3132
3133
3134 bnx_set_mac_addr(sc);
3135
3136
3137 val = sc->eaddr[0] + (sc->eaddr[1] << 8) + (sc->eaddr[2] << 16) +
3138 (sc->eaddr[3]) + (sc->eaddr[4] << 8) + (sc->eaddr[5] << 16);
3139 REG_WR(sc, BNX_EMAC_BACKOFF_SEED, val);
3140
3141 sc->last_status_idx = 0;
3142 sc->rx_mode = BNX_EMAC_RX_MODE_SORT_MODE;
3143
3144
3145 REG_WR(sc, BNX_EMAC_ATTENTION_ENA, BNX_EMAC_ATTENTION_ENA_LINK);
3146
3147
3148 REG_WR(sc, BNX_HC_STATUS_ADDR_L, (u_int32_t)(sc->status_block_paddr));
3149 REG_WR(sc, BNX_HC_STATUS_ADDR_H,
3150 (u_int32_t)((u_int64_t)sc->status_block_paddr >> 32));
3151
3152
3153 REG_WR(sc, BNX_HC_STATISTICS_ADDR_L,
3154 (u_int32_t)(sc->stats_block_paddr));
3155 REG_WR(sc, BNX_HC_STATISTICS_ADDR_H,
3156 (u_int32_t)((u_int64_t)sc->stats_block_paddr >> 32));
3157
3158
3159 REG_WR(sc, BNX_HC_TX_QUICK_CONS_TRIP, (sc->bnx_tx_quick_cons_trip_int
3160 << 16) | sc->bnx_tx_quick_cons_trip);
3161 REG_WR(sc, BNX_HC_RX_QUICK_CONS_TRIP, (sc->bnx_rx_quick_cons_trip_int
3162 << 16) | sc->bnx_rx_quick_cons_trip);
3163 REG_WR(sc, BNX_HC_COMP_PROD_TRIP, (sc->bnx_comp_prod_trip_int << 16) |
3164 sc->bnx_comp_prod_trip);
3165 REG_WR(sc, BNX_HC_TX_TICKS, (sc->bnx_tx_ticks_int << 16) |
3166 sc->bnx_tx_ticks);
3167 REG_WR(sc, BNX_HC_RX_TICKS, (sc->bnx_rx_ticks_int << 16) |
3168 sc->bnx_rx_ticks);
3169 REG_WR(sc, BNX_HC_COM_TICKS, (sc->bnx_com_ticks_int << 16) |
3170 sc->bnx_com_ticks);
3171 REG_WR(sc, BNX_HC_CMD_TICKS, (sc->bnx_cmd_ticks_int << 16) |
3172 sc->bnx_cmd_ticks);
3173 REG_WR(sc, BNX_HC_STATS_TICKS, (sc->bnx_stats_ticks & 0xffff00));
3174 REG_WR(sc, BNX_HC_STAT_COLLECT_TICKS, 0xbb8);
3175 REG_WR(sc, BNX_HC_CONFIG,
3176 (BNX_HC_CONFIG_RX_TMR_MODE | BNX_HC_CONFIG_TX_TMR_MODE |
3177 BNX_HC_CONFIG_COLLECT_STATS));
3178
3179
3180 REG_WR(sc, BNX_HC_COMMAND, BNX_HC_COMMAND_CLR_STAT_NOW);
3181
3182
3183 reg = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_DEV_INFO_SIGNATURE);
3184
3185 DBRUNIF(DB_RANDOMTRUE(bnx_debug_bootcode_running_failure),
3186 BNX_PRINTF(sc, "%s(%d): Simulating bootcode failure.\n",
3187 __FILE__, __LINE__); reg = 0);
3188
3189 if ((reg & BNX_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
3190 BNX_DEV_INFO_SIGNATURE_MAGIC) {
3191 BNX_PRINTF(sc, "%s(%d): Bootcode not running! Found: 0x%08X, "
3192 "Expected: 08%08X\n", __FILE__, __LINE__,
3193 (reg & BNX_DEV_INFO_SIGNATURE_MAGIC_MASK),
3194 BNX_DEV_INFO_SIGNATURE_MAGIC);
3195 rc = ENODEV;
3196 goto bnx_blockinit_exit;
3197 }
3198
3199
3200 reg = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_PORT_FEATURE);
3201 if (reg & (BNX_PORT_FEATURE_ASF_ENABLED |
3202 BNX_PORT_FEATURE_IMD_ENABLED)) {
3203 DBPRINT(sc, BNX_INFO, "Management F/W Enabled.\n");
3204 sc->bnx_flags |= BNX_MFW_ENABLE_FLAG;
3205 }
3206
3207 sc->bnx_fw_ver = REG_RD_IND(sc, sc->bnx_shmem_base +
3208 BNX_DEV_INFO_BC_REV);
3209
3210 DBPRINT(sc, BNX_INFO, "bootcode rev = 0x%08X\n", sc->bnx_fw_ver);
3211
3212
3213 rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT2 | BNX_DRV_MSG_CODE_RESET);
3214
3215
3216 REG_WR(sc, BNX_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3217
3218
3219 REG_WR(sc, BNX_MISC_ENABLE_SET_BITS, 0x5ffffff);
3220 REG_RD(sc, BNX_MISC_ENABLE_SET_BITS);
3221 DELAY(20);
3222
3223 bnx_blockinit_exit:
3224 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3225
3226 return (rc);
3227 }
3228
3229
3230
3231
3232
3233
3234
3235
3236
3237
3238
3239 int
3240 bnx_get_buf(struct bnx_softc *sc, struct mbuf *m, u_int16_t *prod,
3241 u_int16_t *chain_prod, u_int32_t *prod_bseq)
3242 {
3243 bus_dmamap_t map;
3244 struct mbuf *m_new = NULL;
3245 struct rx_bd *rxbd;
3246 int i, rc = 0;
3247 u_int32_t addr;
3248 #ifdef BNX_DEBUG
3249 u_int16_t debug_chain_prod = *chain_prod;
3250 #endif
3251 u_int16_t first_chain_prod;
3252
3253 DBPRINT(sc, (BNX_VERBOSE_RESET | BNX_VERBOSE_RECV), "Entering %s()\n",
3254 __FUNCTION__);
3255
3256
3257 DBRUNIF((*chain_prod > MAX_RX_BD),
3258 printf("%s: RX producer out of range: 0x%04X > 0x%04X\n",
3259 sc->bnx_dev.dv_xname, *chain_prod, (u_int16_t) MAX_RX_BD));
3260
3261 DBPRINT(sc, BNX_VERBOSE_RECV, "%s(enter): prod = 0x%04X, chain_prod = "
3262 "0x%04X, prod_bseq = 0x%08X\n", __FUNCTION__, *prod, *chain_prod,
3263 *prod_bseq);
3264
3265 if (m == NULL) {
3266 DBRUNIF(DB_RANDOMTRUE(bnx_debug_mbuf_allocation_failure),
3267 BNX_PRINTF(sc, "Simulating mbuf allocation failure.\n");
3268
3269 sc->mbuf_alloc_failed++;
3270 rc = ENOBUFS;
3271 goto bnx_get_buf_exit);
3272
3273
3274 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
3275 if (m_new == NULL) {
3276 DBPRINT(sc, BNX_WARN,
3277 "%s(%d): RX mbuf header allocation failed!\n",
3278 __FILE__, __LINE__);
3279
3280 DBRUNIF(1, sc->mbuf_alloc_failed++);
3281
3282 rc = ENOBUFS;
3283 goto bnx_get_buf_exit;
3284 }
3285
3286 DBRUNIF(1, sc->rx_mbuf_alloc++);
3287 MCLGET(m_new, M_DONTWAIT);
3288 if (!(m_new->m_flags & M_EXT)) {
3289 DBPRINT(sc, BNX_WARN,
3290 "%s(%d): RX mbuf chain allocation failed!\n",
3291 __FILE__, __LINE__);
3292
3293 m_freem(m_new);
3294
3295 DBRUNIF(1, sc->rx_mbuf_alloc--);
3296 DBRUNIF(1, sc->mbuf_alloc_failed++);
3297
3298 rc = ENOBUFS;
3299 goto bnx_get_buf_exit;
3300 }
3301
3302 m_new->m_len = m_new->m_pkthdr.len = sc->mbuf_alloc_size;
3303 } else {
3304 m_new = m;
3305 m_new->m_len = m_new->m_pkthdr.len = sc->mbuf_alloc_size;
3306 m_new->m_data = m_new->m_ext.ext_buf;
3307 }
3308
3309
3310 map = sc->rx_mbuf_map[*chain_prod];
3311 first_chain_prod = *chain_prod;
3312 if (bus_dmamap_load_mbuf(sc->bnx_dmatag, map, m_new, BUS_DMA_NOWAIT)) {
3313 BNX_PRINTF(sc, "%s(%d): Error mapping mbuf into RX chain!\n",
3314 __FILE__, __LINE__);
3315
3316 m_freem(m_new);
3317
3318 DBRUNIF(1, sc->rx_mbuf_alloc--);
3319
3320 rc = ENOBUFS;
3321 goto bnx_get_buf_exit;
3322 }
3323
3324
3325 DBRUNIF((sc->free_rx_bd > USABLE_RX_BD),
3326 printf("%s: Too many free rx_bd (0x%04X > 0x%04X)!\n",
3327 sc->bnx_dev.dv_xname, sc->free_rx_bd, (u_int16_t) USABLE_RX_BD));
3328
3329 DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
3330 sc->rx_low_watermark = sc->free_rx_bd);
3331
3332
3333 rxbd = &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];
3334
3335 addr = (u_int32_t)(map->dm_segs[0].ds_addr);
3336 rxbd->rx_bd_haddr_lo = htole32(addr);
3337 addr = (u_int32_t)((u_int64_t)map->dm_segs[0].ds_addr >> 32);
3338 rxbd->rx_bd_haddr_hi = htole32(addr);
3339 rxbd->rx_bd_len = htole32(map->dm_segs[0].ds_len);
3340 rxbd->rx_bd_flags = htole32(RX_BD_FLAGS_START);
3341 *prod_bseq += map->dm_segs[0].ds_len;
3342
3343 for (i = 1; i < map->dm_nsegs; i++) {
3344 *prod = NEXT_RX_BD(*prod);
3345 *chain_prod = RX_CHAIN_IDX(*prod);
3346
3347 rxbd =
3348 &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];
3349
3350 addr = (u_int32_t)(map->dm_segs[i].ds_addr);
3351 rxbd->rx_bd_haddr_lo = htole32(addr);
3352 addr = (u_int32_t)((u_int64_t)map->dm_segs[i].ds_addr >> 32);
3353 rxbd->rx_bd_haddr_hi = htole32(addr);
3354 rxbd->rx_bd_len = htole32(map->dm_segs[i].ds_len);
3355 rxbd->rx_bd_flags = 0;
3356 *prod_bseq += map->dm_segs[i].ds_len;
3357 }
3358
3359 rxbd->rx_bd_flags |= htole32(RX_BD_FLAGS_END);
3360
3361
3362
3363
3364
3365
3366 sc->rx_mbuf_ptr[*chain_prod] = m_new;
3367 sc->rx_mbuf_map[first_chain_prod] = sc->rx_mbuf_map[*chain_prod];
3368 sc->rx_mbuf_map[*chain_prod] = map;
3369 sc->free_rx_bd -= map->dm_nsegs;
3370
3371 DBRUN(BNX_VERBOSE_RECV, bnx_dump_rx_mbuf_chain(sc, debug_chain_prod,
3372 map->dm_nsegs));
3373
3374 DBPRINT(sc, BNX_VERBOSE_RECV, "%s(exit): prod = 0x%04X, chain_prod "
3375 "= 0x%04X, prod_bseq = 0x%08X\n", __FUNCTION__, *prod,
3376 *chain_prod, *prod_bseq);
3377
3378 bnx_get_buf_exit:
3379 DBPRINT(sc, (BNX_VERBOSE_RESET | BNX_VERBOSE_RECV), "Exiting %s()\n",
3380 __FUNCTION__);
3381
3382 return(rc);
3383 }
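
Each rx_bd receives the buffer's bus address as two 32-bit halves (haddr_lo and haddr_hi), each converted to little-endian with htole32() because that is the byte order the chip expects. The standalone sketch below shows the split with an invented descriptor struct and an example address; the endian conversion is only noted in a comment to keep the sketch portable.

/* Standalone sketch: split a 64-bit bus address into descriptor halves. */
#include <stdio.h>
#include <stdint.h>

struct rxbd_sketch {              /* illustrative layout, not the chip's */
    uint32_t haddr_lo;
    uint32_t haddr_hi;
    uint32_t len;
    uint32_t flags;
};

int main(void)
{
    uint64_t bus_addr = 0x000000012345f000ULL;   /* example DMA address */
    struct rxbd_sketch bd;

    bd.haddr_lo = (uint32_t)bus_addr;            /* low 32 bits  */
    bd.haddr_hi = (uint32_t)(bus_addr >> 32);    /* high 32 bits */
    bd.len      = 2048;                          /* example buffer length */
    bd.flags    = 0;

    /* The driver additionally wraps each store in htole32(). */
    printf("hi = 0x%08x, lo = 0x%08x\n", bd.haddr_hi, bd.haddr_lo);
    return 0;
}
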
3384
3385
3386
3387
3388
3389
3390
3391 int
3392 bnx_init_tx_chain(struct bnx_softc *sc)
3393 {
3394 struct tx_bd *txbd;
3395 u_int32_t val, addr;
3396 int i, rc = 0;
3397
3398 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3399
3400
3401 sc->tx_prod = 0;
3402 sc->tx_cons = 0;
3403 sc->tx_prod_bseq = 0;
3404 sc->used_tx_bd = 0;
3405 DBRUNIF(1, sc->tx_hi_watermark = USABLE_TX_BD);
3406
3407
3408
3409
3410
3411
3412
3413
3414
3415
3416
3417
3418 for (i = 0; i < TX_PAGES; i++) {
3419 int j;
3420
3421 txbd = &sc->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE];
3422
3423
3424 if (i == (TX_PAGES - 1))
3425 j = 0;
3426 else
3427 j = i + 1;
3428
3429 addr = (u_int32_t)(sc->tx_bd_chain_paddr[j]);
3430 txbd->tx_bd_haddr_lo = htole32(addr);
3431 addr = (u_int32_t)((u_int64_t)sc->tx_bd_chain_paddr[j] >> 32);
3432 txbd->tx_bd_haddr_hi = htole32(addr);
3433 }
3434
3435
3436
3437
3438 val = BNX_L2CTX_TYPE_TYPE_L2;
3439 val |= BNX_L2CTX_TYPE_SIZE_L2;
3440 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TYPE, val);
3441
3442 val = BNX_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
3443 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_CMD_TYPE, val);
3444
3445
3446 val = (u_int32_t)((u_int64_t)sc->tx_bd_chain_paddr[0] >> 32);
3447 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TBDR_BHADDR_HI, val);
3448 val = (u_int32_t)(sc->tx_bd_chain_paddr[0]);
3449 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TBDR_BHADDR_LO, val);
3450
3451 DBRUN(BNX_VERBOSE_SEND, bnx_dump_tx_chain(sc, 0, TOTAL_TX_BD));
3452
3453 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3454
3455 return(rc);
3456 }
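
The TX (and RX) rings are built from multiple pages of descriptors; the last slot on every page is not a data descriptor but a chain link holding the bus address of the next page, and the final page links back to page 0. Ring indices therefore have to skip those link slots when they advance. A standalone sketch of that index arithmetic is below, using example sizes rather than the driver's BCM_PAGE_SIZE-derived constants; NEXT_TX_BD(), TX_PAGE() and TX_IDX() do the equivalent in the real code.

/* Standalone sketch: walk a paged descriptor ring, skipping chain-link slots. */
#include <stdio.h>

#define BDS_PER_PAGE 256                     /* example slots per page        */
#define LINK_SLOT    (BDS_PER_PAGE - 1)      /* last slot points to next page */
#define PAGES        4
#define TOTAL_BDS    (BDS_PER_PAGE * PAGES)

/* Advance an index by one descriptor, hopping over each page's link slot. */
static int next_bd(int x)
{
    x++;
    if ((x & LINK_SLOT) == LINK_SLOT)        /* landed on the chain link? */
        x++;
    return x % TOTAL_BDS;                    /* last page wraps to page 0 */
}

int main(void)
{
    int i, idx = 253;

    for (i = 0; i < 4; i++) {
        printf("idx %3d -> page %d, slot %3d\n",
            idx, idx / BDS_PER_PAGE, idx & LINK_SLOT);
        idx = next_bd(idx);
    }
    return 0;
}
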
3457
3458
3459
3460
3461
3462
3463
3464 void
3465 bnx_free_tx_chain(struct bnx_softc *sc)
3466 {
3467 int i;
3468
3469 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3470
3471
3472 for (i = 0; i < TOTAL_TX_BD; i++) {
3473 if (sc->tx_mbuf_ptr[i] != NULL) {
3474 if (sc->tx_mbuf_map != NULL)
3475 bus_dmamap_sync(sc->bnx_dmatag,
3476 sc->tx_mbuf_map[i], 0,
3477 sc->tx_mbuf_map[i]->dm_mapsize,
3478 BUS_DMASYNC_POSTWRITE);
3479 m_freem(sc->tx_mbuf_ptr[i]);
3480 sc->tx_mbuf_ptr[i] = NULL;
3481 DBRUNIF(1, sc->tx_mbuf_alloc--);
3482 }
3483 }
3484
3485
3486 for (i = 0; i < TX_PAGES; i++)
3487 bzero((char *)sc->tx_bd_chain[i], BNX_TX_CHAIN_PAGE_SZ);
3488
3489
3490 DBRUNIF((sc->tx_mbuf_alloc),
3491 printf("%s: Memory leak! Lost %d mbufs from tx chain!\n",
3492 sc->bnx_dev.dv_xname, sc->tx_mbuf_alloc));
3493
3494 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3495 }
3496
3497
3498
3499
3500
3501
3502
3503 int
3504 bnx_init_rx_chain(struct bnx_softc *sc)
3505 {
3506 struct rx_bd *rxbd;
3507 int i, rc = 0;
3508 u_int16_t prod, chain_prod;
3509 u_int32_t prod_bseq, val, addr;
3510
3511 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3512
3513
3514 sc->rx_prod = 0;
3515 sc->rx_cons = 0;
3516 sc->rx_prod_bseq = 0;
3517 sc->free_rx_bd = BNX_RX_SLACK_SPACE;
3518 DBRUNIF(1, sc->rx_low_watermark = USABLE_RX_BD);
3519
3520
3521 for (i = 0; i < RX_PAGES; i++) {
3522 int j;
3523
3524 rxbd = &sc->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE];
3525
3526
3527 if (i == (RX_PAGES - 1))
3528 j = 0;
3529 else
3530 j = i + 1;
3531
3532
3533 addr = (u_int32_t)((u_int64_t)sc->rx_bd_chain_paddr[j] >> 32);
3534 rxbd->rx_bd_haddr_hi = htole32(addr);
3535 addr = (u_int32_t)(sc->rx_bd_chain_paddr[j]);
3536 rxbd->rx_bd_haddr_lo = htole32(addr);
3537 }
3538
3539
3540 val = BNX_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
3541 val |= BNX_L2CTX_CTX_TYPE_SIZE_L2;
3542 val |= 0x02 << 8;
3543 CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_CTX_TYPE, val);
3544
3545
3546 val = (u_int32_t)((u_int64_t)sc->rx_bd_chain_paddr[0] >> 32);
3547 CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_NX_BDHADDR_HI, val);
3548 val = (u_int32_t)(sc->rx_bd_chain_paddr[0]);
3549 CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_NX_BDHADDR_LO, val);
3550
3551
3552 prod = prod_bseq = 0;
3553 while (prod < BNX_RX_SLACK_SPACE) {
3554 chain_prod = RX_CHAIN_IDX(prod);
3555 if (bnx_get_buf(sc, NULL, &prod, &chain_prod, &prod_bseq)) {
3556 BNX_PRINTF(sc, "Error filling RX chain: rx_bd[0x%04X]!\n",
3557 chain_prod);
3558 rc = ENOBUFS;
3559 break;
3560 }
3561 prod = NEXT_RX_BD(prod);
3562 }
3563
3564
3565 sc->rx_prod = prod;
3566 sc->rx_prod_bseq = prod_bseq;
3567
3568 for (i = 0; i < RX_PAGES; i++)
3569 bus_dmamap_sync(sc->bnx_dmatag, sc->rx_bd_chain_map[i], 0,
3570 sc->rx_bd_chain_map[i]->dm_mapsize,
3571 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3572
3573
3574 REG_WR16(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BDIDX, sc->rx_prod);
3575 REG_WR(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BSEQ, sc->rx_prod_bseq);
3576
3577 DBRUN(BNX_VERBOSE_RECV, bnx_dump_rx_chain(sc, 0, TOTAL_RX_BD));
3578
3579 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3580
3581 return(rc);
3582 }
3583
3584
3585
3586
3587
3588
3589
3590 void
3591 bnx_free_rx_chain(struct bnx_softc *sc)
3592 {
3593 int i;
3594
3595 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3596
3597
3598 for (i = 0; i < TOTAL_RX_BD; i++) {
3599 if (sc->rx_mbuf_ptr[i] != NULL) {
3600 if (sc->rx_mbuf_map[i] != NULL)
3601 bus_dmamap_sync(sc->bnx_dmatag,
3602 sc->rx_mbuf_map[i], 0,
3603 sc->rx_mbuf_map[i]->dm_mapsize,
3604 BUS_DMASYNC_POSTREAD);
3605 m_freem(sc->rx_mbuf_ptr[i]);
3606 sc->rx_mbuf_ptr[i] = NULL;
3607 DBRUNIF(1, sc->rx_mbuf_alloc--);
3608 }
3609 }
3610
3611
3612 for (i = 0; i < RX_PAGES; i++)
3613 bzero((char *)sc->rx_bd_chain[i], BNX_RX_CHAIN_PAGE_SZ);
3614
3615
3616 DBRUNIF((sc->rx_mbuf_alloc),
3617 printf("%s: Memory leak! Lost %d mbufs from rx chain!\n",
3618 sc->bnx_dev.dv_xname, sc->rx_mbuf_alloc));
3619
3620 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3621 }
3622
3623
3624
3625
3626
3627
3628
3629 int
3630 bnx_ifmedia_upd(struct ifnet *ifp)
3631 {
3632 struct bnx_softc *sc;
3633 struct mii_data *mii;
3634 struct ifmedia *ifm;
3635 int rc = 0;
3636
3637 sc = ifp->if_softc;
3638 ifm = &sc->bnx_ifmedia;
3639
3640
3641
3642 mii = &sc->bnx_mii;
3643 sc->bnx_link = 0;
3644 if (mii->mii_instance) {
3645 struct mii_softc *miisc;
3646 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
3647 mii_phy_reset(miisc);
3648 }
3649 mii_mediachg(mii);
3650
3651 return(rc);
3652 }
3653
3654
3655
3656
3657
3658
3659
3660 void
3661 bnx_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3662 {
3663 struct bnx_softc *sc;
3664 struct mii_data *mii;
3665 int s;
3666
3667 sc = ifp->if_softc;
3668
3669 s = splnet();
3670
3671 mii = &sc->bnx_mii;
3672
3673
3674
3675 mii_pollstat(mii);
3676 ifmr->ifm_active = mii->mii_media_active;
3677 ifmr->ifm_status = mii->mii_media_status;
3678
3679 splx(s);
3680 }
3681
3682
3683
3684
3685
3686
3687
3688 void
3689 bnx_phy_intr(struct bnx_softc *sc)
3690 {
3691 u_int32_t new_link_state, old_link_state;
3692
3693 new_link_state = sc->status_block->status_attn_bits &
3694 STATUS_ATTN_BITS_LINK_STATE;
3695 old_link_state = sc->status_block->status_attn_bits_ack &
3696 STATUS_ATTN_BITS_LINK_STATE;
3697
3698
3699 if (new_link_state != old_link_state) {
3700 DBRUN(BNX_VERBOSE_INTR, bnx_dump_status_block(sc));
3701
3702 sc->bnx_link = 0;
3703 timeout_del(&sc->bnx_timeout);
3704 bnx_tick(sc);
3705
3706
3707 if (new_link_state) {
3708 REG_WR(sc, BNX_PCICFG_STATUS_BIT_SET_CMD,
3709 STATUS_ATTN_BITS_LINK_STATE);
3710 DBPRINT(sc, BNX_INFO, "Link is now UP.\n");
3711 } else {
3712 REG_WR(sc, BNX_PCICFG_STATUS_BIT_CLEAR_CMD,
3713 STATUS_ATTN_BITS_LINK_STATE);
3714 DBPRINT(sc, BNX_INFO, "Link is now DOWN.\n");
3715 }
3716 }
3717
3718
3719 REG_WR(sc, BNX_EMAC_STATUS, BNX_EMAC_STATUS_LINK_CHANGE);
3720 }
3721
3722
3723
3724
3725
3726
3727
3728 void
3729 bnx_rx_intr(struct bnx_softc *sc)
3730 {
3731 struct status_block *sblk = sc->status_block;
3732 struct ifnet *ifp = &sc->arpcom.ac_if;
3733 u_int16_t hw_cons, sw_cons, sw_chain_cons;
3734 u_int16_t sw_prod, sw_chain_prod;
3735 u_int32_t sw_prod_bseq;
3736 struct l2_fhdr *l2fhdr;
3737 int i;
3738
3739 DBRUNIF(1, sc->rx_interrupts++);
3740
3741
3742 for (i = 0; i < RX_PAGES; i++)
3743 bus_dmamap_sync(sc->bnx_dmatag,
3744 sc->rx_bd_chain_map[i], 0,
3745 sc->rx_bd_chain_map[i]->dm_mapsize,
3746 BUS_DMASYNC_POSTWRITE);
3747
3748
3749 hw_cons = sc->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
3750 if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
3751 hw_cons++;
3752
3753
3754 sw_cons = sc->rx_cons;
3755 sw_prod = sc->rx_prod;
3756 sw_prod_bseq = sc->rx_prod_bseq;
3757
3758 DBPRINT(sc, BNX_INFO_RECV, "%s(enter): sw_prod = 0x%04X, "
3759 "sw_cons = 0x%04X, sw_prod_bseq = 0x%08X\n",
3760 __FUNCTION__, sw_prod, sw_cons, sw_prod_bseq);
3761
3762
3763 bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
3764 BUS_SPACE_BARRIER_READ);
3765
3766 DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
3767 sc->rx_low_watermark = sc->free_rx_bd);
3768
3769
3770
3771
3772
3773 while (sw_cons != hw_cons) {
3774 struct mbuf *m;
3775 struct rx_bd *rxbd;
3776 unsigned int len;
3777 u_int32_t status;
3778
3779
3780
3781
3782 sw_chain_cons = RX_CHAIN_IDX(sw_cons);
3783 sw_chain_prod = RX_CHAIN_IDX(sw_prod);
3784
3785
3786 rxbd = &sc->rx_bd_chain[RX_PAGE(sw_chain_cons)][RX_IDX(sw_chain_cons)];
3787 sc->free_rx_bd++;
3788
3789 DBRUN(BNX_VERBOSE_RECV, printf("%s(): ", __FUNCTION__);
3790 bnx_dump_rxbd(sc, sw_chain_cons, rxbd));
3791
3792
3793 if (sc->rx_mbuf_ptr[sw_chain_cons] != NULL) {
3794
3795 DBRUNIF((!(rxbd->rx_bd_flags & RX_BD_FLAGS_END)),
3796 printf("%s: Unexpected mbuf found in "
3797 "rx_bd[0x%04X]!\n", sw_chain_cons);
3798 bnx_breakpoint(sc));
3799
3800
3801
3802
3803
3804
3805
3806
3807 bus_dmamap_sync(sc->bnx_dmatag,
3808 sc->rx_mbuf_map[sw_chain_cons], 0,
3809 sc->rx_mbuf_map[sw_chain_cons]->dm_mapsize,
3810 BUS_DMASYNC_POSTREAD);
3811 bus_dmamap_unload(sc->bnx_dmatag,
3812 sc->rx_mbuf_map[sw_chain_cons]);
3813
3814
3815 m = sc->rx_mbuf_ptr[sw_chain_cons];
3816 sc->rx_mbuf_ptr[sw_chain_cons] = NULL;
3817
3818
3819
3820
3821
3822
3823
3824
3825
3826
3827 l2fhdr = mtod(m, struct l2_fhdr *);
3828
3829 len = l2fhdr->l2_fhdr_pkt_len;
3830 status = l2fhdr->l2_fhdr_status;
3831
3832 DBRUNIF(DB_RANDOMTRUE(bnx_debug_l2fhdr_status_check),
3833 printf("Simulating l2_fhdr status error.\n");
3834 status = status | L2_FHDR_ERRORS_PHY_DECODE);
3835
3836
3837 DBRUNIF(((len < BNX_MIN_MTU) ||
3838 (len > BNX_MAX_JUMBO_ETHER_MTU_VLAN)),
3839 printf("%s: Unusual frame size found. "
3840 "Min(%d), Actual(%d), Max(%d)\n", (int)BNX_MIN_MTU,
3841 len, (int) BNX_MAX_JUMBO_ETHER_MTU_VLAN);
3842
3843 bnx_dump_mbuf(sc, m);
3844 bnx_breakpoint(sc));
3845
3846 len -= ETHER_CRC_LEN;
3847
3848
3849 if (status & (L2_FHDR_ERRORS_BAD_CRC |
3850 L2_FHDR_ERRORS_PHY_DECODE |
3851 L2_FHDR_ERRORS_ALIGNMENT |
3852 L2_FHDR_ERRORS_TOO_SHORT |
3853 L2_FHDR_ERRORS_GIANT_FRAME)) {
3854 ifp->if_ierrors++;
3855 DBRUNIF(1, sc->l2fhdr_status_errors++);
3856
3857
3858 if (bnx_get_buf(sc, m, &sw_prod,
3859 &sw_chain_prod, &sw_prod_bseq)) {
3860 DBRUNIF(1, bnx_breakpoint(sc));
3861 panic("%s: Can't reuse RX mbuf!\n",
3862 sc->bnx_dev.dv_xname);
3863 }
3864 goto bnx_rx_int_next_rx;
3865 }
3866
3867
3868
3869
3870
3871
3872
3873 if (bnx_get_buf(sc, NULL, &sw_prod, &sw_chain_prod,
3874 &sw_prod_bseq)) {
3875 DBRUN(BNX_WARN, BNX_PRINTF(sc, "Failed to allocate "
3876 "new mbuf, incoming frame dropped!\n"));
3877
3878 ifp->if_ierrors++;
3879
3880
3881 if (bnx_get_buf(sc, m, &sw_prod,
3882 &sw_chain_prod, &sw_prod_bseq)) {
3883 DBRUNIF(1, bnx_breakpoint(sc));
3884 panic("%s: Double mbuf allocation "
3885 "failure!", sc->bnx_dev.dv_xname);
3886 }
3887 goto bnx_rx_int_next_rx;
3888 }
3889
3890
3891
3892
3893 m_adj(m, sizeof(struct l2_fhdr) + ETHER_ALIGN);
3894
3895
3896 m->m_pkthdr.len = m->m_len = len;
3897
3898
3899 m->m_pkthdr.rcvif = ifp;
3900
3901 DBRUN(BNX_VERBOSE_RECV,
3902 struct ether_header *eh;
3903 eh = mtod(m, struct ether_header *);
3904 printf("%s: to: %6D, from: %6D, type: 0x%04X\n",
3905 __FUNCTION__, eh->ether_dhost, ":",
3906 eh->ether_shost, ":", htons(eh->ether_type)));
3907
3908
3909
3910
3911 if (status & L2_FHDR_STATUS_IP_DATAGRAM) {
3912
3913 if ((l2fhdr->l2_fhdr_ip_xsum ^ 0xffff)
3914 == 0)
3915 m->m_pkthdr.csum_flags |=
3916 M_IPV4_CSUM_IN_OK;
3917 else
3918 DBPRINT(sc, BNX_WARN_SEND,
3919 "%s(): Invalid IP checksum "
3920 "= 0x%04X!\n",
3921 __FUNCTION__,
3922 l2fhdr->l2_fhdr_ip_xsum
3923 );
3924 }
3925
3926
3927 if (status & (L2_FHDR_STATUS_TCP_SEGMENT |
3928 L2_FHDR_STATUS_UDP_DATAGRAM)) {
3929
3930 if ((status &
3931 (L2_FHDR_ERRORS_TCP_XSUM |
3932 L2_FHDR_ERRORS_UDP_XSUM)) == 0) {
3933 m->m_pkthdr.csum_flags |=
3934 M_TCP_CSUM_IN_OK |
3935 M_UDP_CSUM_IN_OK;
3936 } else {
3937 DBPRINT(sc, BNX_WARN_SEND,
3938 "%s(): Invalid TCP/UDP "
3939 "checksum = 0x%04X!\n",
3940 __FUNCTION__,
3941 l2fhdr->l2_fhdr_tcp_udp_xsum);
3942 }
3943 }
3944
3945
3946
3947
3948
3949 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
3950 !(sc->rx_mode & BNX_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
3951 #if NVLAN > 0
3952 struct ether_vlan_header vh;
3953
3954 DBPRINT(sc, BNX_VERBOSE_SEND,
3955 "%s(): VLAN tag = 0x%04X\n",
3956 __FUNCTION__,
3957 l2fhdr->l2_fhdr_vlan_tag);
3958
3959 if (m->m_pkthdr.len < ETHER_HDR_LEN) {
3960 m_freem(m);
3961 goto bnx_rx_int_next_rx;
3962 }
3963 m_copydata(m, 0, ETHER_HDR_LEN, (caddr_t)&vh);
3964 vh.evl_proto = vh.evl_encap_proto;
3965 vh.evl_tag = htons(l2fhdr->l2_fhdr_vlan_tag);
3966 vh.evl_encap_proto = htons(ETHERTYPE_VLAN);
3967 m_adj(m, ETHER_HDR_LEN);
3968 M_PREPEND(m, sizeof(vh), M_DONTWAIT);
3969 if (m == NULL)
3970 goto bnx_rx_int_next_rx;
3971 m_copyback(m, 0, sizeof(vh), &vh);
3972 #else
3973 m_freem(m);
3974 goto bnx_rx_int_next_rx;
3975 #endif
3976 }
3977
3978 #if NBPFILTER > 0
3979
3980
3981
3982
3983 if (ifp->if_bpf)
3984 bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
3985 #endif
3986
3987
3988 ifp->if_ipackets++;
3989 DBPRINT(sc, BNX_VERBOSE_RECV,
3990 "%s(): Passing received frame up.\n", __FUNCTION__);
3991 ether_input_mbuf(ifp, m);
3992 DBRUNIF(1, sc->rx_mbuf_alloc--);
3993
3994 bnx_rx_int_next_rx:
3995 sw_prod = NEXT_RX_BD(sw_prod);
3996 }
3997
3998 sw_cons = NEXT_RX_BD(sw_cons);
3999
4000
4001 if (sw_cons == hw_cons) {
4002 hw_cons = sc->hw_rx_cons =
4003 sblk->status_rx_quick_consumer_index0;
4004 if ((hw_cons & USABLE_RX_BD_PER_PAGE) ==
4005 USABLE_RX_BD_PER_PAGE)
4006 hw_cons++;
4007 }
4008
4009
4010
4011
4012 bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
4013 BUS_SPACE_BARRIER_READ);
4014 }
4015
4016 for (i = 0; i < RX_PAGES; i++)
4017 bus_dmamap_sync(sc->bnx_dmatag,
4018 sc->rx_bd_chain_map[i], 0,
4019 sc->rx_bd_chain_map[i]->dm_mapsize,
4020 BUS_DMASYNC_PREWRITE);
4021
4022 sc->rx_cons = sw_cons;
4023 sc->rx_prod = sw_prod;
4024 sc->rx_prod_bseq = sw_prod_bseq;
4025
4026 REG_WR16(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BDIDX, sc->rx_prod);
4027 REG_WR(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BSEQ, sc->rx_prod_bseq);
4028
4029 DBPRINT(sc, BNX_INFO_RECV, "%s(exit): rx_prod = 0x%04X, "
4030 "rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n",
4031 __FUNCTION__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq);
4032 }
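
For checksum offload the receive path trusts the hardware result stored in the l2_fhdr: an IP header is accepted when l2_fhdr_ip_xsum XORed with 0xffff equals zero, that is, when the one's-complement sum across the header comes out all ones. The standalone sketch below reproduces that arithmetic on an example header; the word values are arbitrary and real-packet byte-order details are ignored.

/* Standalone sketch: one's-complement checksum, valid when the sum is 0xffff. */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static uint16_t csum16(const uint16_t *p, size_t nwords)
{
    uint32_t sum = 0;

    while (nwords--) {
        sum += *p++;
        sum = (sum & 0xffff) + (sum >> 16);    /* end-around carry */
    }
    return (uint16_t)sum;
}

int main(void)
{
    /* Example 20-byte IPv4 header, checksum field (word 5) left zero. */
    uint16_t hdr[10] = {
        0x4500, 0x003c, 0x1c46, 0x4000, 0x4006,
        0x0000, 0xc0a8, 0x0001, 0xc0a8, 0x00c7
    };

    hdr[5] = (uint16_t)~csum16(hdr, 10);       /* sender fills the field in */

    /* Receiver-side test, same check as (ip_xsum ^ 0xffff) == 0 above. */
    printf("xsum = 0x%04x -> %s\n", csum16(hdr, 10),
        (uint16_t)(csum16(hdr, 10) ^ 0xffff) == 0 ? "good" : "bad");
    return 0;
}
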
4033
4034
4035
4036
4037
4038
4039
4040 void
4041 bnx_tx_intr(struct bnx_softc *sc)
4042 {
4043 struct status_block *sblk = sc->status_block;
4044 struct ifnet *ifp = &sc->arpcom.ac_if;
4045 u_int16_t hw_tx_cons, sw_tx_cons, sw_tx_chain_cons;
4046
4047 DBRUNIF(1, sc->tx_interrupts++);
4048
4049
4050 hw_tx_cons = sc->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
4051
4052
4053 if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
4054 hw_tx_cons++;
4055
4056 sw_tx_cons = sc->tx_cons;
4057
4058
4059 bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
4060 BUS_SPACE_BARRIER_READ);
4061
4062
4063 while (sw_tx_cons != hw_tx_cons) {
4064 #ifdef BNX_DEBUG
4065 struct tx_bd *txbd = NULL;
4066 #endif
4067 sw_tx_chain_cons = TX_CHAIN_IDX(sw_tx_cons);
4068
4069 DBPRINT(sc, BNX_INFO_SEND, "%s(): hw_tx_cons = 0x%04X, "
4070 "sw_tx_cons = 0x%04X, sw_tx_chain_cons = 0x%04X\n",
4071 __FUNCTION__, hw_tx_cons, sw_tx_cons, sw_tx_chain_cons);
4072
4073 DBRUNIF((sw_tx_chain_cons > MAX_TX_BD),
4074 printf("%s: TX chain consumer out of range! "
4075 " 0x%04X > 0x%04X\n", sw_tx_chain_cons, (int)MAX_TX_BD);
4076 bnx_breakpoint(sc));
4077
4078 DBRUNIF(1, txbd = &sc->tx_bd_chain
4079 [TX_PAGE(sw_tx_chain_cons)][TX_IDX(sw_tx_chain_cons)]);
4080
4081 DBRUNIF((txbd == NULL),
4082 printf("%s: Unexpected NULL tx_bd[0x%04X]!\n",
4083 sc->bnx_dev.dv_xname, sw_tx_chain_cons);
4084 bnx_breakpoint(sc));
4085
4086 DBRUN(BNX_INFO_SEND, printf("%s: ", __FUNCTION__);
4087 bnx_dump_txbd(sc, sw_tx_chain_cons, txbd));
4088
4089
4090
4091
4092
4093
4094 if (sc->tx_mbuf_ptr[sw_tx_chain_cons] != NULL) {
4095
4096 DBRUNIF((!(txbd->tx_bd_flags & TX_BD_FLAGS_END)),
4097 printf("%s: tx_bd END flag not set but "
4098 "txmbuf == NULL!\n");
4099 bnx_breakpoint(sc));
4100
4101 DBRUN(BNX_INFO_SEND,
4102 printf("%s: Unloading map/freeing mbuf "
4103 "from tx_bd[0x%04X]\n",
4104 __FUNCTION__, sw_tx_chain_cons));
4105
4106
4107 bus_dmamap_unload(sc->bnx_dmatag,
4108 sc->tx_mbuf_map[sw_tx_chain_cons]);
4109
4110
4111 m_freem(sc->tx_mbuf_ptr[sw_tx_chain_cons]);
4112 sc->tx_mbuf_ptr[sw_tx_chain_cons] = NULL;
4113 DBRUNIF(1, sc->tx_mbuf_alloc--);
4114
4115 ifp->if_opackets++;
4116 }
4117
4118 sc->used_tx_bd--;
4119 sw_tx_cons = NEXT_TX_BD(sw_tx_cons);
4120
4121
4122 hw_tx_cons = sc->hw_tx_cons =
4123 sblk->status_tx_quick_consumer_index0;
4124 if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) ==
4125 USABLE_TX_BD_PER_PAGE)
4126 hw_tx_cons++;
4127
4128
4129
4130
4131 bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
4132 BUS_SPACE_BARRIER_READ);
4133 }
4134
4135
4136 ifp->if_timer = 0;
4137
4138
4139 if ((sc->used_tx_bd + BNX_TX_SLACK_SPACE) < USABLE_TX_BD) {
4140 DBRUNIF((ifp->if_flags & IFF_OACTIVE),
4141 printf("%s: TX chain is open for business! Used "
4142 "tx_bd = %d\n", sc->used_tx_bd));
4143 ifp->if_flags &= ~IFF_OACTIVE;
4144 }
4145
4146 sc->tx_cons = sw_tx_cons;
4147 }
4148
4149
4150
4151
4152
4153
4154
4155 void
4156 bnx_disable_intr(struct bnx_softc *sc)
4157 {
4158 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_MASK_INT);
4159 REG_RD(sc, BNX_PCICFG_INT_ACK_CMD);
4160 }
4161
4162
4163
4164
4165
4166
4167
4168 void
4169 bnx_enable_intr(struct bnx_softc *sc)
4170 {
4171 u_int32_t val;
4172
4173 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_INDEX_VALID |
4174 BNX_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx);
4175
4176 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_INDEX_VALID |
4177 sc->last_status_idx);
4178
4179 val = REG_RD(sc, BNX_HC_COMMAND);
4180 REG_WR(sc, BNX_HC_COMMAND, val | BNX_HC_COMMAND_COAL_NOW);
4181 }
4182
4183
4184
4185
4186
4187
4188
4189 void
4190 bnx_init(void *xsc)
4191 {
4192 struct bnx_softc *sc = (struct bnx_softc *)xsc;
4193 struct ifnet *ifp = &sc->arpcom.ac_if;
4194 u_int32_t ether_mtu;
4195 int s;
4196
4197 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
4198
4199 s = splnet();
4200
4201 bnx_stop(sc);
4202
4203 if (bnx_reset(sc, BNX_DRV_MSG_CODE_RESET)) {
4204 BNX_PRINTF(sc, "Controller reset failed!\n");
4205 goto bnx_init_exit;
4206 }
4207
4208 if (bnx_chipinit(sc)) {
4209 BNX_PRINTF(sc, "Controller initialization failed!\n");
4210 goto bnx_init_exit;
4211 }
4212
4213 if (bnx_blockinit(sc)) {
4214 BNX_PRINTF(sc, "Block initialization failed!\n");
4215 goto bnx_init_exit;
4216 }
4217
4218
4219 bcopy(sc->arpcom.ac_enaddr, sc->eaddr, ETHER_ADDR_LEN);
4220 bnx_set_mac_addr(sc);
4221
4222
4223 ether_mtu = BNX_MAX_STD_ETHER_MTU_VLAN;
4224
4225 DBPRINT(sc, BNX_INFO, "%s(): setting MRU = %d\n",
4226 __FUNCTION__, ether_mtu);
4227
4228
4229
4230
4231
4232 REG_WR(sc, BNX_EMAC_RX_MTU_SIZE, ether_mtu |
4233 BNX_EMAC_RX_MTU_SIZE_JUMBO_ENA);
4234
4235
4236 sc->max_frame_size = sizeof(struct l2_fhdr) + 2 + ether_mtu + 8;
4237
4238 DBPRINT(sc, BNX_INFO, "%s(): mclbytes = %d, mbuf_alloc_size = %d, "
4239 "max_frame_size = %d\n", __FUNCTION__, (int)MCLBYTES,
4240 sc->mbuf_alloc_size, sc->max_frame_size);
4241
4242
4243 bnx_set_rx_mode(sc);
4244
4245
4246 bnx_init_rx_chain(sc);
4247
4248
4249 bnx_init_tx_chain(sc);
4250
4251
4252 bnx_enable_intr(sc);
4253
4254 bnx_ifmedia_upd(ifp);
4255
4256 ifp->if_flags |= IFF_RUNNING;
4257 ifp->if_flags &= ~IFF_OACTIVE;
4258
4259 timeout_add(&sc->bnx_timeout, hz);
4260
4261 bnx_init_exit:
4262 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
4263
4264 splx(s);
4265
4266 return;
4267 }
4268
4269 void
4270 bnx_mgmt_init(struct bnx_softc *sc)
4271 {
4272 struct ifnet *ifp = &sc->arpcom.ac_if;
4273 u_int32_t val;
4274
4275
4276 if (ifp->if_flags & IFF_RUNNING)
4277 goto bnx_mgmt_init_exit;
4278
4279
4280 bnx_init_cpus(sc);
4281
4282 val = (BCM_PAGE_BITS - 8) << 24;
4283 REG_WR(sc, BNX_RV2P_CONFIG, val);
4284
4285
4286 REG_WR(sc, BNX_MISC_ENABLE_SET_BITS,
4287 BNX_MISC_ENABLE_SET_BITS_RX_V2P_ENABLE |
4288 BNX_MISC_ENABLE_SET_BITS_RX_DMA_ENABLE |
4289 BNX_MISC_ENABLE_SET_BITS_COMPLETION_ENABLE);
4290 REG_RD(sc, BNX_MISC_ENABLE_SET_BITS);
4291 DELAY(20);
4292
4293 bnx_ifmedia_upd(ifp);
4294
4295 bnx_mgmt_init_exit:
4296 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
4297 }
4298
4299
4300
4301
4302
4303
4304
4305
4306 int
4307 bnx_tx_encap(struct bnx_softc *sc, struct mbuf **m_head)
4308 {
4309 bus_dmamap_t map;
4310 struct tx_bd *txbd = NULL;
4311 struct mbuf *m0;
4312 u_int16_t vlan_tag = 0, flags = 0;
4313 u_int16_t chain_prod, prod;
4314 #ifdef BNX_DEBUG
4315 u_int16_t debug_prod;
4316 #endif
4317 u_int32_t addr, prod_bseq;
4318 int i, error, rc = 0;
4319
4320 m0 = *m_head;
4321
4322 if (m0->m_pkthdr.csum_flags) {
4323 if (m0->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
4324 flags |= TX_BD_FLAGS_IP_CKSUM;
4325 if (m0->m_pkthdr.csum_flags &
4326 (M_TCPV4_CSUM_OUT | M_UDPV4_CSUM_OUT))
4327 flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4328 }
4329
4330 #if NVLAN > 0
4331
4332 if ((m0->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) &&
4333 m0->m_pkthdr.rcvif != NULL) {
4334 struct ifvlan *ifv = m0->m_pkthdr.rcvif->if_softc;
4335 flags |= TX_BD_FLAGS_VLAN_TAG;
4336 vlan_tag = ifv->ifv_tag;
4337 }
4338 #endif
4339
4340
4341 prod = sc->tx_prod;
4342 chain_prod = TX_CHAIN_IDX(prod);
4343 map = sc->tx_mbuf_map[chain_prod];
4344
4345
4346 error = bus_dmamap_load_mbuf(sc->bnx_dmatag, map, m0, BUS_DMA_NOWAIT);
4347 if (error != 0) {
4348 printf("%s: Error mapping mbuf into TX chain!\n",
4349 sc->bnx_dev.dv_xname);
4350 m_freem(m0);
4351 *m_head = NULL;
4352 return (error);
4353 }
4354
4355
4356
4357
4358
4359
4360
4361 if (map->dm_nsegs > (USABLE_TX_BD - sc->used_tx_bd - BNX_TX_SLACK_SPACE)) {
4362 bus_dmamap_unload(sc->bnx_dmatag, map);
4363 return (ENOBUFS);
4364 }
4365
4366
4367 prod_bseq = sc->tx_prod_bseq;
4368 #ifdef BNX_DEBUG
4369 debug_prod = chain_prod;
4370 #endif
4371
4372 DBPRINT(sc, BNX_INFO_SEND,
4373 "%s(): Start: prod = 0x%04X, chain_prod = %04X, "
4374 "prod_bseq = 0x%08X\n",
4375 __FUNCTION__, prod, chain_prod, prod_bseq);
4376
4377
4378
4379
4380
4381
4382
4383 for (i = 0; i < map->dm_nsegs ; i++) {
4384 chain_prod = TX_CHAIN_IDX(prod);
4385 txbd = &sc->tx_bd_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)];
4386
4387 addr = (u_int32_t)(map->dm_segs[i].ds_addr);
4388 txbd->tx_bd_haddr_lo = htole32(addr);
4389 addr = (u_int32_t)((u_int64_t)map->dm_segs[i].ds_addr >> 32);
4390 txbd->tx_bd_haddr_hi = htole32(addr);
4391 txbd->tx_bd_mss_nbytes = htole16(map->dm_segs[i].ds_len);
4392 txbd->tx_bd_vlan_tag = htole16(vlan_tag);
4393 txbd->tx_bd_flags = htole16(flags);
4394 prod_bseq += map->dm_segs[i].ds_len;
4395 if (i == 0)
4396 txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_START);
4397 prod = NEXT_TX_BD(prod);
4398 }
4399
4400
4401 txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_END);
4402
4403 DBRUN(BNX_INFO_SEND, bnx_dump_tx_chain(sc, debug_prod, map->dm_nsegs));
4404
4405 DBPRINT(sc, BNX_INFO_SEND,
4406 "%s(): End: prod = 0x%04X, chain_prod = %04X, "
4407 "prod_bseq = 0x%08X\n",
4408 __FUNCTION__, prod, chain_prod, prod_bseq);
4409
4410
4411
4412
4413
4414
4415
4416
4417
4418
4419 sc->tx_mbuf_ptr[chain_prod] = m0;
4420 sc->used_tx_bd += map->dm_nsegs;
4421
4422 DBRUNIF((sc->used_tx_bd > sc->tx_hi_watermark),
4423 sc->tx_hi_watermark = sc->used_tx_bd);
4424
4425 DBRUNIF(1, sc->tx_mbuf_alloc++);
4426
4427 DBRUN(BNX_VERBOSE_SEND, bnx_dump_tx_mbuf_chain(sc, chain_prod,
4428 map->dm_nsegs));
4429
4430
4431 sc->tx_prod = prod;
4432 sc->tx_prod_bseq = prod_bseq;
4433
4434 return (rc);
4435 }
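
bnx_tx_encap() writes one tx_bd per DMA segment of the mapped mbuf chain, splitting each segment address into hi/lo halves, and marks the first descriptor with TX_BD_FLAGS_START and the last with TX_BD_FLAGS_END so the chip can find frame boundaries. A standalone sketch of that per-segment fill loop is below; the struct layout, flag values and segment list are invented for illustration.

/* Standalone sketch: one descriptor per segment, START on first, END on last. */
#include <stdio.h>
#include <stdint.h>

#define FLAG_START 0x0080    /* illustrative flag values */
#define FLAG_END   0x0040

struct seg_sketch  { uint64_t addr; uint32_t len; };
struct txbd_sketch { uint32_t lo, hi, len; uint16_t flags; };

int main(void)
{
    struct seg_sketch segs[] = {             /* example mbuf segments */
        { 0x1000, 1500 }, { 0x3000, 800 }, { 0x5000, 64 }
    };
    int nsegs = sizeof(segs) / sizeof(segs[0]);
    struct txbd_sketch ring[8] = {{ 0 }};
    int i, prod = 0;

    for (i = 0; i < nsegs; i++) {
        struct txbd_sketch *bd = &ring[prod];

        bd->lo    = (uint32_t)segs[i].addr;
        bd->hi    = (uint32_t)(segs[i].addr >> 32);
        bd->len   = segs[i].len;
        bd->flags = 0;
        if (i == 0)
            bd->flags |= FLAG_START;
        if (i == nsegs - 1)
            bd->flags |= FLAG_END;
        prod++;                              /* NEXT_TX_BD() in the driver */
    }

    for (i = 0; i < nsegs; i++)
        printf("bd %d: len %4u flags 0x%04x\n",
            i, (unsigned)ring[i].len, (unsigned)ring[i].flags);
    return 0;
}
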
4436
4437
4438
4439
4440
4441
4442
4443 void
4444 bnx_start(struct ifnet *ifp)
4445 {
4446 struct bnx_softc *sc = ifp->if_softc;
4447 struct mbuf *m_head = NULL;
4448 int count = 0;
4449 u_int16_t tx_prod, tx_chain_prod;
4450
4451
4452 if (!sc->bnx_link || IFQ_IS_EMPTY(&ifp->if_snd)) {
4453 DBPRINT(sc, BNX_INFO_SEND,
4454 "%s(): No link or transmit queue empty.\n", __FUNCTION__);
4455 goto bnx_start_exit;
4456 }
4457
4458
4459 tx_prod = sc->tx_prod;
4460 tx_chain_prod = TX_CHAIN_IDX(tx_prod);
4461
4462 DBPRINT(sc, BNX_INFO_SEND, "%s(): Start: tx_prod = 0x%04X, "
4463 "tx_chain_prod = %04X, tx_prod_bseq = 0x%08X\n",
4464 __FUNCTION__, tx_prod, tx_chain_prod, sc->tx_prod_bseq);
4465
4466
4467
4468
4469
4470 while (sc->used_tx_bd < USABLE_TX_BD - BNX_TX_SLACK_SPACE) {
4471
4472 IFQ_POLL(&ifp->if_snd, m_head);
4473 if (m_head == NULL)
4474 break;
4475
4476
4477
4478
4479
4480
4481 if (bnx_tx_encap(sc, &m_head)) {
4482 ifp->if_flags |= IFF_OACTIVE;
4483 DBPRINT(sc, BNX_INFO_SEND, "TX chain is closed for "
4484 "business! Total tx_bd used = %d\n",
4485 sc->used_tx_bd);
4486 break;
4487 }
4488
4489 IFQ_DEQUEUE(&ifp->if_snd, m_head);
4490 count++;
4491
4492 #if NBPFILTER > 0
4493
4494 if (ifp->if_bpf)
4495 bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
4496 #endif
4497 }
4498
4499 if (count == 0) {
4500
4501 DBPRINT(sc, BNX_VERBOSE_SEND,
4502 "%s(): No packets were dequeued\n", __FUNCTION__);
4503 goto bnx_start_exit;
4504 }
4505
4506
4507 tx_chain_prod = TX_CHAIN_IDX(sc->tx_prod);
4508
4509 DBPRINT(sc, BNX_INFO_SEND, "%s(): End: tx_prod = 0x%04X, tx_chain_prod "
4510 "= 0x%04X, tx_prod_bseq = 0x%08X\n", __FUNCTION__, tx_prod,
4511 tx_chain_prod, sc->tx_prod_bseq);
4512
4513
4514 REG_WR16(sc, MB_TX_CID_ADDR + BNX_L2CTX_TX_HOST_BIDX, sc->tx_prod);
4515 REG_WR(sc, MB_TX_CID_ADDR + BNX_L2CTX_TX_HOST_BSEQ, sc->tx_prod_bseq);
4516
4517
4518 ifp->if_timer = BNX_TX_TIMEOUT;
4519
4520 bnx_start_exit:
4521 return;
4522 }
4523
4524
4525
4526
4527
4528
4529
4530 int
4531 bnx_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
4532 {
4533 struct bnx_softc *sc = ifp->if_softc;
4534 struct ifreq *ifr = (struct ifreq *) data;
4535 struct ifaddr *ifa = (struct ifaddr *)data;
4536 struct mii_data *mii;
4537 int s, error = 0;
4538
4539 s = splnet();
4540
4541 if ((error = ether_ioctl(ifp, &sc->arpcom, command, data)) > 0) {
4542 splx(s);
4543 return (error);
4544 }
4545
4546 switch (command) {
4547 case SIOCSIFADDR:
4548 ifp->if_flags |= IFF_UP;
4549 if (!(ifp->if_flags & IFF_RUNNING))
4550 bnx_init(sc);
4551 #ifdef INET
4552 if (ifa->ifa_addr->sa_family == AF_INET)
4553 arp_ifinit(&sc->arpcom, ifa);
4554 #endif
4555 break;
4556
4557 case SIOCSIFMTU:
4558 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ifp->if_hardmtu)
4559 error = EINVAL;
4560 else if (ifp->if_mtu != ifr->ifr_mtu)
4561 ifp->if_mtu = ifr->ifr_mtu;
4562 break;
4563
4564 case SIOCSIFFLAGS:
4565 if (ifp->if_flags & IFF_UP) {
4566 if ((ifp->if_flags & IFF_RUNNING) &&
4567 ((ifp->if_flags ^ sc->bnx_if_flags) &
4568 (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
4569 bnx_set_rx_mode(sc);
4570 } else {
4571 if (!(ifp->if_flags & IFF_RUNNING))
4572 bnx_init(sc);
4573 }
4574 } else {
4575 if (ifp->if_flags & IFF_RUNNING)
4576 bnx_stop(sc);
4577 }
4578 sc->bnx_if_flags = ifp->if_flags;
4579 break;
4580
4581 case SIOCADDMULTI:
4582 case SIOCDELMULTI:
4583 error = (command == SIOCADDMULTI)
4584 ? ether_addmulti(ifr, &sc->arpcom)
4585 : ether_delmulti(ifr, &sc->arpcom);
4586
4587 if (error == ENETRESET) {
4588 if (ifp->if_flags & IFF_RUNNING)
4589 bnx_set_rx_mode(sc);
4590 error = 0;
4591 }
4592 break;
4593
4594 case SIOCSIFMEDIA:
4595 case SIOCGIFMEDIA:
4596 DBPRINT(sc, BNX_VERBOSE, "bnx_phy_flags = 0x%08X\n",
4597 sc->bnx_phy_flags);
4598
4599 if (sc->bnx_phy_flags & BNX_PHY_SERDES_FLAG)
4600 error = ifmedia_ioctl(ifp, ifr,
4601 &sc->bnx_ifmedia, command);
4602 else {
4603 mii = &sc->bnx_mii;
4604 error = ifmedia_ioctl(ifp, ifr,
4605 &mii->mii_media, command);
4606 }
4607 break;
4608
4609 default:
4610 error = ENOTTY;
4611 break;
4612 }
4613
4614 splx(s);
4615
4616 return (error);
4617 }
4618
4619
4620
4621
4622
4623
4624
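/*
 * Transmit timeout handler.  Resets the controller when a watchdog
 * timeout occurs.
 *
 * Returns:
 *	Nothing.
 */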
4625 void
4626 bnx_watchdog(struct ifnet *ifp)
4627 {
4628 struct bnx_softc *sc = ifp->if_softc;
4629
4630 DBRUN(BNX_WARN_SEND, bnx_dump_driver_state(sc);
4631 bnx_dump_status_block(sc));
4632
4633 printf("%s: Watchdog timeout occurred, resetting!\n",
4634 ifp->if_xname);
4635
4636
4637
4638 bnx_init(sc);
4639
4640 ifp->if_oerrors++;
4641 }
4642
4643
4644
4645
4646
4647
4648
4649
4650
4651
4652
4653
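/*
 * Main interrupt service routine.  Verifies that the controller generated
 * the interrupt, then services link, RX, and TX events until the status
 * block index stops changing.
 *
 * Returns:
 *	1 when the interrupt was serviced, 0 otherwise.
 */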
4654 int
4655 bnx_intr(void *xsc)
4656 {
4657 struct bnx_softc *sc;
4658 struct ifnet *ifp;
4659 u_int32_t status_attn_bits;
4660
4661 sc = xsc;
4662 if ((sc->bnx_flags & BNX_ACTIVE_FLAG) == 0)
4663 return (0);
4664
4665 ifp = &sc->arpcom.ac_if;
4666
4667 DBRUNIF(1, sc->interrupts_generated++);
4668
4669 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0,
4670 sc->status_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
4671
4672
4673
4674
4675
4676
4677
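/*
 * If the hardware status block index matches the last value read by the
 * driver and we haven't asserted our interrupt, then there is nothing
 * to do.
 */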
4678 if ((sc->status_block->status_idx == sc->last_status_idx) &&
4679 (REG_RD(sc, BNX_PCICFG_MISC_STATUS) &
4680 BNX_PCICFG_MISC_STATUS_INTA_VALUE))
4681 return (0);
4682
4683
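/* Ack the interrupt and stop others from occurring. */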
4684 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD,
4685 BNX_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
4686 BNX_PCICFG_INT_ACK_CMD_MASK_INT);
4687
4688
4689 for (;;) {
4690 status_attn_bits = sc->status_block->status_attn_bits;
4691
4692 DBRUNIF(DB_RANDOMTRUE(bnx_debug_unexpected_attention),
4693 printf("Simulating unexpected status attention bit set.");
4694 status_attn_bits = status_attn_bits |
4695 STATUS_ATTN_BITS_PARITY_ERROR);
4696
4697
4698 if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
4699 (sc->status_block->status_attn_bits_ack &
4700 STATUS_ATTN_BITS_LINK_STATE))
4701 bnx_phy_intr(sc);
4702
4703
4704 if (((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
4705 (sc->status_block->status_attn_bits_ack &
4706 ~STATUS_ATTN_BITS_LINK_STATE))) {
4707 DBRUN(1, sc->unexpected_attentions++);
4708
4709 BNX_PRINTF(sc, "Fatal attention detected: 0x%08X\n",
4710 sc->status_block->status_attn_bits);
4711
4712 DBRUN(BNX_FATAL,
4713 if (bnx_debug_unexpected_attention == 0)
4714 bnx_breakpoint(sc));
4715
4716 bnx_init(sc);
4717 return (1);
4718 }
4719
4720
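/* Check for any completed RX frames. */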
4721 if (sc->status_block->status_rx_quick_consumer_index0 !=
4722 sc->hw_rx_cons)
4723 bnx_rx_intr(sc);
4724
4725
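/* Check for any completed TX frames. */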
4726 if (sc->status_block->status_tx_quick_consumer_index0 !=
4727 sc->hw_tx_cons)
4728 bnx_tx_intr(sc);
4729
4730
4731
4732
4733 sc->last_status_idx = sc->status_block->status_idx;
4734
4735
4736
4737
4738 bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
4739 BUS_SPACE_BARRIER_READ);
4740
4741
4742 if ((sc->status_block->status_rx_quick_consumer_index0 ==
4743 sc->hw_rx_cons) &&
4744 (sc->status_block->status_tx_quick_consumer_index0 ==
4745 sc->hw_tx_cons))
4746 break;
4747 }
4748
4749 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0,
4750 sc->status_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
4751
4752
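/* Re-enable interrupts. */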
4753 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD,
4754 BNX_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx |
4755 BNX_PCICFG_INT_ACK_CMD_MASK_INT);
4756 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD,
4757 BNX_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);
4758
4759
4760 if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd))
4761 bnx_start(ifp);
4762
4763 return (1);
4764 }
4765
4766
4767
4768
4769
4770
4771
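/*
 * Programs the receive filters: promiscuous mode, all-multicast mode, or
 * the multicast hash filters, then updates the sort mode.
 *
 * Returns:
 *	Nothing.
 */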
4772 void
4773 bnx_set_rx_mode(struct bnx_softc *sc)
4774 {
4775 struct arpcom *ac = &sc->arpcom;
4776 struct ifnet *ifp = &ac->ac_if;
4777 struct ether_multi *enm;
4778 struct ether_multistep step;
4779 u_int32_t hashes[NUM_MC_HASH_REGISTERS] = { 0, 0, 0, 0, 0, 0, 0, 0 };
4780 u_int32_t rx_mode, sort_mode;
4781 int h, i;
4782
4783
4784 rx_mode = sc->rx_mode & ~(BNX_EMAC_RX_MODE_PROMISCUOUS |
4785 BNX_EMAC_RX_MODE_KEEP_VLAN_TAG);
4786 sort_mode = 1 | BNX_RPM_SORT_USER0_BC_EN;
4787
4788
4789
4790
4791
4792 if (!(sc->bnx_flags & BNX_MFW_ENABLE_FLAG))
4793 rx_mode |= BNX_EMAC_RX_MODE_KEEP_VLAN_TAG;
4794
4795
4796
4797
4798
4799 if (ifp->if_flags & IFF_PROMISC) {
4800 DBPRINT(sc, BNX_INFO, "Enabling promiscuous mode.\n");
4801
4802
4803 rx_mode |= BNX_EMAC_RX_MODE_PROMISCUOUS;
4804 sort_mode |= BNX_RPM_SORT_USER0_PROM_EN;
4805 } else if (ifp->if_flags & IFF_ALLMULTI) {
4806 allmulti:
4807 DBPRINT(sc, BNX_INFO, "Enabling all multicast mode.\n");
4808
4809
4810 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++)
4811 REG_WR(sc, BNX_EMAC_MULTICAST_HASH0 + (i * 4),
4812 0xffffffff);
4813 sort_mode |= BNX_RPM_SORT_USER0_MC_EN;
4814 } else {
4815
4816 DBPRINT(sc, BNX_INFO, "Enabling selective multicast mode.\n");
4817
4818 ETHER_FIRST_MULTI(step, ac, enm);
4819 while (enm != NULL) {
4820 if (bcmp(enm->enm_addrlo, enm->enm_addrhi,
4821 ETHER_ADDR_LEN)) {
4822 ifp->if_flags |= IFF_ALLMULTI;
4823 goto allmulti;
4824 }
4825 h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN) &
4826 0xFF;
4827 hashes[(h & 0xE0) >> 5] |= 1 << (h & 0x1F);
4828 ETHER_NEXT_MULTI(step, enm);
4829 }
4830
4831 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++)
4832 REG_WR(sc, BNX_EMAC_MULTICAST_HASH0 + (i * 4),
4833 hashes[i]);
4834
4835 sort_mode |= BNX_RPM_SORT_USER0_MC_HSH_EN;
4836 }
4837
4838
4839 if (rx_mode != sc->rx_mode) {
4840 DBPRINT(sc, BNX_VERBOSE, "Enabling new receive mode: 0x%08X\n",
4841 rx_mode);
4842
4843 sc->rx_mode = rx_mode;
4844 REG_WR(sc, BNX_EMAC_RX_MODE, rx_mode);
4845 }
4846
4847
4848 REG_WR(sc, BNX_RPM_SORT_USER0, 0x0);
4849 REG_WR(sc, BNX_RPM_SORT_USER0, sort_mode);
4850 REG_WR(sc, BNX_RPM_SORT_USER0, sort_mode | BNX_RPM_SORT_USER0_ENA);
4851 }
4852
4853
4854
4855
4856
4857
4858
4859
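/*
 * Copies the counters from the hardware statistics block into the driver
 * softc and updates the interface error and collision counts.
 *
 * Returns:
 *	Nothing.
 */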
4860 void
4861 bnx_stats_update(struct bnx_softc *sc)
4862 {
4863 struct ifnet *ifp = &sc->arpcom.ac_if;
4864 struct statistics_block *stats;
4865
4866 DBPRINT(sc, BNX_EXCESSIVE, "Entering %s()\n", __FUNCTION__);
4867
4868 stats = (struct statistics_block *)sc->stats_block;
4869
4870
4871
4872
4873
4874 ifp->if_collisions = (u_long)stats->stat_EtherStatsCollisions;
4875
4876 ifp->if_ierrors = (u_long)stats->stat_EtherStatsUndersizePkts +
4877 (u_long)stats->stat_EtherStatsOverrsizePkts +
4878 (u_long)stats->stat_IfInMBUFDiscards +
4879 (u_long)stats->stat_Dot3StatsAlignmentErrors +
4880 (u_long)stats->stat_Dot3StatsFCSErrors;
4881
4882 ifp->if_oerrors = (u_long)
4883 stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
4884 (u_long)stats->stat_Dot3StatsExcessiveCollisions +
4885 (u_long)stats->stat_Dot3StatsLateCollisions;
4886
4887
4888
4889
4890
4891
4892 if (!(BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5706) &&
4893 !(BNX_CHIP_ID(sc) == BNX_CHIP_ID_5708_A0))
4894 ifp->if_oerrors += (u_long) stats->stat_Dot3StatsCarrierSenseErrors;
4895
4896
4897
4898
4899
4900 sc->stat_IfHCInOctets = ((u_int64_t)stats->stat_IfHCInOctets_hi << 32) +
4901 (u_int64_t) stats->stat_IfHCInOctets_lo;
4902
4903 sc->stat_IfHCInBadOctets =
4904 ((u_int64_t) stats->stat_IfHCInBadOctets_hi << 32) +
4905 (u_int64_t) stats->stat_IfHCInBadOctets_lo;
4906
4907 sc->stat_IfHCOutOctets =
4908 ((u_int64_t) stats->stat_IfHCOutOctets_hi << 32) +
4909 (u_int64_t) stats->stat_IfHCOutOctets_lo;
4910
4911 sc->stat_IfHCOutBadOctets =
4912 ((u_int64_t) stats->stat_IfHCOutBadOctets_hi << 32) +
4913 (u_int64_t) stats->stat_IfHCOutBadOctets_lo;
4914
4915 sc->stat_IfHCInUcastPkts =
4916 ((u_int64_t) stats->stat_IfHCInUcastPkts_hi << 32) +
4917 (u_int64_t) stats->stat_IfHCInUcastPkts_lo;
4918
4919 sc->stat_IfHCInMulticastPkts =
4920 ((u_int64_t) stats->stat_IfHCInMulticastPkts_hi << 32) +
4921 (u_int64_t) stats->stat_IfHCInMulticastPkts_lo;
4922
4923 sc->stat_IfHCInBroadcastPkts =
4924 ((u_int64_t) stats->stat_IfHCInBroadcastPkts_hi << 32) +
4925 (u_int64_t) stats->stat_IfHCInBroadcastPkts_lo;
4926
4927 sc->stat_IfHCOutUcastPkts =
4928 ((u_int64_t) stats->stat_IfHCOutUcastPkts_hi << 32) +
4929 (u_int64_t) stats->stat_IfHCOutUcastPkts_lo;
4930
4931 sc->stat_IfHCOutMulticastPkts =
4932 ((u_int64_t) stats->stat_IfHCOutMulticastPkts_hi << 32) +
4933 (u_int64_t) stats->stat_IfHCOutMulticastPkts_lo;
4934
4935 sc->stat_IfHCOutBroadcastPkts =
4936 ((u_int64_t) stats->stat_IfHCOutBroadcastPkts_hi << 32) +
4937 (u_int64_t) stats->stat_IfHCOutBroadcastPkts_lo;
4938
4939 sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors =
4940 stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors;
4941
4942 sc->stat_Dot3StatsCarrierSenseErrors =
4943 stats->stat_Dot3StatsCarrierSenseErrors;
4944
4945 sc->stat_Dot3StatsFCSErrors = stats->stat_Dot3StatsFCSErrors;
4946
4947 sc->stat_Dot3StatsAlignmentErrors =
4948 stats->stat_Dot3StatsAlignmentErrors;
4949
4950 sc->stat_Dot3StatsSingleCollisionFrames =
4951 stats->stat_Dot3StatsSingleCollisionFrames;
4952
4953 sc->stat_Dot3StatsMultipleCollisionFrames =
4954 stats->stat_Dot3StatsMultipleCollisionFrames;
4955
4956 sc->stat_Dot3StatsDeferredTransmissions =
4957 stats->stat_Dot3StatsDeferredTransmissions;
4958
4959 sc->stat_Dot3StatsExcessiveCollisions =
4960 stats->stat_Dot3StatsExcessiveCollisions;
4961
4962 sc->stat_Dot3StatsLateCollisions = stats->stat_Dot3StatsLateCollisions;
4963
4964 sc->stat_EtherStatsCollisions = stats->stat_EtherStatsCollisions;
4965
4966 sc->stat_EtherStatsFragments = stats->stat_EtherStatsFragments;
4967
4968 sc->stat_EtherStatsJabbers = stats->stat_EtherStatsJabbers;
4969
4970 sc->stat_EtherStatsUndersizePkts = stats->stat_EtherStatsUndersizePkts;
4971
4972 sc->stat_EtherStatsOverrsizePkts = stats->stat_EtherStatsOverrsizePkts;
4973
4974 sc->stat_EtherStatsPktsRx64Octets =
4975 stats->stat_EtherStatsPktsRx64Octets;
4976
4977 sc->stat_EtherStatsPktsRx65Octetsto127Octets =
4978 stats->stat_EtherStatsPktsRx65Octetsto127Octets;
4979
4980 sc->stat_EtherStatsPktsRx128Octetsto255Octets =
4981 stats->stat_EtherStatsPktsRx128Octetsto255Octets;
4982
4983 sc->stat_EtherStatsPktsRx256Octetsto511Octets =
4984 stats->stat_EtherStatsPktsRx256Octetsto511Octets;
4985
4986 sc->stat_EtherStatsPktsRx512Octetsto1023Octets =
4987 stats->stat_EtherStatsPktsRx512Octetsto1023Octets;
4988
4989 sc->stat_EtherStatsPktsRx1024Octetsto1522Octets =
4990 stats->stat_EtherStatsPktsRx1024Octetsto1522Octets;
4991
4992 sc->stat_EtherStatsPktsRx1523Octetsto9022Octets =
4993 stats->stat_EtherStatsPktsRx1523Octetsto9022Octets;
4994
4995 sc->stat_EtherStatsPktsTx64Octets =
4996 stats->stat_EtherStatsPktsTx64Octets;
4997
4998 sc->stat_EtherStatsPktsTx65Octetsto127Octets =
4999 stats->stat_EtherStatsPktsTx65Octetsto127Octets;
5000
5001 sc->stat_EtherStatsPktsTx128Octetsto255Octets =
5002 stats->stat_EtherStatsPktsTx128Octetsto255Octets;
5003
5004 sc->stat_EtherStatsPktsTx256Octetsto511Octets =
5005 stats->stat_EtherStatsPktsTx256Octetsto511Octets;
5006
5007 sc->stat_EtherStatsPktsTx512Octetsto1023Octets =
5008 stats->stat_EtherStatsPktsTx512Octetsto1023Octets;
5009
5010 sc->stat_EtherStatsPktsTx1024Octetsto1522Octets =
5011 stats->stat_EtherStatsPktsTx1024Octetsto1522Octets;
5012
5013 sc->stat_EtherStatsPktsTx1523Octetsto9022Octets =
5014 stats->stat_EtherStatsPktsTx1523Octetsto9022Octets;
5015
5016 sc->stat_XonPauseFramesReceived = stats->stat_XonPauseFramesReceived;
5017
5018 sc->stat_XoffPauseFramesReceived = stats->stat_XoffPauseFramesReceived;
5019
5020 sc->stat_OutXonSent = stats->stat_OutXonSent;
5021
5022 sc->stat_OutXoffSent = stats->stat_OutXoffSent;
5023
5024 sc->stat_FlowControlDone = stats->stat_FlowControlDone;
5025
5026 sc->stat_MacControlFramesReceived =
5027 stats->stat_MacControlFramesReceived;
5028
5029 sc->stat_XoffStateEntered = stats->stat_XoffStateEntered;
5030
5031 sc->stat_IfInFramesL2FilterDiscards =
5032 stats->stat_IfInFramesL2FilterDiscards;
5033
5034 sc->stat_IfInRuleCheckerDiscards = stats->stat_IfInRuleCheckerDiscards;
5035
5036 sc->stat_IfInFTQDiscards = stats->stat_IfInFTQDiscards;
5037
5038 sc->stat_IfInMBUFDiscards = stats->stat_IfInMBUFDiscards;
5039
5040 sc->stat_IfInRuleCheckerP4Hit = stats->stat_IfInRuleCheckerP4Hit;
5041
5042 sc->stat_CatchupInRuleCheckerDiscards =
5043 stats->stat_CatchupInRuleCheckerDiscards;
5044
5045 sc->stat_CatchupInFTQDiscards = stats->stat_CatchupInFTQDiscards;
5046
5047 sc->stat_CatchupInMBUFDiscards = stats->stat_CatchupInMBUFDiscards;
5048
5049 sc->stat_CatchupInRuleCheckerP4Hit =
5050 stats->stat_CatchupInRuleCheckerP4Hit;
5051
5052 DBPRINT(sc, BNX_EXCESSIVE, "Exiting %s()\n", __FUNCTION__);
5053 }
5054
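/*
 * Periodic timer routine.  Updates the driver pulse to the bootcode,
 * refreshes the statistics, and polls the PHY for link when no link is
 * currently present.
 *
 * Returns:
 *	Nothing.
 */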
5055 void
5056 bnx_tick(void *xsc)
5057 {
5058 struct bnx_softc *sc = xsc;
5059 struct ifnet *ifp = &sc->arpcom.ac_if;
5060 struct mii_data *mii = NULL;
5061 u_int32_t msg;
5062
5063
5064 #ifdef BNX_DEBUG
5065 msg = (u_int32_t)BNX_DRV_MSG_DATA_PULSE_CODE_ALWAYS_ALIVE;
5066 #else
5067 msg = (u_int32_t)++sc->bnx_fw_drv_pulse_wr_seq;
5068 #endif
5069 REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_PULSE_MB, msg);
5070
5071
5072 bnx_stats_update(sc);
5073
5074
5075 timeout_add(&sc->bnx_timeout, hz);
5076
5077
5078 if (sc->bnx_link)
5079 goto bnx_tick_exit;
5080
5081
5082
5083 mii = &sc->bnx_mii;
5084 mii_tick(mii);
5085
5086
5087 if (!sc->bnx_link && mii->mii_media_status & IFM_ACTIVE &&
5088 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5089 sc->bnx_link++;
5090
5091 if (!IFQ_IS_EMPTY(&ifp->if_snd))
5092 bnx_start(ifp);
5093 }
5094
5095 bnx_tick_exit:
5096 return;
5097 }
5098
5099
5100
5101
5102 #ifdef BNX_DEBUG
5103
5104
5105
5106
5107
5108
5109
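/*
 * Prints out information about an mbuf chain.
 *
 * Returns:
 *	Nothing.
 */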
5110 void
5111 bnx_dump_mbuf(struct bnx_softc *sc, struct mbuf *m)
5112 {
5113 struct mbuf *mp = m;
5114
5115 if (m == NULL) {
5116
5117 printf("mbuf ptr is null!\n");
5118 return;
5119 }
5120
5121 while (mp) {
5122 printf("mbuf: vaddr = %p, m_len = %d, m_flags = ",
5123 mp, mp->m_len);
5124
5125 if (mp->m_flags & M_EXT)
5126 printf("M_EXT ");
5127 if (mp->m_flags & M_PKTHDR)
5128 printf("M_PKTHDR ");
5129 printf("\n");
5130
5131 if (mp->m_flags & M_EXT)
5132 printf("- m_ext: vaddr = %p, ext_size = 0x%04X\n",
5133 mp->m_ext.ext_buf, mp->m_ext.ext_size);
5134
5135 mp = mp->m_next;
5136 }
5137 }
5138
5139
5140
5141
5142
5143
5144
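/*
 * Prints out the mbufs in the TX mbuf chain.
 *
 * Returns:
 *	Nothing.
 */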
5145 void
5146 bnx_dump_tx_mbuf_chain(struct bnx_softc *sc, int chain_prod, int count)
5147 {
5148 struct mbuf *m;
5149 int i;
5150
5151 BNX_PRINTF(sc,
5152 "----------------------------"
5153 " tx mbuf data "
5154 "----------------------------\n");
5155
5156 for (i = 0; i < count; i++) {
5157 m = sc->tx_mbuf_ptr[chain_prod];
5158 BNX_PRINTF(sc, "txmbuf[%d]\n", chain_prod);
5159 bnx_dump_mbuf(sc, m);
5160 chain_prod = TX_CHAIN_IDX(NEXT_TX_BD(chain_prod));
5161 }
5162
5163 BNX_PRINTF(sc,
5164 "--------------------------------------------"
5165 "----------------------------\n");
5166 }
5167
5168
5169
5170
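/*
 * Prints out the mbufs in the RX mbuf chain.
 *
 * Returns:
 *	Nothing.
 */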
5171 void
5172 bnx_dump_rx_mbuf_chain(struct bnx_softc *sc, int chain_prod, int count)
5173 {
5174 struct mbuf *m;
5175 int i;
5176
5177 BNX_PRINTF(sc,
5178 "----------------------------"
5179 " rx mbuf data "
5180 "----------------------------\n");
5181
5182 for (i = 0; i < count; i++) {
5183 m = sc->rx_mbuf_ptr[chain_prod];
5184 BNX_PRINTF(sc, "rxmbuf[0x%04X]\n", chain_prod);
5185 bnx_dump_mbuf(sc, m);
5186 chain_prod = RX_CHAIN_IDX(NEXT_RX_BD(chain_prod));
5187 }
5188
5189
5190 BNX_PRINTF(sc,
5191 "--------------------------------------------"
5192 "----------------------------\n");
5193 }
5194
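/* Prints out a tx_bd structure. */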
5195 void
5196 bnx_dump_txbd(struct bnx_softc *sc, int idx, struct tx_bd *txbd)
5197 {
5198 if (idx > MAX_TX_BD)
5199
5200 BNX_PRINTF(sc, "tx_bd[0x%04X]: Invalid tx_bd index!\n", idx);
5201 else if ((idx & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
5202
5203 BNX_PRINTF(sc, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, chain "
5204 "page pointer\n", idx, txbd->tx_bd_haddr_hi,
5205 txbd->tx_bd_haddr_lo);
5206 else
5207
5208 BNX_PRINTF(sc, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = "
5209 "0x%08X, vlan tag = 0x%4X, flags = 0x%08X\n", idx,
5210 txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo,
5211 txbd->tx_bd_mss_nbytes, txbd->tx_bd_vlan_tag,
5212 txbd->tx_bd_flags);
5213 }
5214
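/* Prints out a rx_bd structure. */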
5215 void
5216 bnx_dump_rxbd(struct bnx_softc *sc, int idx, struct rx_bd *rxbd)
5217 {
5218 if (idx > MAX_RX_BD)
5219
5220 BNX_PRINTF(sc, "rx_bd[0x%04X]: Invalid rx_bd index!\n", idx);
5221 else if ((idx & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
5222
5223 BNX_PRINTF(sc, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page "
5224 "pointer\n", idx, rxbd->rx_bd_haddr_hi,
5225 rxbd->rx_bd_haddr_lo);
5226 else
5227
5228 BNX_PRINTF(sc, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = "
5229 "0x%08X, flags = 0x%08X\n", idx,
5230 rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo,
5231 rxbd->rx_bd_len, rxbd->rx_bd_flags);
5232 }
5233
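/* Prints out a l2_fhdr structure. */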
5234 void
5235 bnx_dump_l2fhdr(struct bnx_softc *sc, int idx, struct l2_fhdr *l2fhdr)
5236 {
5237 BNX_PRINTF(sc, "l2_fhdr[0x%04X]: status = 0x%08X, "
5238 "pkt_len = 0x%04X, vlan = 0x%04x, ip_xsum = 0x%04X, "
5239 "tcp_udp_xsum = 0x%04X\n", idx,
5240 l2fhdr->l2_fhdr_status, l2fhdr->l2_fhdr_pkt_len,
5241 l2fhdr->l2_fhdr_vlan_tag, l2fhdr->l2_fhdr_ip_xsum,
5242 l2fhdr->l2_fhdr_tcp_udp_xsum);
5243 }
5244
5245
5246
5247
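/*
 * Prints out the tx_bd chain.
 *
 * Returns:
 *	Nothing.
 */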
5248 void
5249 bnx_dump_tx_chain(struct bnx_softc *sc, int tx_prod, int count)
5250 {
5251 struct tx_bd *txbd;
5252 int i;
5253
5254
5255 BNX_PRINTF(sc,
5256 "----------------------------"
5257 " tx_bd chain "
5258 "----------------------------\n");
5259
5260 BNX_PRINTF(sc,
5261 "page size = 0x%08X, tx chain pages = 0x%08X\n",
5262 (u_int32_t)BCM_PAGE_SIZE, (u_int32_t) TX_PAGES);
5263
5264 BNX_PRINTF(sc,
5265 "tx_bd per page = 0x%08X, usable tx_bd per page = 0x%08X\n",
5266 (u_int32_t)TOTAL_TX_BD_PER_PAGE, (u_int32_t)USABLE_TX_BD_PER_PAGE);
5267
5268 BNX_PRINTF(sc, "total tx_bd = 0x%08X\n", (u_int32_t)TOTAL_TX_BD);
5269
5270 BNX_PRINTF(sc, ""
5271 "-----------------------------"
5272 " tx_bd data "
5273 "-----------------------------\n");
5274
5275
5276 for (i = 0; i < count; i++) {
5277 txbd = &sc->tx_bd_chain[TX_PAGE(tx_prod)][TX_IDX(tx_prod)];
5278 bnx_dump_txbd(sc, tx_prod, txbd);
5279 tx_prod = TX_CHAIN_IDX(NEXT_TX_BD(tx_prod));
5280 }
5281
5282 BNX_PRINTF(sc,
5283 "-----------------------------"
5284 "--------------"
5285 "-----------------------------\n");
5286 }
5287
5288
5289
5290
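/*
 * Prints out the rx_bd chain.
 *
 * Returns:
 *	Nothing.
 */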
5291 void
5292 bnx_dump_rx_chain(struct bnx_softc *sc, int rx_prod, int count)
5293 {
5294 struct rx_bd *rxbd;
5295 int i;
5296
5297
5298 BNX_PRINTF(sc,
5299 "----------------------------"
5300 " rx_bd chain "
5301 "----------------------------\n");
5302
5303 BNX_PRINTF(sc, "----- RX_BD Chain -----\n");
5304
5305 BNX_PRINTF(sc,
5306 "page size = 0x%08X, rx chain pages = 0x%08X\n",
5307 (u_int32_t)BCM_PAGE_SIZE, (u_int32_t)RX_PAGES);
5308
5309 BNX_PRINTF(sc,
5310 "rx_bd per page = 0x%08X, usable rx_bd per page = 0x%08X\n",
5311 (u_int32_t)TOTAL_RX_BD_PER_PAGE, (u_int32_t)USABLE_RX_BD_PER_PAGE);
5312
5313 BNX_PRINTF(sc, "total rx_bd = 0x%08X\n", (u_int32_t)TOTAL_RX_BD);
5314
5315 BNX_PRINTF(sc,
5316 "----------------------------"
5317 " rx_bd data "
5318 "----------------------------\n");
5319
5320
5321 for (i = 0; i < count; i++) {
5322 rxbd = &sc->rx_bd_chain[RX_PAGE(rx_prod)][RX_IDX(rx_prod)];
5323 bnx_dump_rxbd(sc, rx_prod, rxbd);
5324 rx_prod = RX_CHAIN_IDX(NEXT_RX_BD(rx_prod));
5325 }
5326
5327 BNX_PRINTF(sc,
5328 "----------------------------"
5329 "--------------"
5330 "----------------------------\n");
5331 }
5332
5333
5334
5335
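/*
 * Prints out the status block from host memory.
 *
 * Returns:
 *	Nothing.
 */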
5336 void
5337 bnx_dump_status_block(struct bnx_softc *sc)
5338 {
5339 struct status_block *sblk;
5340
5341 sblk = sc->status_block;
5342
5343 BNX_PRINTF(sc, "----------------------------- Status Block "
5344 "-----------------------------\n");
5345
5346 BNX_PRINTF(sc,
5347 "attn_bits = 0x%08X, attn_bits_ack = 0x%08X, index = 0x%04X\n",
5348 sblk->status_attn_bits, sblk->status_attn_bits_ack,
5349 sblk->status_idx);
5350
5351 BNX_PRINTF(sc, "rx_cons0 = 0x%08X, tx_cons0 = 0x%08X\n",
5352 sblk->status_rx_quick_consumer_index0,
5353 sblk->status_tx_quick_consumer_index0);
5354
5355 BNX_PRINTF(sc, "status_idx = 0x%04X\n", sblk->status_idx);
5356
5357
5358 if (sblk->status_rx_quick_consumer_index1 ||
5359 sblk->status_tx_quick_consumer_index1)
5360 BNX_PRINTF(sc, "rx_cons1 = 0x%08X, tx_cons1 = 0x%08X\n",
5361 sblk->status_rx_quick_consumer_index1,
5362 sblk->status_tx_quick_consumer_index1);
5363
5364 if (sblk->status_rx_quick_consumer_index2 ||
5365 sblk->status_tx_quick_consumer_index2)
5366 BNX_PRINTF(sc, "rx_cons2 = 0x%08X, tx_cons2 = 0x%08X\n",
5367 sblk->status_rx_quick_consumer_index2,
5368 sblk->status_tx_quick_consumer_index2);
5369
5370 if (sblk->status_rx_quick_consumer_index3 ||
5371 sblk->status_tx_quick_consumer_index3)
5372 BNX_PRINTF(sc, "rx_cons3 = 0x%08X, tx_cons3 = 0x%08X\n",
5373 sblk->status_rx_quick_consumer_index3,
5374 sblk->status_tx_quick_consumer_index3);
5375
5376 if (sblk->status_rx_quick_consumer_index4 ||
5377 sblk->status_rx_quick_consumer_index5)
5378 BNX_PRINTF(sc, "rx_cons4 = 0x%08X, rx_cons5 = 0x%08X\n",
5379 sblk->status_rx_quick_consumer_index4,
5380 sblk->status_rx_quick_consumer_index5);
5381
5382 if (sblk->status_rx_quick_consumer_index6 ||
5383 sblk->status_rx_quick_consumer_index7)
5384 BNX_PRINTF(sc, "rx_cons6 = 0x%08X, rx_cons7 = 0x%08X\n",
5385 sblk->status_rx_quick_consumer_index6,
5386 sblk->status_rx_quick_consumer_index7);
5387
5388 if (sblk->status_rx_quick_consumer_index8 ||
5389 sblk->status_rx_quick_consumer_index9)
5390 BNX_PRINTF(sc, "rx_cons8 = 0x%08X, rx_cons9 = 0x%08X\n",
5391 sblk->status_rx_quick_consumer_index8,
5392 sblk->status_rx_quick_consumer_index9);
5393
5394 if (sblk->status_rx_quick_consumer_index10 ||
5395 sblk->status_rx_quick_consumer_index11)
5396 BNX_PRINTF(sc, "rx_cons10 = 0x%08X, rx_cons11 = 0x%08X\n",
5397 sblk->status_rx_quick_consumer_index10,
5398 sblk->status_rx_quick_consumer_index11);
5399
5400 if (sblk->status_rx_quick_consumer_index12 ||
5401 sblk->status_rx_quick_consumer_index13)
5402 BNX_PRINTF(sc, "rx_cons12 = 0x%08X, rx_cons13 = 0x%08X\n",
5403 sblk->status_rx_quick_consumer_index12,
5404 sblk->status_rx_quick_consumer_index13);
5405
5406 if (sblk->status_rx_quick_consumer_index14 ||
5407 sblk->status_rx_quick_consumer_index15)
5408 BNX_PRINTF(sc, "rx_cons14 = 0x%08X, rx_cons15 = 0x%08X\n",
5409 sblk->status_rx_quick_consumer_index14,
5410 sblk->status_rx_quick_consumer_index15);
5411
5412 if (sblk->status_completion_producer_index ||
5413 sblk->status_cmd_consumer_index)
5414 BNX_PRINTF(sc, "com_prod = 0x%08X, cmd_cons = 0x%08X\n",
5415 sblk->status_completion_producer_index,
5416 sblk->status_cmd_consumer_index);
5417
5418 BNX_PRINTF(sc, "-------------------------------------------"
5419 "-----------------------------\n");
5420 }
5421
5422
5423
5424
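/*
 * Prints out the statistics block.
 *
 * Returns:
 *	Nothing.
 */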
5425 void
5426 bnx_dump_stats_block(struct bnx_softc *sc)
5427 {
5428 struct statistics_block *sblk;
5429
5430 sblk = sc->stats_block;
5431
5432 BNX_PRINTF(sc, ""
5433 "-----------------------------"
5434 " Stats Block "
5435 "-----------------------------\n");
5436
5437 BNX_PRINTF(sc, "IfHcInOctets = 0x%08X:%08X, "
5438 "IfHcInBadOctets = 0x%08X:%08X\n",
5439 sblk->stat_IfHCInOctets_hi, sblk->stat_IfHCInOctets_lo,
5440 sblk->stat_IfHCInBadOctets_hi, sblk->stat_IfHCInBadOctets_lo);
5441
5442 BNX_PRINTF(sc, "IfHcOutOctets = 0x%08X:%08X, "
5443 "IfHcOutBadOctets = 0x%08X:%08X\n",
5444 sblk->stat_IfHCOutOctets_hi, sblk->stat_IfHCOutOctets_lo,
5445 sblk->stat_IfHCOutBadOctets_hi, sblk->stat_IfHCOutBadOctets_lo);
5446
5447 BNX_PRINTF(sc, "IfHcInUcastPkts = 0x%08X:%08X, "
5448 "IfHcInMulticastPkts = 0x%08X:%08X\n",
5449 sblk->stat_IfHCInUcastPkts_hi, sblk->stat_IfHCInUcastPkts_lo,
5450 sblk->stat_IfHCInMulticastPkts_hi,
5451 sblk->stat_IfHCInMulticastPkts_lo);
5452
5453 BNX_PRINTF(sc, "IfHcInBroadcastPkts = 0x%08X:%08X, "
5454 "IfHcOutUcastPkts = 0x%08X:%08X\n",
5455 sblk->stat_IfHCInBroadcastPkts_hi,
5456 sblk->stat_IfHCInBroadcastPkts_lo,
5457 sblk->stat_IfHCOutUcastPkts_hi,
5458 sblk->stat_IfHCOutUcastPkts_lo);
5459
5460 BNX_PRINTF(sc, "IfHcOutMulticastPkts = 0x%08X:%08X, "
5461 "IfHcOutBroadcastPkts = 0x%08X:%08X\n",
5462 sblk->stat_IfHCOutMulticastPkts_hi,
5463 sblk->stat_IfHCOutMulticastPkts_lo,
5464 sblk->stat_IfHCOutBroadcastPkts_hi,
5465 sblk->stat_IfHCOutBroadcastPkts_lo);
5466
5467 if (sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors)
5468 BNX_PRINTF(sc, "0x%08X : "
5469 "emac_tx_stat_dot3statsinternalmactransmiterrors\n",
5470 sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors);
5471
5472 if (sblk->stat_Dot3StatsCarrierSenseErrors)
5473 BNX_PRINTF(sc, "0x%08X : Dot3StatsCarrierSenseErrors\n",
5474 sblk->stat_Dot3StatsCarrierSenseErrors);
5475
5476 if (sblk->stat_Dot3StatsFCSErrors)
5477 BNX_PRINTF(sc, "0x%08X : Dot3StatsFCSErrors\n",
5478 sblk->stat_Dot3StatsFCSErrors);
5479
5480 if (sblk->stat_Dot3StatsAlignmentErrors)
5481 BNX_PRINTF(sc, "0x%08X : Dot3StatsAlignmentErrors\n",
5482 sblk->stat_Dot3StatsAlignmentErrors);
5483
5484 if (sblk->stat_Dot3StatsSingleCollisionFrames)
5485 BNX_PRINTF(sc, "0x%08X : Dot3StatsSingleCollisionFrames\n",
5486 sblk->stat_Dot3StatsSingleCollisionFrames);
5487
5488 if (sblk->stat_Dot3StatsMultipleCollisionFrames)
5489 BNX_PRINTF(sc, "0x%08X : Dot3StatsMultipleCollisionFrames\n",
5490 sblk->stat_Dot3StatsMultipleCollisionFrames);
5491
5492 if (sblk->stat_Dot3StatsDeferredTransmissions)
5493 BNX_PRINTF(sc, "0x%08X : Dot3StatsDeferredTransmissions\n",
5494 sblk->stat_Dot3StatsDeferredTransmissions);
5495
5496 if (sblk->stat_Dot3StatsExcessiveCollisions)
5497 BNX_PRINTF(sc, "0x%08X : Dot3StatsExcessiveCollisions\n",
5498 sblk->stat_Dot3StatsExcessiveCollisions);
5499
5500 if (sblk->stat_Dot3StatsLateCollisions)
5501 BNX_PRINTF(sc, "0x%08X : Dot3StatsLateCollisions\n",
5502 sblk->stat_Dot3StatsLateCollisions);
5503
5504 if (sblk->stat_EtherStatsCollisions)
5505 BNX_PRINTF(sc, "0x%08X : EtherStatsCollisions\n",
5506 sblk->stat_EtherStatsCollisions);
5507
5508 if (sblk->stat_EtherStatsFragments)
5509 BNX_PRINTF(sc, "0x%08X : EtherStatsFragments\n",
5510 sblk->stat_EtherStatsFragments);
5511
5512 if (sblk->stat_EtherStatsJabbers)
5513 BNX_PRINTF(sc, "0x%08X : EtherStatsJabbers\n",
5514 sblk->stat_EtherStatsJabbers);
5515
5516 if (sblk->stat_EtherStatsUndersizePkts)
5517 BNX_PRINTF(sc, "0x%08X : EtherStatsUndersizePkts\n",
5518 sblk->stat_EtherStatsUndersizePkts);
5519
5520 if (sblk->stat_EtherStatsOverrsizePkts)
5521 BNX_PRINTF(sc, "0x%08X : EtherStatsOverrsizePkts\n",
5522 sblk->stat_EtherStatsOverrsizePkts);
5523
5524 if (sblk->stat_EtherStatsPktsRx64Octets)
5525 BNX_PRINTF(sc, "0x%08X : EtherStatsPktsRx64Octets\n",
5526 sblk->stat_EtherStatsPktsRx64Octets);
5527
5528 if (sblk->stat_EtherStatsPktsRx65Octetsto127Octets)
5529 BNX_PRINTF(sc, "0x%08X : EtherStatsPktsRx65Octetsto127Octets\n",
5530 sblk->stat_EtherStatsPktsRx65Octetsto127Octets);
5531
5532 if (sblk->stat_EtherStatsPktsRx128Octetsto255Octets)
5533 BNX_PRINTF(sc, "0x%08X : "
5534 "EtherStatsPktsRx128Octetsto255Octets\n",
5535 sblk->stat_EtherStatsPktsRx128Octetsto255Octets);
5536
5537 if (sblk->stat_EtherStatsPktsRx256Octetsto511Octets)
5538 BNX_PRINTF(sc, "0x%08X : "
5539 "EtherStatsPktsRx256Octetsto511Octets\n",
5540 sblk->stat_EtherStatsPktsRx256Octetsto511Octets);
5541
5542 if (sblk->stat_EtherStatsPktsRx512Octetsto1023Octets)
5543 BNX_PRINTF(sc, "0x%08X : "
5544 "EtherStatsPktsRx512Octetsto1023Octets\n",
5545 sblk->stat_EtherStatsPktsRx512Octetsto1023Octets);
5546
5547 if (sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets)
5548 BNX_PRINTF(sc, "0x%08X : "
5549 "EtherStatsPktsRx1024Octetsto1522Octets\n",
5550 sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets);
5551
5552 if (sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets)
5553 BNX_PRINTF(sc, "0x%08X : "
5554 "EtherStatsPktsRx1523Octetsto9022Octets\n",
5555 sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets);
5556
5557 if (sblk->stat_EtherStatsPktsTx64Octets)
5558 BNX_PRINTF(sc, "0x%08X : EtherStatsPktsTx64Octets\n",
5559 sblk->stat_EtherStatsPktsTx64Octets);
5560
5561 if (sblk->stat_EtherStatsPktsTx65Octetsto127Octets)
5562 BNX_PRINTF(sc, "0x%08X : EtherStatsPktsTx65Octetsto127Octets\n",
5563 sblk->stat_EtherStatsPktsTx65Octetsto127Octets);
5564
5565 if (sblk->stat_EtherStatsPktsTx128Octetsto255Octets)
5566 BNX_PRINTF(sc, "0x%08X : "
5567 "EtherStatsPktsTx128Octetsto255Octets\n",
5568 sblk->stat_EtherStatsPktsTx128Octetsto255Octets);
5569
5570 if (sblk->stat_EtherStatsPktsTx256Octetsto511Octets)
5571 BNX_PRINTF(sc, "0x%08X : "
5572 "EtherStatsPktsTx256Octetsto511Octets\n",
5573 sblk->stat_EtherStatsPktsTx256Octetsto511Octets);
5574
5575 if (sblk->stat_EtherStatsPktsTx512Octetsto1023Octets)
5576 BNX_PRINTF(sc, "0x%08X : "
5577 "EtherStatsPktsTx512Octetsto1023Octets\n",
5578 sblk->stat_EtherStatsPktsTx512Octetsto1023Octets);
5579
5580 if (sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets)
5581 BNX_PRINTF(sc, "0x%08X : "
5582 "EtherStatsPktsTx1024Octetsto1522Octets\n",
5583 sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets);
5584
5585 if (sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets)
5586 BNX_PRINTF(sc, "0x%08X : "
5587 "EtherStatsPktsTx1523Octetsto9022Octets\n",
5588 sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets);
5589
5590 if (sblk->stat_XonPauseFramesReceived)
5591 BNX_PRINTF(sc, "0x%08X : XonPauseFramesReceived\n",
5592 sblk->stat_XonPauseFramesReceived);
5593
5594 if (sblk->stat_XoffPauseFramesReceived)
5595 BNX_PRINTF(sc, "0x%08X : XoffPauseFramesReceived\n",
5596 sblk->stat_XoffPauseFramesReceived);
5597
5598 if (sblk->stat_OutXonSent)
5599 BNX_PRINTF(sc, "0x%08X : OutXonSent\n",
5600 sblk->stat_OutXonSent);
5601
5602 if (sblk->stat_OutXoffSent)
5603 BNX_PRINTF(sc, "0x%08X : OutXoffSent\n",
5604 sblk->stat_OutXoffSent);
5605
5606 if (sblk->stat_FlowControlDone)
5607 BNX_PRINTF(sc, "0x%08X : FlowControlDone\n",
5608 sblk->stat_FlowControlDone);
5609
5610 if (sblk->stat_MacControlFramesReceived)
5611 BNX_PRINTF(sc, "0x%08X : MacControlFramesReceived\n",
5612 sblk->stat_MacControlFramesReceived);
5613
5614 if (sblk->stat_XoffStateEntered)
5615 BNX_PRINTF(sc, "0x%08X : XoffStateEntered\n",
5616 sblk->stat_XoffStateEntered);
5617
5618 if (sblk->stat_IfInFramesL2FilterDiscards)
5619 BNX_PRINTF(sc, "0x%08X : IfInFramesL2FilterDiscards\n",
5620 sblk->stat_IfInFramesL2FilterDiscards);
5621
5622 if (sblk->stat_IfInRuleCheckerDiscards)
5623 BNX_PRINTF(sc, "0x%08X : IfInRuleCheckerDiscards\n",
5624 sblk->stat_IfInRuleCheckerDiscards);
5625
5626 if (sblk->stat_IfInFTQDiscards)
5627 BNX_PRINTF(sc, "0x%08X : IfInFTQDiscards\n",
5628 sblk->stat_IfInFTQDiscards);
5629
5630 if (sblk->stat_IfInMBUFDiscards)
5631 BNX_PRINTF(sc, "0x%08X : IfInMBUFDiscards\n",
5632 sblk->stat_IfInMBUFDiscards);
5633
5634 if (sblk->stat_IfInRuleCheckerP4Hit)
5635 BNX_PRINTF(sc, "0x%08X : IfInRuleCheckerP4Hit\n",
5636 sblk->stat_IfInRuleCheckerP4Hit);
5637
5638 if (sblk->stat_CatchupInRuleCheckerDiscards)
5639 BNX_PRINTF(sc, "0x%08X : CatchupInRuleCheckerDiscards\n",
5640 sblk->stat_CatchupInRuleCheckerDiscards);
5641
5642 if (sblk->stat_CatchupInFTQDiscards)
5643 BNX_PRINTF(sc, "0x%08X : CatchupInFTQDiscards\n",
5644 sblk->stat_CatchupInFTQDiscards);
5645
5646 if (sblk->stat_CatchupInMBUFDiscards)
5647 BNX_PRINTF(sc, "0x%08X : CatchupInMBUFDiscards\n",
5648 sblk->stat_CatchupInMBUFDiscards);
5649
5650 if (sblk->stat_CatchupInRuleCheckerP4Hit)
5651 BNX_PRINTF(sc, "0x%08X : CatchupInRuleCheckerP4Hit\n",
5652 sblk->stat_CatchupInRuleCheckerP4Hit);
5653
5654 BNX_PRINTF(sc,
5655 "-----------------------------"
5656 "--------------"
5657 "-----------------------------\n");
5658 }
5659
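/* Prints out a summary of the driver state. */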
5660 void
5661 bnx_dump_driver_state(struct bnx_softc *sc)
5662 {
5663 BNX_PRINTF(sc,
5664 "-----------------------------"
5665 " Driver State "
5666 "-----------------------------\n");
5667
5668 BNX_PRINTF(sc, "%p - (sc) driver softc structure virtual "
5669 "address\n", sc);
5670
5671 BNX_PRINTF(sc, "%p - (sc->status_block) status block virtual address\n",
5672 sc->status_block);
5673
5674 BNX_PRINTF(sc, "%p - (sc->stats_block) statistics block virtual "
5675 "address\n", sc->stats_block);
5676
5677 BNX_PRINTF(sc, "%p - (sc->tx_bd_chain) tx_bd chain virtual "
5678 "adddress\n", sc->tx_bd_chain);
5679
5680 BNX_PRINTF(sc, "%p - (sc->rx_bd_chain) rx_bd chain virtual address\n",
5681 sc->rx_bd_chain);
5682
5683 BNX_PRINTF(sc, "%p - (sc->tx_mbuf_ptr) tx mbuf chain virtual address\n",
5684 sc->tx_mbuf_ptr);
5685
5686 BNX_PRINTF(sc, "%p - (sc->rx_mbuf_ptr) rx mbuf chain virtual address\n",
5687 sc->rx_mbuf_ptr);
5688
5689 BNX_PRINTF(sc,
5690 " 0x%08X - (sc->interrupts_generated) h/w intrs\n",
5691 sc->interrupts_generated);
5692
5693 BNX_PRINTF(sc,
5694 " 0x%08X - (sc->rx_interrupts) rx interrupts handled\n",
5695 sc->rx_interrupts);
5696
5697 BNX_PRINTF(sc,
5698 " 0x%08X - (sc->tx_interrupts) tx interrupts handled\n",
5699 sc->tx_interrupts);
5700
5701 BNX_PRINTF(sc,
5702 " 0x%08X - (sc->last_status_idx) status block index\n",
5703 sc->last_status_idx);
5704
5705 BNX_PRINTF(sc, " 0x%08X - (sc->tx_prod) tx producer index\n",
5706 sc->tx_prod);
5707
5708 BNX_PRINTF(sc, " 0x%08X - (sc->tx_cons) tx consumer index\n",
5709 sc->tx_cons);
5710
5711 BNX_PRINTF(sc,
5712 " 0x%08X - (sc->tx_prod_bseq) tx producer bseq index\n",
5713 sc->tx_prod_bseq);
5714
5715 BNX_PRINTF(sc, " 0x%08X - (sc->rx_prod) rx producer index\n",
5716 sc->rx_prod);
5717
5718 BNX_PRINTF(sc, " 0x%08X - (sc->rx_cons) rx consumer index\n",
5719 sc->rx_cons);
5720
5721 BNX_PRINTF(sc,
5722 " 0x%08X - (sc->rx_prod_bseq) rx producer bseq index\n",
5723 sc->rx_prod_bseq);
5724
5725 BNX_PRINTF(sc,
5726 " 0x%08X - (sc->rx_mbuf_alloc) rx mbufs allocated\n",
5727 sc->rx_mbuf_alloc);
5728
5729 BNX_PRINTF(sc, " 0x%08X - (sc->free_rx_bd) free rx_bd's\n",
5730 sc->free_rx_bd);
5731
5732 BNX_PRINTF(sc,
5733 "0x%08X/%08X - (sc->rx_low_watermark) rx low watermark\n",
5734 sc->rx_low_watermark, (u_int32_t) USABLE_RX_BD);
5735
5736 BNX_PRINTF(sc,
5737 " 0x%08X - (sc->txmbuf_alloc) tx mbufs allocated\n",
5738 sc->tx_mbuf_alloc);
5739
5740 BNX_PRINTF(sc,
5741 " 0x%08X - (sc->rx_mbuf_alloc) rx mbufs allocated\n",
5742 sc->rx_mbuf_alloc);
5743
5744 BNX_PRINTF(sc, " 0x%08X - (sc->used_tx_bd) used tx_bd's\n",
5745 sc->used_tx_bd);
5746
5747 BNX_PRINTF(sc, "0x%08X/%08X - (sc->tx_hi_watermark) tx hi watermark\n",
5748 sc->tx_hi_watermark, (u_int32_t) USABLE_TX_BD);
5749
5750 BNX_PRINTF(sc,
5751 " 0x%08X - (sc->mbuf_alloc_failed) failed mbuf alloc\n",
5752 sc->mbuf_alloc_failed);
5753
5754 BNX_PRINTF(sc, "-------------------------------------------"
5755 "-----------------------------\n");
5756 }
5757
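/*
 * Prints out the hardware state: a summary of important status registers,
 * followed by a full register dump.
 */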
5758 void
5759 bnx_dump_hw_state(struct bnx_softc *sc)
5760 {
5761 u_int32_t val1;
5762 int i;
5763
5764 BNX_PRINTF(sc,
5765 "----------------------------"
5766 " Hardware State "
5767 "----------------------------\n");
5768
5769 BNX_PRINTF(sc, "0x%08X : bootcode version\n", sc->bnx_fw_ver);
5770
5771 val1 = REG_RD(sc, BNX_MISC_ENABLE_STATUS_BITS);
5772 BNX_PRINTF(sc, "0x%08X : (0x%04X) misc_enable_status_bits\n",
5773 val1, BNX_MISC_ENABLE_STATUS_BITS);
5774
5775 val1 = REG_RD(sc, BNX_DMA_STATUS);
5776 BNX_PRINTF(sc, "0x%08X : (0x%04X) dma_status\n", val1, BNX_DMA_STATUS);
5777
5778 val1 = REG_RD(sc, BNX_CTX_STATUS);
5779 BNX_PRINTF(sc, "0x%08X : (0x%04X) ctx_status\n", val1, BNX_CTX_STATUS);
5780
5781 val1 = REG_RD(sc, BNX_EMAC_STATUS);
5782 BNX_PRINTF(sc, "0x%08X : (0x%04X) emac_status\n", val1,
5783 BNX_EMAC_STATUS);
5784
5785 val1 = REG_RD(sc, BNX_RPM_STATUS);
5786 BNX_PRINTF(sc, "0x%08X : (0x%04X) rpm_status\n", val1, BNX_RPM_STATUS);
5787
5788 val1 = REG_RD(sc, BNX_TBDR_STATUS);
5789 BNX_PRINTF(sc, "0x%08X : (0x%04X) tbdr_status\n", val1,
5790 BNX_TBDR_STATUS);
5791
5792 val1 = REG_RD(sc, BNX_TDMA_STATUS);
5793 BNX_PRINTF(sc, "0x%08X : (0x%04X) tdma_status\n", val1,
5794 BNX_TDMA_STATUS);
5795
5796 val1 = REG_RD(sc, BNX_HC_STATUS);
5797 BNX_PRINTF(sc, "0x%08X : (0x%04X) hc_status\n", val1, BNX_HC_STATUS);
5798
5799 BNX_PRINTF(sc,
5800 "----------------------------"
5801 "----------------"
5802 "----------------------------\n");
5803
5804 BNX_PRINTF(sc,
5805 "----------------------------"
5806 " Register Dump "
5807 "----------------------------\n");
5808
5809 for (i = 0x400; i < 0x8000; i += 0x10)
5810 BNX_PRINTF(sc, "0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
5811 i, REG_RD(sc, i), REG_RD(sc, i + 0x4),
5812 REG_RD(sc, i + 0x8), REG_RD(sc, i + 0xC));
5813
5814 BNX_PRINTF(sc,
5815 "----------------------------"
5816 "----------------"
5817 "----------------------------\n");
5818 }
5819
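/*
 * Debug breakpoint routine.  Dumps the driver state and status block, and
 * can optionally invoke the remaining dump routines.
 *
 * Returns:
 *	Nothing.
 */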
5820 void
5821 bnx_breakpoint(struct bnx_softc *sc)
5822 {
5823
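/*
 * Unreachable code that references the dump routines so the compiler does
 * not warn about unused functions.
 */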
5824 if (0) {
5825 bnx_dump_txbd(sc, 0, NULL);
5826 bnx_dump_rxbd(sc, 0, NULL);
5827 bnx_dump_tx_mbuf_chain(sc, 0, USABLE_TX_BD);
5828 bnx_dump_rx_mbuf_chain(sc, 0, USABLE_RX_BD);
5829 bnx_dump_l2fhdr(sc, 0, NULL);
5830 bnx_dump_tx_chain(sc, 0, USABLE_TX_BD);
5831 bnx_dump_rx_chain(sc, 0, USABLE_RX_BD);
5832 bnx_dump_status_block(sc);
5833 bnx_dump_stats_block(sc);
5834 bnx_dump_driver_state(sc);
5835 bnx_dump_hw_state(sc);
5836 }
5837
5838 bnx_dump_driver_state(sc);
5839
5840 bnx_dump_status_block(sc);
5841
5842 #if 0
5843
5844 breakpoint();
5845 #endif
5846
5847 return;
5848 }
5849 #endif