/* [build] Avoid implicit-fallthrough warnings on GCC 7
 * ipxe.git / src/drivers/net/tg3/tg3_hw.c
 */
1 /*
2 * tg3.c: Broadcom Tigon3 ethernet driver.
3 *
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2011 Broadcom Corporation.
8 *
9 * Firmware is:
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
12 *
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
16 */
17
18 FILE_LICENCE ( GPL2_ONLY );
19
20 #include <mii.h>
21 #include <stdio.h>
22 #include <errno.h>
23 #include <unistd.h>
24 #include <byteswap.h>
25 #include <ipxe/pci.h>
26 #include <ipxe/iobuf.h>
27 #include <ipxe/timer.h>
28 #include <ipxe/malloc.h>
29 #include <ipxe/if_ether.h>
30 #include <ipxe/ethernet.h>
31 #include <ipxe/netdevice.h>
32
33 #include "tg3.h"
34
35 #define RESET_KIND_SHUTDOWN 0
36 #define RESET_KIND_INIT 1
37 #define RESET_KIND_SUSPEND 2
38
39 #define TG3_DEF_MAC_MODE 0
40
/* Write a 32-bit device register via PCI config-space indirection:
 * the register offset is latched into TG3PCI_REG_BASE_ADDR, then the
 * value is written through TG3PCI_REG_DATA.  Used when direct MMIO is
 * not safe (e.g. the PCI-X target workaround).
 */
void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{ DBGP("%s\n", __func__);

	/* Ordering matters: address must be latched before the data write. */
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
}
47
/* Read a 32-bit device register via PCI config-space indirection
 * (counterpart of tg3_write_indirect_reg32).
 *
 * Returns the register value at offset @off.
 */
u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{ DBGP("%s\n", __func__);

	u32 val;

	/* Latch the target offset, then read the data port. */
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	return val;
}
57
58 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
59 { DBGP("%s\n", __func__);
60
61 return readl(tp->regs + off + GRCMBOX_BASE);
62 }
63
64 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
65 { DBGP("%s\n", __func__);
66
67 writel(val, tp->regs + off + GRCMBOX_BASE);
68 }
69
/* Write a mailbox register in indirect mode.  Two mailboxes have
 * dedicated PCI config-space aliases and are written through those;
 * all others go through the generic indirect window at offset +0x5600.
 */
void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{ DBGP("%s\n", __func__);

	/* RX return ring consumer index has a config-space alias. */
	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	/* RX standard ring producer index likewise. */
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
96
/* Read a mailbox register in indirect mode via the generic window at
 * offset +0x5600 (counterpart of tg3_write_indirect_mbox's default
 * path).
 */
u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{ DBGP("%s\n", __func__);

	u32 val;

	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);

	return val;
}
107
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{ DBGP("%s\n", __func__);

	tw32(off, val);
	if (usec_wait)
		udelay(usec_wait);
	/* Read back to flush the posted write to the device. */
	tr32(off);

	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
127
128 /* stolen from legacy etherboot tg3 driver */
129 void tg3_set_power_state_0(struct tg3 *tp)
130 { DBGP("%s\n", __func__);
131
132 uint16_t power_control;
133 int pm = tp->pm_cap;
134
135 /* Make sure register accesses (indirect or otherwise)
136 * will function correctly.
137 */
138 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
139
140 pci_read_config_word(tp->pdev, pm + PCI_PM_CTRL, &power_control);
141
142 power_control |= PCI_PM_CTRL_PME_STATUS;
143 power_control &= ~(PCI_PM_CTRL_STATE_MASK);
144 power_control |= 0;
145 pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
146
147 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
148
149 return;
150 }
151
/* Read a 32-bit word from NIC SRAM at @off into *@val, via the PCI
 * memory window registers.
 */
void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{ DBGP("%s\n", __func__);

	/* 5906: the statistics block region of SRAM must not be
	 * accessed; return zero instead.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

	/* Always leave this as zero. */
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
}
167
168 #define PCI_VENDOR_ID_ARIMA 0x161f
169
/* Parse the hardware configuration that the bootcode left in NIC SRAM
 * (PHY id, LED mode, SERDES vs copper, write-protect, ASF/APE enables,
 * RGMII options) and record it in @tp.  If the SRAM signature is not
 * present, conservative defaults set at the top of the function stand.
 */
static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{ DBGP("%s\n", __func__);

	u32 val;
	u16 pmcsr;

	/* On some early chips the SRAM cannot be accessed in D3hot state,
	 * so need make sure we're in D0.
	 */
	pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
	pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
	mdelay(1);

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed.  Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	/* Defaults used when no valid config signature is found below. */
	tp->phy_id = TG3_PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device by default. */
	tg3_flag_set(tp, EEPROM_WRITE_PROT);

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		/* CFG_2 only exists on newer chips with a sane version. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		/* Reassemble the PHY id from the two SRAM halves. */
		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) << 0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!tg3_flag(tp, 5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
			else
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
		}

		if (tg3_flag(tp, 5750_PLUS))
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			/* 5750 A0/A1 need the PHY LED bits as well. */
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		}

		/* Dell LOMs on 5700/5701 wire the LEDs differently. */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
		    tp->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tg3_flag_set(tp, EEPROM_WRITE_PROT);
			/* Specific Arima boards misreport write-protect. */
			if ((tp->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->subsystem_device == 0x205a ||
			     tp->subsystem_device == 0x2063))
				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
		} else {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    tg3_flag(tp, ENABLE_ASF))
			tg3_flag_set(tp, ENABLE_APE);

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if ((tg3_flag(tp, 57765_PLUS) ||
		     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if (tg3_flag(tp, PCI_EXPRESS) &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS)) {
			u32 cfg3;

			/* NOTE(review): cfg3 is read but never used here.
			 * The Linux driver tests an ASPM debounce bit at
			 * this point; presumably dropped for iPXE — confirm
			 * whether this read can be removed entirely.
			 */
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
		}

		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
	}
}
358
/* Switch the core clock back to its normal source, stepping through
 * the ALTCLK intermediate settings required on older (pre-5705)
 * chips.  No-op on CPMU-equipped or 5780-class devices.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{ DBGP("%s\n", __func__);

	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	/* Preserve only the CLKRUN bits and the low divider field. */
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		/* Two-step transition via ALTCLK, with mandatory 40us
		 * settle time after each write (see _tw32_flush).
		 */
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
392
/* Probe the chip revision and bus configuration, set all the chip
 * family / workaround flags, select register-access methods, read the
 * bootcode configuration from SRAM and probe the PHY.  Must run before
 * any other hardware initialisation.
 *
 * Returns 0 on success or the (negative) error from tg3_phy_probe().
 */
int tg3_get_invariants(struct tg3 *tp)
{ DBGP("%s\n", __func__);

	u32 misc_ctrl_reg;
	u32 pci_state_reg, grc_misc_cfg;
	u32 val;
	u16 pci_cmd;
	int err;

	/* Force memory write invalidate off. If we leave it on,
	 * then on 5700_BX chips we have to enable a workaround.
	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
	 * to match the cacheline size. The Broadcom driver have this
	 * workaround but turns MWI off all the times so never uses
	 * it. This seems to suggest that the workaround is insufficient.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	/* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
	 * has the register indirect write enable bit set before
	 * we try to access any of the MMIO registers. It is also
	 * critical that the PCI-X hw workaround situation is decided
	 * before that as well.
	 */
	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			      &misc_ctrl_reg);

	tp->pci_chip_rev_id = (misc_ctrl_reg >>
			       MISC_HOST_CTRL_CHIPREV_SHIFT);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
		u32 prod_id_asic_rev;

		/* Newer chips report their revision in a product-id
		 * register whose location depends on the device id.
		 */
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
			pci_read_config_dword(tp->pdev,
					      TG3PCI_GEN2_PRODID_ASICREV,
					      &prod_id_asic_rev);
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			pci_read_config_dword(tp->pdev,
					      TG3PCI_GEN15_PRODID_ASICREV,
					      &prod_id_asic_rev);
		else
			pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
					      &prod_id_asic_rev);

		tp->pci_chip_rev_id = prod_id_asic_rev;
	}

	/* Wrong chip ID in 5752 A0. This code can be removed later
	 * as A0 is not in production.
	 */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;

	/* Initialize misc host control in PCI block. */
	tp->misc_host_ctrl |= (misc_ctrl_reg &
			       MISC_HOST_CTRL_CHIPREV);
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Build up the cumulative chip-family flags; each newer family
	 * implies the older ones below it.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		tg3_flag_set(tp, 5717_PLUS);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766 ||
	    tg3_flag(tp, 5717_PLUS))
		tg3_flag_set(tp, 57765_PLUS);

	/* Intentionally exclude ASIC_REV_5906 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, 5755_PLUS);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
	    tg3_flag(tp, 5755_PLUS) ||
	    tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, 5750_PLUS);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
	    tg3_flag(tp, 5750_PLUS))
		tg3_flag_set(tp, 5705_PLUS);

	if (tg3_flag(tp, 5717_PLUS))
		tg3_flag_set(tp, LRG_PROD_RING_CAP);

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);

	/* Classify the bus: PCI Express, plain PCI, or PCI-X. */
	tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
	if (tp->pcie_cap != 0) {
		u16 lnkctl;

		tg3_flag_set(tp, PCI_EXPRESS);

		pci_read_config_word(tp->pdev,
				     tp->pcie_cap + PCI_EXP_LNKCTL,
				     &lnkctl);
		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
				tg3_flag_set(tp, CLKREQ_BUG);
		} else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
			tg3_flag_set(tp, L1PLLPD_EN);
		}
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		/* 5785 is PCIe even though it lacks the capability. */
		tg3_flag_set(tp, PCI_EXPRESS);
	} else if (!tg3_flag(tp, 5705_PLUS) ||
		   tg3_flag(tp, 5780_CLASS)) {
		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
		if (!tp->pcix_cap) {
			DBGC(&tp->pdev->dev,
			     "Cannot find PCI-X capability, aborting\n");
			return -EIO;
		}

		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
			tg3_flag_set(tp, PCIX_MODE);
	}

	/* If we have an AMD 762 or VIA K8T800 chipset, write
	 * reordering to the mailbox registers done by the host
	 * controller can cause major troubles. We read back from
	 * every mailbox register write to force the writes to be
	 * posted to the chip in order.
	 */

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
			     &tp->pci_cacheline_sz);
	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
			     &tp->pci_lat_timer);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
	    tp->pci_lat_timer < 64) {
		tp->pci_lat_timer = 64;
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
		/* 5700 BX chips need to have their TX producer index
		 * mailboxes written twice to workaround a bug.
		 */
		tg3_flag_set(tp, TXD_MBOX_HWBUG);

		/* If we are in PCI-X mode, enable register write workaround.
		 *
		 * The workaround is to use indirect register accesses
		 * for all chip writes not to mailbox registers.
		 */
		if (tg3_flag(tp, PCIX_MODE)) {
			u32 pm_reg;

			tg3_flag_set(tp, PCIX_TARGET_HWBUG);

			/* The chip can have it's power management PCI config
			 * space registers clobbered due to this bug.
			 * So explicitly force the chip into D0 here.
			 */
			pci_read_config_dword(tp->pdev,
					      tp->pm_cap + PCI_PM_CTRL,
					      &pm_reg);
			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
			pci_write_config_dword(tp->pdev,
					       tp->pm_cap + PCI_PM_CTRL,
					       pm_reg);

			/* Also, force SERR#/PERR# in PCI command. */
			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
		}
	}

	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
		tg3_flag_set(tp, PCI_HIGH_SPEED);
	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
		tg3_flag_set(tp, PCI_32BIT);

	/* Chip-specific fixup from Broadcom driver */
	if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
	}

	/* Default to indirect mailbox access; 5906 overrides below
	 * with its GRC-window variants.
	 */
	tp->write32_mbox = tg3_write_indirect_reg32;
	tp->write32_rx_mbox = tg3_write_indirect_mbox;
	tp->write32_tx_mbox = tg3_write_indirect_mbox;
	tp->read32_mbox = tg3_read_indirect_mbox;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tp->read32_mbox = tg3_read32_mbox_5906;
		tp->write32_mbox = tg3_write32_mbox_5906;
		tp->write32_tx_mbox = tg3_write32_mbox_5906;
		tp->write32_rx_mbox = tg3_write32_mbox_5906;
	}

	/* Get eeprom hw config before calling tg3_set_power_state().
	 * In particular, the TG3_FLAG_IS_NIC flag must be
	 * determined before calling tg3_set_power_state() so that
	 * we know whether or not to switch out of Vaux power.
	 * When the flag is set, it means that GPIO1 is used for eeprom
	 * write protect and also implies that it is a LOM where GPIOs
	 * are not used to switch power.
	 */
	tg3_get_eeprom_hw_cfg(tp);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, CPMU_PRESENT);

	/* Set up tp->grc_local_ctrl before calling tg3_power_up().
	 * GPIO1 driven high will bring 5700's external PHY out of reset.
	 * It is also used as eeprom write protect on LOMs.
	 */
	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    tg3_flag(tp, EEPROM_WRITE_PROT))
		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
				       GRC_LCLCTRL_GPIO_OUTPUT1);
	/* Unused GPIO3 must be driven as output on 5752 because there
	 * are no pull-up resistors on unused GPIO pins.
	 */
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;

	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* Turn off the debug UART. */
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
		if (tg3_flag(tp, IS_NIC))
			/* Keep VMain power. */
			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					      GRC_LCLCTRL_GPIO_OUTPUT0;
	}

	/* Force the chip into D0. */
	tg3_set_power_state_0(tp);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		tp->phy_flags |= TG3_PHYFLG_IS_FET;

	/* A few boards don't want Ethernet@WireSpeed phy feature */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;

	/* PHY jitter / BER workaround flags for non-FET 5705+ chips. */
	if (tg3_flag(tp, 5705_PLUS) &&
	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
		} else
			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		tp->phy_otp = tg3_read_otp_phycfg(tp);
		if (tp->phy_otp == 0)
			tp->phy_otp = TG3_OTP_DEFAULT;
	}

	if (tg3_flag(tp, CPMU_PRESENT))
		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
	else
		tp->mi_mode = MAC_MI_MODE_BASE;

	tp->coalesce_mode = 0;
	if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;

	/* Set these bits to enable statistics workaround. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
	}

	tg3_mdio_init(tp);

	/* Initialize data/descriptor byte/word swapping. */
	val = tr32(GRC_MODE);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
			GRC_MODE_WORD_SWAP_B2HRX_DATA |
			GRC_MODE_B2HRX_ENABLE |
			GRC_MODE_HTX2B_ENABLE |
			GRC_MODE_HOST_STACKUP);
	else
		val &= GRC_MODE_HOST_STACKUP;

	tw32(GRC_MODE, val | tp->grc_mode);

	tg3_switch_clocks(tp);

	/* Clear this out for sanity. */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);
	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
	    !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
		u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);

		if (chiprevid == CHIPREV_ID_5701_A0 ||
		    chiprevid == CHIPREV_ID_5701_B0 ||
		    chiprevid == CHIPREV_ID_5701_B2 ||
		    chiprevid == CHIPREV_ID_5701_B5) {
			void *sram_base;

			/* Write some dummy words into the SRAM status block
			 * area, see if it reads back correctly. If the return
			 * value is bad, force enable the PCIX workaround.
			 */
			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;

			writel(0x00000000, sram_base);
			writel(0x00000000, sram_base + 4);
			writel(0xffffffff, sram_base + 4);
			if (readl(sram_base) != 0x00000000)
				tg3_flag_set(tp, PCIX_TARGET_HWBUG);
		}
	}

	udelay(50);
	/* FIXME: do we need nvram access? */
	/// tg3_nvram_init(tp);

	grc_misc_cfg = tr32(GRC_MISC_CFG);
	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
		tg3_flag_set(tp, IS_5788);

	if (!tg3_flag(tp, IS_5788) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
		tg3_flag_set(tp, TAGGED_STATUS);
	if (tg3_flag(tp, TAGGED_STATUS)) {
		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
				      HOSTCC_MODE_CLRTICK_TXBD);

		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
				       tp->misc_host_ctrl);
	}

	/* Preserve the APE MAC_MODE bits */
	if (tg3_flag(tp, ENABLE_APE))
		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	else
		tp->mac_mode = TG3_DEF_MAC_MODE;

	/* these are limited to 10/100 only */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	     tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
	    (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;

	err = tg3_phy_probe(tp);
	if (err) {
		DBGC(&tp->pdev->dev, "phy probe failed, err: %s\n", strerror(err));
		/* ... but do not return immediately ... */
	}

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		else
			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	}

	/* For all SERDES we poll the MAC status register. */
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		tg3_flag_set(tp, POLL_SERDES);
	else
		tg3_flag_clear(tp, POLL_SERDES);

	/* Increment the rx prod index on the rx std ring by at most
	 * 8 for these chips to workaround hw errata.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
		tp->rx_std_max_post = 8;

	/* Any tg3_phy_probe() failure is propagated here (deliberately
	 * deferred rather than returned immediately above).
	 */
	return err;
}
848
849 void tg3_init_bufmgr_config(struct tg3 *tp)
850 { DBGP("%s\n", __func__);
851
852 if (tg3_flag(tp, 57765_PLUS)) {
853 tp->bufmgr_config.mbuf_read_dma_low_water =
854 DEFAULT_MB_RDMA_LOW_WATER_5705;
855 tp->bufmgr_config.mbuf_mac_rx_low_water =
856 DEFAULT_MB_MACRX_LOW_WATER_57765;
857 tp->bufmgr_config.mbuf_high_water =
858 DEFAULT_MB_HIGH_WATER_57765;
859
860 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
861 DEFAULT_MB_RDMA_LOW_WATER_5705;
862 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
863 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
864 tp->bufmgr_config.mbuf_high_water_jumbo =
865 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
866 } else if (tg3_flag(tp, 5705_PLUS)) {
867 tp->bufmgr_config.mbuf_read_dma_low_water =
868 DEFAULT_MB_RDMA_LOW_WATER_5705;
869 tp->bufmgr_config.mbuf_mac_rx_low_water =
870 DEFAULT_MB_MACRX_LOW_WATER_5705;
871 tp->bufmgr_config.mbuf_high_water =
872 DEFAULT_MB_HIGH_WATER_5705;
873 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
874 tp->bufmgr_config.mbuf_mac_rx_low_water =
875 DEFAULT_MB_MACRX_LOW_WATER_5906;
876 tp->bufmgr_config.mbuf_high_water =
877 DEFAULT_MB_HIGH_WATER_5906;
878 }
879
880 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
881 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
882 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
883 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
884 tp->bufmgr_config.mbuf_high_water_jumbo =
885 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
886 } else {
887 tp->bufmgr_config.mbuf_read_dma_low_water =
888 DEFAULT_MB_RDMA_LOW_WATER;
889 tp->bufmgr_config.mbuf_mac_rx_low_water =
890 DEFAULT_MB_MACRX_LOW_WATER;
891 tp->bufmgr_config.mbuf_high_water =
892 DEFAULT_MB_HIGH_WATER;
893
894 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
895 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
896 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
897 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
898 tp->bufmgr_config.mbuf_high_water_jumbo =
899 DEFAULT_MB_HIGH_WATER_JUMBO;
900 }
901
902 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
903 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
904 }
905
906 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
907
908 void tg3_wait_for_event_ack(struct tg3 *tp)
909 { DBGP("%s\n", __func__);
910
911 int i;
912
913 for (i = 0; i < TG3_FW_EVENT_TIMEOUT_USEC / 10; i++) {
914 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
915 break;
916
917 udelay(10);
918 }
919 }
920
/* Write a 32-bit word to NIC SRAM at @off via the PCI memory window
 * (counterpart of tg3_read_mem).
 */
void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{ DBGP("%s\n", __func__);

	/* 5906: the statistics block region of SRAM must not be
	 * accessed; silently discard the write.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

	/* Always leave this as zero. */
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
}
934
/* Ask the ASF firmware to pause, using the driver-event mailbox
 * handshake.  Only applies when ASF is enabled without APE.
 */
static void tg3_stop_fw(struct tg3 *tp)
{ DBGP("%s\n", __func__);

	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}
950
/* Leave a magic signature in shared SRAM so the bootcode/firmware
 * knows a driver-initiated reset is about to happen.
 */
static void tg3_write_sig_pre_reset(struct tg3 *tp)
{ DBGP("%s\n", __func__);

	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
}
957
/* Mask the PCI interrupt line and raise the interrupt mailbox so no
 * further interrupts are delivered to the host.
 */
void tg3_disable_ints(struct tg3 *tp)
{ DBGP("%s\n", __func__);

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));

	/* A non-zero mailbox value blocks interrupt generation */
	tw32_mailbox_f(tp->int_mbox, 0x00000001);
}
966
/* Unmask the PCI interrupt line, re-arm the interrupt mailbox with
 * the last processed status tag, and force an initial interrupt so
 * that a status update which arrived while masked is not lost.
 */
void tg3_enable_ints(struct tg3 *tp)
{ DBGP("%s\n", __func__);

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;

	/* Writing the last processed tag re-enables interrupt
	 * generation from the mailbox.
	 */
	tw32_mailbox_f(tp->int_mbox, tp->last_tag << 24);

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);
}
984
985 #define MAX_WAIT_CNT 1000
986
987 /* To stop a block, clear the enable bit and poll till it clears. */
988 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit)
989 { DBGP("%s\n", __func__);
990
991 unsigned int i;
992 u32 val;
993
994 if (tg3_flag(tp, 5705_PLUS)) {
995 switch (ofs) {
996 case RCVLSC_MODE:
997 case DMAC_MODE:
998 case MBFREE_MODE:
999 case BUFMGR_MODE:
1000 case MEMARB_MODE:
1001 /* We can't enable/disable these bits of the
1002 * 5705/5750, just say success.
1003 */
1004 return 0;
1005
1006 default:
1007 break;
1008 }
1009 }
1010
1011 val = tr32(ofs);
1012 val &= ~enable_bit;
1013 tw32_f(ofs, val);
1014
1015 for (i = 0; i < MAX_WAIT_CNT; i++) {
1016 udelay(100);
1017 val = tr32(ofs);
1018 if ((val & enable_bit) == 0)
1019 break;
1020 }
1021
1022 if (i == MAX_WAIT_CNT) {
1023 DBGC(&tp->pdev->dev,
1024 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
1025 ofs, enable_bit);
1026 return -ENODEV;
1027 }
1028
1029 return 0;
1030 }
1031
/* Quiesce the entire datapath: mask interrupts, stop the MAC
 * receiver, shut down every RX and TX DMA/state-machine block in
 * order, reset the flow-through queues and clear the host status
 * block.  Individual stop failures are OR-merged into the return
 * value; 0 means everything stopped cleanly.
 */
static int tg3_abort_hw(struct tg3 *tp)
{ DBGP("%s\n", __func__);

	int i, err;

	tg3_disable_ints(tp);

	/* Stop accepting frames from the wire */
	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	/* Stop the receive-path blocks */
	err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE);

	/* Stop the transmit-path blocks */
	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	/* Disable the MAC transmitter and wait for it to drain */
	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		DBGC(&tp->pdev->dev,
		     "%s timed out, TX_MODE_ENABLE will not clear "
		     "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	/* Stop host coalescing, write DMA and the memory interfaces */
	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE);

	/* Pulse-reset the flow-through queues */
	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE);

	/* Wipe the host status block so stale indices are not reused */
	if (tp->hw_status)
		memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);

	return err;
}
1092
1093 void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
1094 { DBGP("%s\n", __func__);
1095
1096 u32 addr_high, addr_low;
1097 int i;
1098
1099 addr_high = ((tp->dev->ll_addr[0] << 8) |
1100 tp->dev->ll_addr[1]);
1101 addr_low = ((tp->dev->ll_addr[2] << 24) |
1102 (tp->dev->ll_addr[3] << 16) |
1103 (tp->dev->ll_addr[4] << 8) |
1104 (tp->dev->ll_addr[5] << 0));
1105 for (i = 0; i < 4; i++) {
1106 if (i == 1 && skip_mac_1)
1107 continue;
1108 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
1109 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
1110 }
1111
1112 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1113 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1114 for (i = 0; i < 12; i++) {
1115 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
1116 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
1117 }
1118 }
1119
1120 addr_high = (tp->dev->ll_addr[0] +
1121 tp->dev->ll_addr[1] +
1122 tp->dev->ll_addr[2] +
1123 tp->dev->ll_addr[3] +
1124 tp->dev->ll_addr[4] +
1125 tp->dev->ll_addr[5]) &
1126 TX_BACKOFF_SEED_MASK;
1127 tw32(MAC_TX_BACKOFF_SEED, addr_high);
1128 }
1129
/* Save the PCI command register before chip reset (the core-clock
 * reset clears it on some chips); restored afterwards by
 * tg3_restore_pci_state().
 */
static void tg3_save_pci_state(struct tg3 *tp)
{ DBGP("%s\n", __func__);

	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
1136
/* Restore PCI state after chip reset: re-enable indirect register
 * access, rewrite the saved PCI command register, and re-apply the
 * cacheline size, latency timer and PCI-X settings that the reset
 * may have clobbered.  Counterpart of tg3_save_pci_state().
 */
static void tg3_restore_pci_state(struct tg3 *tp)
{ DBGP("%s\n", __func__);

	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;

	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	/* Restore the PCI command register saved before the reset */
	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
				      tp->pci_cacheline_sz);
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}


	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}
}
1176
/* Wait for the bootcode/firmware to signal that initialization is
 * complete after a chip reset.  Returns 0 on success (including the
 * no-firmware-fitted case, see below); -ENODEV only on the 5906
 * VCPU timeout path.
 */
static int tg3_poll_fw(struct tg3 *tp)
{ DBGP("%s\n", __func__);

	int i;
	u32 val;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* 5906 signals via VCPU_STATUS instead of the
		 * firmware mailbox.  Wait up to 20ms for init done.
		 */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete: firmware
	 * writes back the complement of the magic value (up to ~1s
	 * at 10us per poll).
	 */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == (u32)~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		DBGC(tp->dev, "No firmware running\n");
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
1221
1222 static int tg3_nvram_lock(struct tg3 *tp)
1223 { DBGP("%s\n", __func__);
1224
1225 if (tg3_flag(tp, NVRAM)) {
1226 int i;
1227
1228 if (tp->nvram_lock_cnt == 0) {
1229 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
1230 for (i = 0; i < 8000; i++) {
1231 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
1232 break;
1233 udelay(20);
1234 }
1235 if (i == 8000) {
1236 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
1237 return -ENODEV;
1238 }
1239 }
1240 tp->nvram_lock_cnt++;
1241 }
1242 return 0;
1243 }
1244
1245 static void tg3_nvram_unlock(struct tg3 *tp)
1246 { DBGP("%s\n", __func__);
1247
1248 if (tg3_flag(tp, NVRAM)) {
1249 if (tp->nvram_lock_cnt > 0)
1250 tp->nvram_lock_cnt--;
1251 if (tp->nvram_lock_cnt == 0)
1252 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
1253 }
1254 }
1255
/* Perform a full GRC core-clock chip reset and bring the device back
 * to a register-accessible state: save PCI state, issue the reset,
 * restore PCI state, re-enable the memory arbiter, restore MAC mode
 * and wait for bootcode/firmware to come back up.  Returns 0 or a
 * negative error from tg3_poll_fw().
 */
static int tg3_chip_reset(struct tg3 *tp)
{ DBGP("%s\n", __func__);

	u32 val;
	int err;

	tg3_nvram_lock(tp);


	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    tg3_flag(tp, 5755_PLUS))
		tw32(GRC_FASTBOOT_PC, 0);

#if 0
	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things. So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;
#endif

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared. The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tg3_flag_set(tp, CHIP_RESETTING);

	/* Invalidate the cached status block state */
	if (tp->hw_status) {
		tp->hw_status->status = 0;
		tp->hw_status->status_tag = 0;
	}
	tp->last_tag = 0;
	tp->last_irq_tag = 0;

	mb();

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Force PCIe 1.0a mode */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS) &&
		    tr32(TG3_PCIE_PHY_TSTCTL) ==
		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);

		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	/* Manage gphy power for all CPMU absent PCIe devices. */
	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;

	tw32(GRC_MISC_CFG, val);

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tg3_flag(tp, PCI_EXPRESS) && tp->pcie_cap) {
		u16 val16;

		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
			int i;
			u32 cfg_val;

			/* Wait for link training to complete.  */
			for (i = 0; i < 5000; i++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}

		/* Clear the "no snoop" and "relaxed ordering" bits. */
		pci_read_config_word(tp->pdev,
				     tp->pcie_cap + PCI_EXP_DEVCTL,
				     &val16);
		val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
			   PCI_EXP_DEVCTL_NOSNOOP_EN);
		/*
		 * Older PCIe devices only support the 128 byte
		 * MPS setting.  Enforce the restriction.
		 */
		if (!tg3_flag(tp, CPMU_PRESENT))
			val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
		pci_write_config_word(tp->pdev,
				      tp->pcie_cap + PCI_EXP_DEVCTL,
				      val16);

		/* Clear error status */
		pci_write_config_word(tp->pdev,
				      tp->pcie_cap + PCI_EXP_DEVSTA,
				      PCI_EXP_DEVSTA_CED |
				      PCI_EXP_DEVSTA_NFED |
				      PCI_EXP_DEVSTA_FED |
				      PCI_EXP_DEVSTA_URD);
	}

	tg3_restore_pci_state(tp);

	tg3_flag_clear(tp, CHIP_RESETTING);
	tg3_flag_clear(tp, ERROR_PROCESSED);

	/* Re-enable the memory arbiter; 5780-class parts preserve
	 * their existing mode bits.
	 */
	val = 0;
	if (tg3_flag(tp, 5780_CLASS))
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	tw32(GRC_MODE, tp->grc_mode);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	/* Restore the MAC port mode appropriate for the PHY type */
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
		val = tp->mac_mode;
	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
		val = tp->mac_mode;
	} else
		val = 0;

	tw32_f(MAC_MODE, val);
	udelay(40);

	/* Wait for bootcode/firmware to finish re-initialization */
	err = tg3_poll_fw(tp);
	if (err)
		return err;

	if (tg3_flag(tp, PCI_EXPRESS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		val = tr32(TG3_CPMU_CLCK_ORIDE);
		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
	}

	if (tg3_flag(tp, CPMU_PRESENT)) {
		tw32(TG3_CPMU_D0_CLCK_POLICY, 0);
		val = tr32(TG3_CPMU_CLCK_ORIDE_EN);
		tw32(TG3_CPMU_CLCK_ORIDE_EN,
		     val | CPMU_CLCK_ORIDE_MAC_CLCK_ORIDE_EN);
	}

	return 0;
}
1479
int tg3_halt(struct tg3 *tp)
{ DBGP("%s\n", __func__);

	int rc;

	/* Quiesce firmware and advertise the impending reset */
	tg3_stop_fw(tp);
	tg3_write_sig_pre_reset(tp);

	/* Best-effort datapath shutdown, then the actual reset */
	tg3_abort_hw(tp);
	rc = tg3_chip_reset(tp);

	/* Reprogram the station address lost across the reset */
	__tg3_set_mac_addr(tp, 0);

	return rc;
}
1499
/* Read one 32-bit word from the legacy serial EEPROM via the GRC
 * EEPROM address/data registers (used when the NVRAM interface is
 * not present).  @offset must be word-aligned and within range.
 * Returns 0 on success, -EINVAL on a bad offset, or -EBUSY if the
 * EEPROM never signals completion.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
				       u32 offset, u32 *val)
{ DBGP("%s\n", __func__);

	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	/* Preserve unrelated bits, program device id 0 and the
	 * target address, and start a read transaction.
	 */
	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	/* Poll for completion, up to ~1 second */
	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		mdelay(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = bswap_32(tmp);

	return 0;
}
1540
1541 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
1542 { DBGP("%s\n", __func__);
1543
1544 if (tg3_flag(tp, NVRAM) &&
1545 tg3_flag(tp, NVRAM_BUFFERED) &&
1546 tg3_flag(tp, FLASH) &&
1547 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
1548 (tp->nvram_jedecnum == JEDEC_ATMEL))
1549
1550 addr = ((addr / tp->nvram_pagesize) <<
1551 ATMEL_AT45DB0X1B_PAGE_POS) +
1552 (addr % tp->nvram_pagesize);
1553
1554 return addr;
1555 }
1556
1557 static void tg3_enable_nvram_access(struct tg3 *tp)
1558 { DBGP("%s\n", __func__);
1559
1560 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
1561 u32 nvaccess = tr32(NVRAM_ACCESS);
1562
1563 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
1564 }
1565 }
1566
1567 static void tg3_disable_nvram_access(struct tg3 *tp)
1568 { DBGP("%s\n", __func__);
1569
1570 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
1571 u32 nvaccess = tr32(NVRAM_ACCESS);
1572
1573 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
1574 }
1575 }
1576
1577 #define NVRAM_CMD_TIMEOUT 10000
1578
1579 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
1580 { DBGP("%s\n", __func__);
1581
1582 int i;
1583
1584 tw32(NVRAM_CMD, nvram_cmd);
1585 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
1586 udelay(10);
1587 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
1588 udelay(10);
1589 break;
1590 }
1591 }
1592
1593 if (i == NVRAM_CMD_TIMEOUT)
1594 return -EBUSY;
1595
1596 return 0;
1597 }
1598
/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{ DBGP("%s\n", __func__);

	int ret;

	/* Fall back to the legacy EEPROM interface when the NVRAM
	 * interface is not present.
	 */
	if (!tg3_flag(tp, NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	/* Translate to the flash part's physical addressing */
	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	/* Serialize against firmware access to the NVRAM interface */
	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);

	/* Always undo access enable and the lock, even on failure */
	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
1637
1638 /* Ensures NVRAM data is in bytestream format. */
1639 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, u32 *val)
1640 { DBGP("%s\n", __func__);
1641
1642 u32 v = 0;
1643 int res = tg3_nvram_read(tp, offset, &v);
1644 if (!res)
1645 *val = cpu_to_be32(v);
1646 return res;
1647 }
1648
/* Determine the device's permanent MAC address and store it in
 * tp->dev->hw_addr.  Sources are tried in order: the SRAM mailbox
 * written by bootcode, NVRAM, and finally the live MAC address
 * registers.  Returns 0 on success or -EINVAL if no valid address
 * could be found.
 */
int tg3_get_device_address(struct tg3 *tp)
{ DBGP("%s\n", __func__);

	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;

	/* Select the per-chip NVRAM offset of the MAC address;
	 * dual-MAC and multi-function parts keep a per-port copy.
	 */
	mac_offset = 0x7c;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (tg3_flag(tp, 5717_PLUS)) {
		if (PCI_FUNC(tp->pdev->busdevfn) & 1)
			mac_offset = 0xcc;
		if (PCI_FUNC(tp->pdev->busdevfn) > 1)
			mac_offset += 0x18c;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	/* 0x484b is ASCII "HK" - presumably a bootcode signature
	 * marking a valid stored address; TODO confirm against
	 * bootcode documentation.
	 */
	if ((hi >> 16) == 0x484b) {
		dev->hw_addr[0] = (hi >> 8) & 0xff;
		dev->hw_addr[1] = (hi >> 0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->hw_addr[2] = (lo >> 24) & 0xff;
		dev->hw_addr[3] = (lo >> 16) & 0xff;
		dev->hw_addr[4] = (lo >> 8) & 0xff;
		dev->hw_addr[5] = (lo >> 0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->hw_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!tg3_flag(tp, NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			/* hi/lo are in bytestream order; the address
			 * is the last two bytes of hi plus all four
			 * bytes of lo.
			 */
			memcpy(&dev->hw_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->hw_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->hw_addr[5] = lo & 0xff;
			dev->hw_addr[4] = (lo >> 8) & 0xff;
			dev->hw_addr[3] = (lo >> 16) & 0xff;
			dev->hw_addr[2] = (lo >> 24) & 0xff;
			dev->hw_addr[1] = hi & 0xff;
			dev->hw_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->hw_addr[0])) {
		return -EINVAL;
	}

	return 0;
}
1716
1717 static void __tg3_set_rx_mode(struct net_device *dev)
1718 { DBGP("%s\n", __func__);
1719
1720 struct tg3 *tp = netdev_priv(dev);
1721 u32 rx_mode;
1722
1723 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
1724 RX_MODE_KEEP_VLAN_TAG);
1725
1726 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
1727
1728 /* Accept all multicast. */
1729 tw32(MAC_HASH_REG_0, 0xffffffff);
1730 tw32(MAC_HASH_REG_1, 0xffffffff);
1731 tw32(MAC_HASH_REG_2, 0xffffffff);
1732 tw32(MAC_HASH_REG_3, 0xffffffff);
1733
1734 if (rx_mode != tp->rx_mode) {
1735 tp->rx_mode = rx_mode;
1736 tw32_f(MAC_RX_MODE, rx_mode);
1737 udelay(10);
1738 }
1739 }
1740
/* Program the host coalescing engine with this driver's fixed,
 * latency-oriented settings: no RX tick delay, interrupt after
 * every received frame, minimal TX coalescing, and (on pre-5705
 * parts) the statistics/status tick intervals.
 */
static void __tg3_set_coalesce(struct tg3 *tp)
{ DBGP("%s\n", __func__);


	tw32(HOSTCC_RXCOL_TICKS, 0);
	tw32(HOSTCC_TXCOL_TICKS, LOW_TXCOL_TICKS);
	tw32(HOSTCC_RXMAX_FRAMES, 1);
	/* FIXME: mix between TXMAX and RXMAX taken from legacy driver */
	tw32(HOSTCC_TXMAX_FRAMES, LOW_RXMAX_FRAMES);
	tw32(HOSTCC_RXCOAL_MAXF_INT, 1);
	tw32(HOSTCC_TXCOAL_MAXF_INT, 0);

	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 val = DEFAULT_STAT_COAL_TICKS;

		tw32(HOSTCC_RXCOAL_TICK_INT, DEFAULT_RXCOAL_TICK_INT);
		tw32(HOSTCC_TXCOAL_TICK_INT, DEFAULT_TXCOAL_TICK_INT);

		/* Stop statistics coalescing while the link is down */
		if (!netdev_link_ok(tp->dev))
			val = 0;

		tw32(HOSTCC_STAT_COAL_TICKS, val);
	}
}
1765
/* Write one ring control block ("bdinfo") into NIC SRAM: the 64-bit
 * host DMA address of the ring, its maxlen/flags word and, on
 * pre-5705 devices, the NIC-local ring address.
 */
static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
			   dma_addr_t mapping, u32 maxlen_flags,
			   u32 nic_addr)
{ DBGP("%s\n", __func__);

	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
		      ((u64) mapping >> 32));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
		      ((u64) mapping & 0xffffffff));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
		      maxlen_flags);

	/* 5705+ devices have no NIC-local ring address field */
	if (!tg3_flag(tp, 5705_PLUS))
		tg3_write_mem(tp,
			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
			      nic_addr);
}
1786
/* Put all rings into a known post-reset state: disable every send
 * and receive-return ring control block except the first, clear the
 * interrupt/producer/consumer mailboxes and the status block, then
 * re-program the bdinfo entries for the single TX and RX-return
 * ring this driver actually uses.
 */
static void tg3_rings_reset(struct tg3 *tp)
{ DBGP("%s\n", __func__);

	int i;
	u32 txrcb, rxrcb, limit;

	/* Disable all transmit rings but the first. */
	if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
	else
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;

	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);


	/* Disable all receive return rings but the first. */
	if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
	else if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
	else
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;

	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);

	/* Disable interrupts */
	tw32_mailbox_f(tp->int_mbox, 1);

	/* Zero the software producer/consumer state and mailboxes */
	tp->tx_prod = 0;
	tp->tx_cons = 0;
	tw32_mailbox(tp->prodmbox, 0);
	tw32_rx_mbox(tp->consmbox, 0);

	/* Make sure the NIC-based send BD rings are disabled. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
		for (i = 0; i < 16; i++)
			tw32_tx_mbox(mbox + i * 8, 0);
	}

	txrcb = NIC_SRAM_SEND_RCB;
	rxrcb = NIC_SRAM_RCV_RET_RCB;

	/* Clear status block in ram. */
	memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);

	/* Set status block DMA address */
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tp->status_mapping >> 32));
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tp->status_mapping & 0xffffffff));

	if (tp->tx_ring) {
		tg3_set_bdinfo(tp, txrcb, tp->tx_desc_mapping,
			       (TG3_TX_RING_SIZE <<
				BDINFO_FLAGS_MAXLEN_SHIFT),
			       NIC_SRAM_TX_BUFFER_DESC);
		txrcb += TG3_BDINFO_SIZE;
	}

	/* FIXME: will TG3_RX_RET_MAX_SIZE_5705 work on all cards? */
	if (tp->rx_rcb) {
		tg3_set_bdinfo(tp, rxrcb, tp->rx_rcb_mapping,
			       TG3_RX_RET_MAX_SIZE_5705 <<
			       BDINFO_FLAGS_MAXLEN_SHIFT, 0);
		rxrcb += TG3_BDINFO_SIZE;
	}
}
1868
1869 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
1870 { DBGP("%s\n", __func__);
1871
1872 u32 val, bdcache_maxcnt;
1873
1874 if (!tg3_flag(tp, 5750_PLUS) ||
1875 tg3_flag(tp, 5780_CLASS) ||
1876 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
1877 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
1878 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
1879 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
1880 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
1881 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
1882 else
1883 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
1884
1885
1886 /* NOTE: legacy driver uses RX_PENDING / 8, we only use 4 descriptors
1887 * for now, use / 4 so the result is > 0
1888 */
1889 val = TG3_DEF_RX_RING_PENDING / 4;
1890 tw32(RCVBDI_STD_THRESH, val);
1891
1892 if (tg3_flag(tp, 57765_PLUS))
1893 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
1894 }
1895
1896 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
1897 { DBGP("%s\n", __func__);
1898
1899 u32 val, rdmac_mode;
1900 int i, err, limit;
1901 struct tg3_rx_prodring_set *tpr = &tp->prodring;
1902
1903 tg3_stop_fw(tp);
1904
1905 tg3_write_sig_pre_reset(tp);
1906
1907 if (tg3_flag(tp, INIT_COMPLETE))
1908 tg3_abort_hw(tp);
1909
1910 if (reset_phy)
1911 tg3_phy_reset(tp);
1912
1913 err = tg3_chip_reset(tp);
1914 if (err)
1915 return err;
1916
1917 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
1918 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
1919 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
1920 PCIE_PWR_MGMT_L1_THRESH_4MS;
1921 tw32(PCIE_PWR_MGMT_THRESH, val);
1922
1923 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
1924 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
1925
1926 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
1927
1928 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
1929 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
1930 }
1931
1932 if (tg3_flag(tp, L1PLLPD_EN)) {
1933 u32 grc_mode = tr32(GRC_MODE);
1934
1935 /* Access the lower 1K of PL PCIE block registers. */
1936 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
1937 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
1938
1939 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
1940 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
1941 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
1942
1943 tw32(GRC_MODE, grc_mode);
1944 }
1945
1946 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
1947 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
1948 u32 grc_mode = tr32(GRC_MODE);
1949
1950 /* Access the lower 1K of PL PCIE block registers. */
1951 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
1952 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
1953
1954 val = tr32(TG3_PCIE_TLDLPL_PORT +
1955 TG3_PCIE_PL_LO_PHYCTL5);
1956 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
1957 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
1958
1959 tw32(GRC_MODE, grc_mode);
1960 }
1961
1962 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
1963 u32 grc_mode = tr32(GRC_MODE);
1964
1965 /* Access the lower 1K of DL PCIE block registers. */
1966 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
1967 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
1968
1969 val = tr32(TG3_PCIE_TLDLPL_PORT +
1970 TG3_PCIE_DL_LO_FTSMAX);
1971 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
1972 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
1973 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
1974
1975 tw32(GRC_MODE, grc_mode);
1976 }
1977
1978 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
1979 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
1980 val |= CPMU_LSPD_10MB_MACCLK_6_25;
1981 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
1982 }
1983
1984 /* This works around an issue with Athlon chipsets on
1985 * B3 tigon3 silicon. This bit has no effect on any
1986 * other revision. But do not set this on PCI Express
1987 * chips and don't even touch the clocks if the CPMU is present.
1988 */
1989 if (!tg3_flag(tp, CPMU_PRESENT)) {
1990 if (!tg3_flag(tp, PCI_EXPRESS))
1991 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
1992 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
1993 }
1994
1995 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
1996 tg3_flag(tp, PCIX_MODE)) {
1997 val = tr32(TG3PCI_PCISTATE);
1998 val |= PCISTATE_RETRY_SAME_DMA;
1999 tw32(TG3PCI_PCISTATE, val);
2000 }
2001
2002 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
2003 /* Enable some hw fixes. */
2004 val = tr32(TG3PCI_MSI_DATA);
2005 val |= (1 << 26) | (1 << 28) | (1 << 29);
2006 tw32(TG3PCI_MSI_DATA, val);
2007 }
2008
2009 /* Descriptor ring init may make accesses to the
2010 * NIC SRAM area to setup the TX descriptors, so we
2011 * can only do this after the hardware has been
2012 * successfully reset.
2013 */
2014 err = tg3_init_rings(tp);
2015 if (err)
2016 return err;
2017
2018 if (tg3_flag(tp, 57765_PLUS)) {
2019 val = tr32(TG3PCI_DMA_RW_CTRL) &
2020 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
2021 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
2022 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
2023 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
2024 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
2025 val |= DMA_RWCTRL_TAGGED_STAT_WA;
2026 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
2027 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
2028 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
2029 /* This value is determined during the probe time DMA
2030 * engine test, tg3_test_dma.
2031 */
2032 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
2033 }
2034
2035 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
2036 GRC_MODE_4X_NIC_SEND_RINGS |
2037 GRC_MODE_NO_TX_PHDR_CSUM |
2038 GRC_MODE_NO_RX_PHDR_CSUM);
2039 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
2040 tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
2041
2042 /* Pseudo-header checksum is done by hardware logic and not
2043 * the offload processers, so make the chip do the pseudo-
2044 * header checksums on receive. For transmit it is more
2045 * convenient to do the pseudo-header checksum in software
2046 * as Linux does that on transmit for us in all cases.
2047 */
2048 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
2049
2050 tw32(GRC_MODE,
2051 tp->grc_mode |
2052 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
2053
2054 /* Setup the timer prescalar register. Clock is always 66Mhz. */
2055 val = tr32(GRC_MISC_CFG);
2056 val &= ~0xff;
2057 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
2058 tw32(GRC_MISC_CFG, val);
2059
2060 /* Initialize MBUF/DESC pool. */
2061 if (tg3_flag(tp, 5750_PLUS)) {
2062 /* Do nothing. */
2063 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
2064 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
2065 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
2066 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
2067 else
2068 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
2069 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
2070 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
2071 }
2072
2073 tw32(BUFMGR_MB_RDMA_LOW_WATER,
2074 tp->bufmgr_config.mbuf_read_dma_low_water);
2075 tw32(BUFMGR_MB_MACRX_LOW_WATER,
2076 tp->bufmgr_config.mbuf_mac_rx_low_water);
2077 tw32(BUFMGR_MB_HIGH_WATER,
2078 tp->bufmgr_config.mbuf_high_water);
2079
2080 tw32(BUFMGR_DMA_LOW_WATER,
2081 tp->bufmgr_config.dma_low_water);
2082 tw32(BUFMGR_DMA_HIGH_WATER,
2083 tp->bufmgr_config.dma_high_water);
2084
2085 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
2086 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2087 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
2088 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2089 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
2090 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
2091 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
2092 tw32(BUFMGR_MODE, val);
2093 for (i = 0; i < 2000; i++) {
2094 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
2095 break;
2096 udelay(10);
2097 }
2098 if (i >= 2000) {
2099 DBGC(tp->dev, "%s cannot enable BUFMGR\n", __func__);
2100 return -ENODEV;
2101 }
2102
2103 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
2104 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
2105
2106 tg3_setup_rxbd_thresholds(tp);
2107
2108 /* Initialize TG3_BDINFO's at:
2109 * RCVDBDI_STD_BD: standard eth size rx ring
2110 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
2111 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
2112 *
2113 * like so:
2114 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
2115 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
2116 * ring attribute flags
2117 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
2118 *
2119 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
2120 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
2121 *
2122 * The size of each ring is fixed in the firmware, but the location is
2123 * configurable.
2124 */
2125 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
2126 ((u64) tpr->rx_std_mapping >> 32));
2127 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
2128 ((u64) tpr->rx_std_mapping & 0xffffffff));
2129 if (!tg3_flag(tp, 5717_PLUS))
2130 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
2131 NIC_SRAM_RX_BUFFER_DESC);
2132
2133 /* Disable the mini ring */
2134 if (!tg3_flag(tp, 5705_PLUS))
2135 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
2136 BDINFO_FLAGS_DISABLED);
2137
2138 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
2139
2140 if (tg3_flag(tp, 57765_PLUS))
2141 val |= (RX_STD_MAX_SIZE << 2);
2142
2143 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
2144
2145 tpr->rx_std_prod_idx = 0;
2146
2147 /* std prod index is updated by tg3_refill_prod_ring() */
2148 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, 0);
2149 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, 0);
2150
2151 tg3_rings_reset(tp);
2152
2153 __tg3_set_mac_addr(tp,0);
2154
2155 #define TG3_MAX_MTU 1522
2156 /* MTU + ethernet header + FCS + optional VLAN tag */
2157 tw32(MAC_RX_MTU_SIZE, TG3_MAX_MTU);
2158
2159 /* The slot time is changed by tg3_setup_phy if we
2160 * run at gigabit with half duplex.
2161 */
2162 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2163 (6 << TX_LENGTHS_IPG_SHIFT) |
2164 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
2165
2166 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
2167 val |= tr32(MAC_TX_LENGTHS) &
2168 (TX_LENGTHS_JMB_FRM_LEN_MSK |
2169 TX_LENGTHS_CNT_DWN_VAL_MSK);
2170
2171 tw32(MAC_TX_LENGTHS, val);
2172
2173 /* Receive rules. */
2174 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
2175 tw32(RCVLPC_CONFIG, 0x0181);
2176
2177 /* Calculate RDMAC_MODE setting early, we need it to determine
2178 * the RCVLPC_STATE_ENABLE mask.
2179 */
2180 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
2181 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
2182 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
2183 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
2184 RDMAC_MODE_LNGREAD_ENAB);
2185
2186 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
2187 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
2188
2189 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
2190 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
2191 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
2192 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
2193 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
2194 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
2195
2196 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
2197 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
2198 if (tg3_flag(tp, TSO_CAPABLE) &&
2199 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2200 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
2201 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
2202 !tg3_flag(tp, IS_5788)) {
2203 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
2204 }
2205 }
2206
2207 if (tg3_flag(tp, PCI_EXPRESS))
2208 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
2209
2210 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
2211 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
2212
2213 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
2214 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
2215 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
2216 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
2217 tg3_flag(tp, 57765_PLUS)) {
2218 val = tr32(TG3_RDMA_RSRVCTRL_REG);
2219 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2220 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2221 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
2222 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
2223 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
2224 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
2225 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
2226 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
2227 }
2228 tw32(TG3_RDMA_RSRVCTRL_REG,
2229 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
2230 }
2231
2232 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2233 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2234 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
2235 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
2236 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
2237 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
2238 }
2239
2240 /* Receive/send statistics. */
2241 if (tg3_flag(tp, 5750_PLUS)) {
2242 val = tr32(RCVLPC_STATS_ENABLE);
2243 val &= ~RCVLPC_STATSENAB_DACK_FIX;
2244 tw32(RCVLPC_STATS_ENABLE, val);
2245 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
2246 tg3_flag(tp, TSO_CAPABLE)) {
2247 val = tr32(RCVLPC_STATS_ENABLE);
2248 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
2249 tw32(RCVLPC_STATS_ENABLE, val);
2250 } else {
2251 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
2252 }
2253 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
2254 tw32(SNDDATAI_STATSENAB, 0xffffff);
2255 tw32(SNDDATAI_STATSCTRL,
2256 (SNDDATAI_SCTRL_ENABLE |
2257 SNDDATAI_SCTRL_FASTUPD));
2258
2259 /* Setup host coalescing engine. */
2260 tw32(HOSTCC_MODE, 0);
2261 for (i = 0; i < 2000; i++) {
2262 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
2263 break;
2264 udelay(10);
2265 }
2266
2267 __tg3_set_coalesce(tp);
2268
2269 if (!tg3_flag(tp, 5705_PLUS)) {
2270 /* Status/statistics block address. See tg3_timer,
2271 * the tg3_periodic_fetch_stats call there, and
2272 * tg3_get_stats to see how this works for 5705/5750 chips.
2273 * NOTE: stats block removed for iPXE
2274 */
2275 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
2276
2277 /* Clear statistics and status block memory areas */
2278 for (i = NIC_SRAM_STATS_BLK;
2279 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
2280 i += sizeof(u32)) {
2281 tg3_write_mem(tp, i, 0);
2282 udelay(40);
2283 }
2284 }
2285
2286 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
2287
2288 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
2289 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
2290 if (!tg3_flag(tp, 5705_PLUS))
2291 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
2292
2293 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
2294 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
2295 /* reset to prevent losing 1st rx packet intermittently */
2296 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
2297 udelay(10);
2298 }
2299
2300 if (tg3_flag(tp, ENABLE_APE))
2301 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
2302 else
2303 tp->mac_mode = 0;
2304 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
2305 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
2306 if (!tg3_flag(tp, 5705_PLUS) &&
2307 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
2308 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
2309 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
2310 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
2311 udelay(40);
2312
2313 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
2314 * If TG3_FLAG_IS_NIC is zero, we should read the
2315 * register to preserve the GPIO settings for LOMs. The GPIOs,
2316 * whether used as inputs or outputs, are set by boot code after
2317 * reset.
2318 */
2319 if (!tg3_flag(tp, IS_NIC)) {
2320 u32 gpio_mask;
2321
2322 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
2323 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
2324 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
2325
2326 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
2327 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
2328 GRC_LCLCTRL_GPIO_OUTPUT3;
2329
2330 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
2331 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
2332
2333 tp->grc_local_ctrl &= ~gpio_mask;
2334 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
2335
2336 /* GPIO1 must be driven high for eeprom write protect */
2337 if (tg3_flag(tp, EEPROM_WRITE_PROT))
2338 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
2339 GRC_LCLCTRL_GPIO_OUTPUT1);
2340 }
2341 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
2342 udelay(100);
2343
2344 if (!tg3_flag(tp, 5705_PLUS)) {
2345 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
2346 udelay(40);
2347 }
2348
2349 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
2350 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
2351 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
2352 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
2353 WDMAC_MODE_LNGREAD_ENAB);
2354
2355 /* Enable host coalescing bug fix */
2356 if (tg3_flag(tp, 5755_PLUS))
2357 val |= WDMAC_MODE_STATUS_TAG_FIX;
2358
2359 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
2360 val |= WDMAC_MODE_BURST_ALL_DATA;
2361
2362 tw32_f(WDMAC_MODE, val);
2363 udelay(40);
2364
2365 if (tg3_flag(tp, PCIX_MODE)) {
2366 u16 pcix_cmd;
2367
2368 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
2369 &pcix_cmd);
2370 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
2371 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
2372 pcix_cmd |= PCI_X_CMD_READ_2K;
2373 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2374 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
2375 pcix_cmd |= PCI_X_CMD_READ_2K;
2376 }
2377 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
2378 pcix_cmd);
2379 }
2380
2381 tw32_f(RDMAC_MODE, rdmac_mode);
2382 udelay(40);
2383
2384 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
2385 if (!tg3_flag(tp, 5705_PLUS))
2386 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
2387
2388 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
2389 tw32(SNDDATAC_MODE,
2390 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
2391 else
2392 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
2393
2394 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
2395 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
2396 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
2397 if (tg3_flag(tp, LRG_PROD_RING_CAP))
2398 val |= RCVDBDI_MODE_LRG_RING_SZ;
2399 tw32(RCVDBDI_MODE, val);
2400 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
2401
2402 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
2403 if (tg3_flag(tp, ENABLE_TSS))
2404 val |= SNDBDI_MODE_MULTI_TXQ_EN;
2405 tw32(SNDBDI_MODE, val);
2406 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
2407
2408
2409 /* FIXME: 5701 firmware fix? */
2410 #if 0
2411 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
2412 err = tg3_load_5701_a0_firmware_fix(tp);
2413 if (err)
2414 return err;
2415 }
2416 #endif
2417
2418 tp->tx_mode = TX_MODE_ENABLE;
2419
2420 if (tg3_flag(tp, 5755_PLUS) ||
2421 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
2422 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
2423
2424 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2425 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
2426 tp->tx_mode &= ~val;
2427 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
2428 }
2429
2430 tw32_f(MAC_TX_MODE, tp->tx_mode);
2431 udelay(100);
2432
2433 tp->rx_mode = RX_MODE_ENABLE;
2434
2435 tw32_f(MAC_RX_MODE, tp->rx_mode);
2436 udelay(10);
2437
2438 tw32(MAC_LED_CTRL, tp->led_ctrl);
2439
2440 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2441 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2442 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
2443 udelay(10);
2444 }
2445 tw32_f(MAC_RX_MODE, tp->rx_mode);
2446 udelay(10);
2447
2448 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2449 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
2450 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
2451 /* Set drive transmission level to 1.2V */
2452 /* only if the signal pre-emphasis bit is not set */
2453 val = tr32(MAC_SERDES_CFG);
2454 val &= 0xfffff000;
2455 val |= 0x880;
2456 tw32(MAC_SERDES_CFG, val);
2457 }
2458 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
2459 tw32(MAC_SERDES_CFG, 0x616000);
2460 }
2461
2462 /* Prevent chip from dropping frames when flow control
2463 * is enabled.
2464 */
2465 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2466 val = 1;
2467 else
2468 val = 2;
2469 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
2470
2471 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
2472 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
2473 /* Use hardware link auto-negotiation */
2474 tg3_flag_set(tp, HW_AUTONEG);
2475 }
2476
2477 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
2478 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2479 u32 tmp;
2480
2481 tmp = tr32(SERDES_RX_CTRL);
2482 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
2483 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
2484 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
2485 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
2486 }
2487
2488 err = tg3_setup_phy(tp, 0);
2489 if (err)
2490 return err;
2491
2492 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
2493 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
2494 u32 tmp;
2495
2496 /* Clear CRC stats. */
2497 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
2498 tg3_writephy(tp, MII_TG3_TEST1,
2499 tmp | MII_TG3_TEST1_CRC_EN);
2500 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
2501 }
2502 }
2503
2504 __tg3_set_rx_mode(tp->dev);
2505
2506 /* Initialize receive rules. */
2507 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
2508 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
2509 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
2510 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
2511
2512 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
2513 limit = 8;
2514 else
2515 limit = 16;
2516 if (tg3_flag(tp, ENABLE_ASF))
2517 limit -= 4;
2518 switch (limit) {
2519 case 16:
2520 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
2521 /* Fall through */
2522 case 15:
2523 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
2524 /* Fall through */
2525 case 14:
2526 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
2527 /* Fall through */
2528 case 13:
2529 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
2530 /* Fall through */
2531 case 12:
2532 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
2533 /* Fall through */
2534 case 11:
2535 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
2536 /* Fall through */
2537 case 10:
2538 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
2539 /* Fall through */
2540 case 9:
2541 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
2542 /* Fall through */
2543 case 8:
2544 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
2545 /* Fall through */
2546 case 7:
2547 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
2548 /* Fall through */
2549 case 6:
2550 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
2551 /* Fall through */
2552 case 5:
2553 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
2554 /* Fall through */
2555 case 4:
2556 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
2557 case 3:
2558 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
2559 case 2:
2560 case 1:
2561
2562 default:
2563 break;
2564 }
2565
2566 return 0;
2567 }
2568
/* Called at device open time to get the chip ready for
 * packet processing.
 *
 * Selects the appropriate core clocks via tg3_switch_clocks(),
 * resets the PCI memory-window base register to zero (presumably so
 * later indirect SRAM accesses start from a known offset — confirm
 * against tg3_read_mem()/tg3_write_mem()), then performs the full
 * hardware reset and reinitialisation.
 *
 * @tp:        driver/device state
 * @reset_phy: passed through to tg3_reset_hw(); non-zero requests a
 *             PHY reset as part of the hardware reset
 *
 * Returns 0 on success or a negative error code propagated from
 * tg3_reset_hw().
 *
 * NOTE(review): the upstream Linux driver comment said "invoked with
 * tp->lock held"; iPXE is single-threaded, so no locking applies here.
 */
int tg3_init_hw(struct tg3 *tp, int reset_phy)
{ DBGP("%s\n", __func__);

	tg3_switch_clocks(tp);

	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	return tg3_reset_hw(tp, reset_phy);
}
2581
2582 void tg3_set_txd(struct tg3 *tp, int entry,
2583 dma_addr_t mapping, int len, u32 flags)
2584 { DBGP("%s\n", __func__);
2585
2586 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
2587
2588 txd->addr_hi = ((u64) mapping >> 32);
2589 txd->addr_lo = ((u64) mapping & 0xffffffff);
2590 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
2591 txd->vlan_tag = 0;
2592 }
2593
2594 int tg3_do_test_dma(struct tg3 *tp, u32 __unused *buf, dma_addr_t buf_dma, int size, int to_device)
2595 { DBGP("%s\n", __func__);
2596
2597 struct tg3_internal_buffer_desc test_desc;
2598 u32 sram_dma_descs;
2599 int ret;
2600 unsigned int i;
2601
2602 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
2603
2604 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
2605 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
2606 tw32(RDMAC_STATUS, 0);
2607 tw32(WDMAC_STATUS, 0);
2608
2609 tw32(BUFMGR_MODE, 0);
2610 tw32(FTQ_RESET, 0);
2611
2612 test_desc.addr_hi = ((u64) buf_dma) >> 32;
2613 test_desc.addr_lo = buf_dma & 0xffffffff;
2614 test_desc.nic_mbuf = 0x00002100;
2615 test_desc.len = size;
2616
2617 /*
2618 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
2619 * the *second* time the tg3 driver was getting loaded after an
2620 * initial scan.
2621 *
2622 * Broadcom tells me:
2623 * ...the DMA engine is connected to the GRC block and a DMA
2624 * reset may affect the GRC block in some unpredictable way...
2625 * The behavior of resets to individual blocks has not been tested.
2626 *
2627 * Broadcom noted the GRC reset will also reset all sub-components.
2628 */
2629 if (to_device) {
2630 test_desc.cqid_sqid = (13 << 8) | 2;
2631
2632 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
2633 udelay(40);
2634 } else {
2635 test_desc.cqid_sqid = (16 << 8) | 7;
2636
2637 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
2638 udelay(40);
2639 }
2640 test_desc.flags = 0x00000005;
2641
2642 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
2643 u32 val;
2644
2645 val = *(((u32 *)&test_desc) + i);
2646 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
2647 sram_dma_descs + (i * sizeof(u32)));
2648 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
2649 }
2650 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
2651
2652 if (to_device)
2653 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
2654 else
2655 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
2656
2657 ret = -ENODEV;
2658 for (i = 0; i < 40; i++) {
2659 u32 val;
2660
2661 if (to_device)
2662 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
2663 else
2664 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
2665 if ((val & 0xffff) == sram_dma_descs) {
2666 ret = 0;
2667 break;
2668 }
2669
2670 udelay(100);
2671 }
2672
2673 return ret;
2674 }