[kernel] r14058 - in dists/lenny/linux-2.6: . debian debian/config/alpha debian/config/amd64 debian/config/arm debian/config/hppa debian/config/i386 debian/config/ia64 debian/config/mips debian/config/mipsel debian/config/powerpc debian/config/sparc debian/patches/features/all debian/patches/series
Dann Frazier
dannf at alioth.debian.org
Fri Jul 31 06:10:30 UTC 2009
Author: dannf
Date: Fri Jul 31 06:10:25 2009
New Revision: 14058
Log:
merge bnx2x branch
Added:
dists/lenny/linux-2.6/debian/patches/features/all/bnx2x-Separated-FW-from-the-source.patch
- copied unchanged from r14057, people/dannf/lenny-bnx2x/debian/patches/features/all/bnx2x-Separated-FW-from-the-source.patch
dists/lenny/linux-2.6/debian/patches/features/all/bnx2x-update.patch
- copied unchanged from r14057, people/dannf/lenny-bnx2x/debian/patches/features/all/bnx2x-update.patch
Modified:
dists/lenny/linux-2.6/ (props changed)
dists/lenny/linux-2.6/debian/changelog
dists/lenny/linux-2.6/debian/config/alpha/config
dists/lenny/linux-2.6/debian/config/amd64/config
dists/lenny/linux-2.6/debian/config/arm/config.footbridge
dists/lenny/linux-2.6/debian/config/arm/config.orion5x
dists/lenny/linux-2.6/debian/config/arm/config.versatile
dists/lenny/linux-2.6/debian/config/hppa/config
dists/lenny/linux-2.6/debian/config/i386/config
dists/lenny/linux-2.6/debian/config/ia64/config
dists/lenny/linux-2.6/debian/config/mips/config.4kc-malta
dists/lenny/linux-2.6/debian/config/mips/config.5kc-malta
dists/lenny/linux-2.6/debian/config/mips/config.r5k-ip32
dists/lenny/linux-2.6/debian/config/mips/config.sb1-bcm91250a
dists/lenny/linux-2.6/debian/config/mips/config.sb1a-bcm91480b
dists/lenny/linux-2.6/debian/config/mipsel/config.4kc-malta
dists/lenny/linux-2.6/debian/config/mipsel/config.5kc-malta
dists/lenny/linux-2.6/debian/config/mipsel/config.r5k-cobalt
dists/lenny/linux-2.6/debian/config/mipsel/config.sb1-bcm91250a
dists/lenny/linux-2.6/debian/config/mipsel/config.sb1a-bcm91480b
dists/lenny/linux-2.6/debian/config/powerpc/config
dists/lenny/linux-2.6/debian/config/sparc/config
dists/lenny/linux-2.6/debian/patches/series/18
Modified: dists/lenny/linux-2.6/debian/changelog
==============================================================================
--- dists/lenny/linux-2.6/debian/changelog Fri Jul 31 05:46:40 2009 (r14057)
+++ dists/lenny/linux-2.6/debian/changelog Fri Jul 31 06:10:25 2009 (r14058)
@@ -22,6 +22,7 @@
* libata: make sure port is thawed when skipping resets. This change
avoid regressing #533657 with the fix for #498271.
* Add -fno-delete-null-pointer-checks to CFLAGS (Closes: #537617)
+ * Add a backport of bnx2x from 2.6.30 with request_firmware changes
[ Moritz Muehlenhoff ]
* fbdev/atyfb: Fix display corruption on some PowerMacs & PowerBooks
Modified: dists/lenny/linux-2.6/debian/config/alpha/config
==============================================================================
--- dists/lenny/linux-2.6/debian/config/alpha/config Fri Jul 31 05:46:40 2009 (r14057)
+++ dists/lenny/linux-2.6/debian/config/alpha/config Fri Jul 31 06:10:25 2009 (r14058)
@@ -822,6 +822,7 @@
CONFIG_VIA_VELOCITY=m
CONFIG_TIGON3=m
CONFIG_BNX2=m
+CONFIG_BNX2X=m
CONFIG_FDDI=y
CONFIG_DEFXX=m
CONFIG_SKFP=m
Modified: dists/lenny/linux-2.6/debian/config/amd64/config
==============================================================================
--- dists/lenny/linux-2.6/debian/config/amd64/config Fri Jul 31 05:46:40 2009 (r14057)
+++ dists/lenny/linux-2.6/debian/config/amd64/config Fri Jul 31 06:10:25 2009 (r14058)
@@ -932,6 +932,7 @@
CONFIG_VIA_VELOCITY=m
CONFIG_TIGON3=m
CONFIG_BNX2=m
+CONFIG_BNX2X=m
CONFIG_FDDI=y
CONFIG_DEFXX=m
# CONFIG_DEFXX_MMIO is not set
Modified: dists/lenny/linux-2.6/debian/config/arm/config.footbridge
==============================================================================
--- dists/lenny/linux-2.6/debian/config/arm/config.footbridge Fri Jul 31 05:46:40 2009 (r14057)
+++ dists/lenny/linux-2.6/debian/config/arm/config.footbridge Fri Jul 31 06:10:25 2009 (r14058)
@@ -666,6 +666,7 @@
# CONFIG_VIA_VELOCITY is not set
# CONFIG_TIGON3 is not set
# CONFIG_BNX2 is not set
+# CONFIG_BNX2X is not set
# CONFIG_FDDI is not set
# CONFIG_HIPPI is not set
# CONFIG_PLIP is not set
Modified: dists/lenny/linux-2.6/debian/config/arm/config.orion5x
==============================================================================
--- dists/lenny/linux-2.6/debian/config/arm/config.orion5x Fri Jul 31 05:46:40 2009 (r14057)
+++ dists/lenny/linux-2.6/debian/config/arm/config.orion5x Fri Jul 31 06:10:25 2009 (r14058)
@@ -318,6 +318,7 @@
# CONFIG_VIA_VELOCITY is not set
# CONFIG_TIGON3 is not set
# CONFIG_BNX2 is not set
+# CONFIG_BNX2X is not set
CONFIG_MV643XX_ETH=m
# CONFIG_QLA3XXX is not set
# CONFIG_ATL1 is not set
Modified: dists/lenny/linux-2.6/debian/config/arm/config.versatile
==============================================================================
--- dists/lenny/linux-2.6/debian/config/arm/config.versatile Fri Jul 31 05:46:40 2009 (r14057)
+++ dists/lenny/linux-2.6/debian/config/arm/config.versatile Fri Jul 31 06:10:25 2009 (r14058)
@@ -300,6 +300,7 @@
CONFIG_VIA_VELOCITY=m
# CONFIG_TIGON3 is not set
# CONFIG_BNX2 is not set
+# CONFIG_BNX2X is not set
CONFIG_QLA3XXX=m
CONFIG_ATL1=m
# CONFIG_NETDEV_10000 is not set
Modified: dists/lenny/linux-2.6/debian/config/hppa/config
==============================================================================
--- dists/lenny/linux-2.6/debian/config/hppa/config Fri Jul 31 05:46:40 2009 (r14057)
+++ dists/lenny/linux-2.6/debian/config/hppa/config Fri Jul 31 06:10:25 2009 (r14058)
@@ -317,6 +317,7 @@
CONFIG_VIA_VELOCITY=m
CONFIG_TIGON3=m
CONFIG_BNX2=m
+CONFIG_BNX2X=m
# CONFIG_FDDI is not set
# CONFIG_HIPPI is not set
CONFIG_PLIP=m
Modified: dists/lenny/linux-2.6/debian/config/i386/config
==============================================================================
--- dists/lenny/linux-2.6/debian/config/i386/config Fri Jul 31 05:46:40 2009 (r14057)
+++ dists/lenny/linux-2.6/debian/config/i386/config Fri Jul 31 06:10:25 2009 (r14058)
@@ -1119,6 +1119,7 @@
CONFIG_VIA_VELOCITY=m
CONFIG_TIGON3=m
CONFIG_BNX2=m
+CONFIG_BNX2X=m
CONFIG_XEN_NETDEV_FRONTEND=m
CONFIG_FDDI=y
CONFIG_DEFXX=m
Modified: dists/lenny/linux-2.6/debian/config/ia64/config
==============================================================================
--- dists/lenny/linux-2.6/debian/config/ia64/config Fri Jul 31 05:46:40 2009 (r14057)
+++ dists/lenny/linux-2.6/debian/config/ia64/config Fri Jul 31 06:10:25 2009 (r14058)
@@ -534,6 +534,7 @@
CONFIG_VIA_VELOCITY=m
CONFIG_TIGON3=m
CONFIG_BNX2=m
+CONFIG_BNX2X=m
CONFIG_FDDI=y
CONFIG_DEFXX=m
CONFIG_SKFP=m
Modified: dists/lenny/linux-2.6/debian/config/mips/config.4kc-malta
==============================================================================
--- dists/lenny/linux-2.6/debian/config/mips/config.4kc-malta Fri Jul 31 05:46:40 2009 (r14057)
+++ dists/lenny/linux-2.6/debian/config/mips/config.4kc-malta Fri Jul 31 06:10:25 2009 (r14058)
@@ -1006,6 +1006,7 @@
CONFIG_VIA_VELOCITY=m
CONFIG_TIGON3=m
CONFIG_BNX2=m
+CONFIG_BNX2X=m
CONFIG_QLA3XXX=m
CONFIG_FDDI=y
CONFIG_DEFXX=m
Modified: dists/lenny/linux-2.6/debian/config/mips/config.5kc-malta
==============================================================================
--- dists/lenny/linux-2.6/debian/config/mips/config.5kc-malta Fri Jul 31 05:46:40 2009 (r14057)
+++ dists/lenny/linux-2.6/debian/config/mips/config.5kc-malta Fri Jul 31 06:10:25 2009 (r14058)
@@ -1225,6 +1225,7 @@
CONFIG_VIA_VELOCITY=m
CONFIG_TIGON3=m
CONFIG_BNX2=m
+CONFIG_BNX2X=m
CONFIG_QLA3XXX=m
CONFIG_ATL1=m
CONFIG_FDDI=y
Modified: dists/lenny/linux-2.6/debian/config/mips/config.r5k-ip32
==============================================================================
--- dists/lenny/linux-2.6/debian/config/mips/config.r5k-ip32 Fri Jul 31 05:46:40 2009 (r14057)
+++ dists/lenny/linux-2.6/debian/config/mips/config.r5k-ip32 Fri Jul 31 06:10:25 2009 (r14058)
@@ -413,6 +413,7 @@
# CONFIG_SKY2 is not set
# CONFIG_TIGON3 is not set
# CONFIG_BNX2 is not set
+# CONFIG_BNX2X is not set
# CONFIG_CHELSIO_T1 is not set
# CONFIG_IXGB is not set
# CONFIG_S2IO is not set
Modified: dists/lenny/linux-2.6/debian/config/mips/config.sb1-bcm91250a
==============================================================================
--- dists/lenny/linux-2.6/debian/config/mips/config.sb1-bcm91250a Fri Jul 31 05:46:40 2009 (r14057)
+++ dists/lenny/linux-2.6/debian/config/mips/config.sb1-bcm91250a Fri Jul 31 06:10:25 2009 (r14058)
@@ -577,6 +577,7 @@
CONFIG_VIA_VELOCITY=m
CONFIG_TIGON3=m
CONFIG_BNX2=m
+CONFIG_BNX2X=m
# CONFIG_CHELSIO_T1 is not set
# CONFIG_IXGB is not set
# CONFIG_S2IO is not set
Modified: dists/lenny/linux-2.6/debian/config/mips/config.sb1a-bcm91480b
==============================================================================
--- dists/lenny/linux-2.6/debian/config/mips/config.sb1a-bcm91480b Fri Jul 31 05:46:40 2009 (r14057)
+++ dists/lenny/linux-2.6/debian/config/mips/config.sb1a-bcm91480b Fri Jul 31 06:10:25 2009 (r14058)
@@ -578,6 +578,7 @@
CONFIG_VIA_VELOCITY=m
CONFIG_TIGON3=m
CONFIG_BNX2=m
+CONFIG_BNX2X=m
# CONFIG_CHELSIO_T1 is not set
# CONFIG_IXGB is not set
# CONFIG_S2IO is not set
Modified: dists/lenny/linux-2.6/debian/config/mipsel/config.4kc-malta
==============================================================================
--- dists/lenny/linux-2.6/debian/config/mipsel/config.4kc-malta Fri Jul 31 05:46:40 2009 (r14057)
+++ dists/lenny/linux-2.6/debian/config/mipsel/config.4kc-malta Fri Jul 31 06:10:25 2009 (r14058)
@@ -1005,6 +1005,7 @@
CONFIG_VIA_VELOCITY=m
CONFIG_TIGON3=m
CONFIG_BNX2=m
+CONFIG_BNX2X=m
CONFIG_QLA3XXX=m
CONFIG_FDDI=y
CONFIG_DEFXX=m
Modified: dists/lenny/linux-2.6/debian/config/mipsel/config.5kc-malta
==============================================================================
--- dists/lenny/linux-2.6/debian/config/mipsel/config.5kc-malta Fri Jul 31 05:46:40 2009 (r14057)
+++ dists/lenny/linux-2.6/debian/config/mipsel/config.5kc-malta Fri Jul 31 06:10:25 2009 (r14058)
@@ -1225,6 +1225,7 @@
CONFIG_VIA_VELOCITY=m
CONFIG_TIGON3=m
CONFIG_BNX2=m
+CONFIG_BNX2X=m
CONFIG_QLA3XXX=m
CONFIG_ATL1=m
CONFIG_FDDI=y
Modified: dists/lenny/linux-2.6/debian/config/mipsel/config.r5k-cobalt
==============================================================================
--- dists/lenny/linux-2.6/debian/config/mipsel/config.r5k-cobalt Fri Jul 31 05:46:40 2009 (r14057)
+++ dists/lenny/linux-2.6/debian/config/mipsel/config.r5k-cobalt Fri Jul 31 06:10:25 2009 (r14058)
@@ -519,6 +519,7 @@
# CONFIG_VIA_VELOCITY is not set
# CONFIG_TIGON3 is not set
# CONFIG_BNX2 is not set
+# CONFIG_BNX2X is not set
# CONFIG_CHELSIO_T1 is not set
# CONFIG_IXGB is not set
# CONFIG_S2IO is not set
Modified: dists/lenny/linux-2.6/debian/config/mipsel/config.sb1-bcm91250a
==============================================================================
--- dists/lenny/linux-2.6/debian/config/mipsel/config.sb1-bcm91250a Fri Jul 31 05:46:40 2009 (r14057)
+++ dists/lenny/linux-2.6/debian/config/mipsel/config.sb1-bcm91250a Fri Jul 31 06:10:25 2009 (r14058)
@@ -577,6 +577,7 @@
CONFIG_VIA_VELOCITY=m
CONFIG_TIGON3=m
CONFIG_BNX2=m
+CONFIG_BNX2X=m
# CONFIG_CHELSIO_T1 is not set
# CONFIG_IXGB is not set
# CONFIG_S2IO is not set
Modified: dists/lenny/linux-2.6/debian/config/mipsel/config.sb1a-bcm91480b
==============================================================================
--- dists/lenny/linux-2.6/debian/config/mipsel/config.sb1a-bcm91480b Fri Jul 31 05:46:40 2009 (r14057)
+++ dists/lenny/linux-2.6/debian/config/mipsel/config.sb1a-bcm91480b Fri Jul 31 06:10:25 2009 (r14058)
@@ -577,6 +577,7 @@
CONFIG_VIA_VELOCITY=m
CONFIG_TIGON3=m
CONFIG_BNX2=m
+CONFIG_BNX2X=m
# CONFIG_CHELSIO_T1 is not set
# CONFIG_IXGB is not set
# CONFIG_S2IO is not set
Modified: dists/lenny/linux-2.6/debian/config/powerpc/config
==============================================================================
--- dists/lenny/linux-2.6/debian/config/powerpc/config Fri Jul 31 05:46:40 2009 (r14057)
+++ dists/lenny/linux-2.6/debian/config/powerpc/config Fri Jul 31 06:10:25 2009 (r14058)
@@ -592,6 +592,7 @@
CONFIG_VIA_VELOCITY=m
CONFIG_TIGON3=m
CONFIG_BNX2=m
+CONFIG_BNX2X=m
CONFIG_MV643XX_ETH=m
CONFIG_FDDI=y
CONFIG_DEFXX=m
Modified: dists/lenny/linux-2.6/debian/config/sparc/config
==============================================================================
--- dists/lenny/linux-2.6/debian/config/sparc/config Fri Jul 31 05:46:40 2009 (r14057)
+++ dists/lenny/linux-2.6/debian/config/sparc/config Fri Jul 31 06:10:25 2009 (r14058)
@@ -132,6 +132,7 @@
# CONFIG_HAMACHI is not set
# CONFIG_R8169 is not set
CONFIG_BNX2=m
+CONFIG_BNX2X=m
# CONFIG_HIPPI is not set
CONFIG_PLIP=m
CONFIG_NET_FC=y
Copied: dists/lenny/linux-2.6/debian/patches/features/all/bnx2x-Separated-FW-from-the-source.patch (from r14057, people/dannf/lenny-bnx2x/debian/patches/features/all/bnx2x-Separated-FW-from-the-source.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/lenny/linux-2.6/debian/patches/features/all/bnx2x-Separated-FW-from-the-source.patch Fri Jul 31 06:10:25 2009 (r14058, copy of r14057, people/dannf/lenny-bnx2x/debian/patches/features/all/bnx2x-Separated-FW-from-the-source.patch)
@@ -0,0 +1,1790 @@
+diff -urpN linux-source-2.6.26.orig/drivers/net/bnx2x_fw_file_hdr.h linux-source-2.6.26/drivers/net/bnx2x_fw_file_hdr.h
+--- linux-source-2.6.26.orig/drivers/net/bnx2x_fw_file_hdr.h 1969-12-31 17:00:00.000000000 -0700
++++ linux-source-2.6.26/drivers/net/bnx2x_fw_file_hdr.h 2009-07-24 16:42:53.000000000 -0600
+@@ -0,0 +1,37 @@
++/* bnx2x_fw_file_hdr.h: FW binary file header structure.
++ *
++ * Copyright (c) 2007-2009 Broadcom Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation.
++ *
++ * Maintained by: Eilon Greenstein <eilong at broadcom.com>
++ * Written by: Vladislav Zolotarov <vladz at broadcom.com>
++ * Based on the original idea of John Wright <john.wright at hp.com>.
++ */
++
++#ifndef BNX2X_INIT_FILE_HDR_H
++#define BNX2X_INIT_FILE_HDR_H
++
++struct bnx2x_fw_file_section {
++ __be32 len;
++ __be32 offset;
++};
++
++struct bnx2x_fw_file_hdr {
++ struct bnx2x_fw_file_section init_ops;
++ struct bnx2x_fw_file_section init_ops_offsets;
++ struct bnx2x_fw_file_section init_data;
++ struct bnx2x_fw_file_section tsem_int_table_data;
++ struct bnx2x_fw_file_section tsem_pram_data;
++ struct bnx2x_fw_file_section usem_int_table_data;
++ struct bnx2x_fw_file_section usem_pram_data;
++ struct bnx2x_fw_file_section csem_int_table_data;
++ struct bnx2x_fw_file_section csem_pram_data;
++ struct bnx2x_fw_file_section xsem_int_table_data;
++ struct bnx2x_fw_file_section xsem_pram_data;
++ struct bnx2x_fw_file_section fw_version;
++};
++
++#endif /* BNX2X_INIT_FILE_HDR_H */
+diff -urpN linux-source-2.6.26.orig/drivers/net/bnx2x.h linux-source-2.6.26/drivers/net/bnx2x.h
+--- linux-source-2.6.26.orig/drivers/net/bnx2x.h 2009-07-24 16:42:24.000000000 -0600
++++ linux-source-2.6.26/drivers/net/bnx2x.h 2009-07-24 16:42:53.000000000 -0600
+@@ -909,6 +909,21 @@ struct bnx2x {
+ int gunzip_outlen;
+ #define FW_BUF_SIZE 0x8000
+
++ struct raw_op *init_ops;
++ /* Init blocks offsets inside init_ops */
++ u16 *init_ops_offsets;
++ /* Data blob - has 32 bit granularity */
++ u32 *init_data;
++ /* Zipped PRAM blobs - raw data */
++ const u8 *tsem_int_table_data;
++ const u8 *tsem_pram_data;
++ const u8 *usem_int_table_data;
++ const u8 *usem_pram_data;
++ const u8 *xsem_int_table_data;
++ const u8 *xsem_pram_data;
++ const u8 *csem_int_table_data;
++ const u8 *csem_pram_data;
++ const struct firmware *firmware;
+ };
+
+
+diff -urpN linux-source-2.6.26.orig/drivers/net/bnx2x_init.h linux-source-2.6.26/drivers/net/bnx2x_init.h
+--- linux-source-2.6.26.orig/drivers/net/bnx2x_init.h 2009-07-24 16:42:24.000000000 -0600
++++ linux-source-2.6.26/drivers/net/bnx2x_init.h 2009-07-24 16:43:18.000000000 -0600
+@@ -1,4 +1,5 @@
+ /* bnx2x_init.h: Broadcom Everest network driver.
++ * Structures and macroes needed during the initialization.
+ *
+ * Copyright (c) 2007-2008 Broadcom Corporation
+ *
+@@ -8,6 +9,7 @@
+ *
+ * Maintained by: Eilon Greenstein <eilong at broadcom.com>
+ * Written by: Eliezer Tamir
++ * Modified by: Vladislav Zolotarov <vladz at broadcom.com>
+ */
+
+ #ifndef BNX2X_INIT_H
+@@ -42,33 +44,71 @@
+ #define OP_WR_64 0x8 /* write 64 bit pattern */
+ #define OP_WB 0x9 /* copy a string using DMAE */
+
+-/* Operation specific for E1 */
+-#define OP_RD_E1 0xa /* read single register */
+-#define OP_WR_E1 0xb /* write single register */
+-#define OP_IW_E1 0xc /* write single register using mailbox */
+-#define OP_SW_E1 0xd /* copy a string to the device */
+-#define OP_SI_E1 0xe /* copy a string using mailbox */
+-#define OP_ZR_E1 0xf /* clear memory */
+-#define OP_ZP_E1 0x10 /* unzip then copy with DMAE */
+-#define OP_WR_64_E1 0x11 /* write 64 bit pattern on E1 */
+-#define OP_WB_E1 0x12 /* copy a string using DMAE */
+-
+-/* Operation specific for E1H */
+-#define OP_RD_E1H 0x13 /* read single register */
+-#define OP_WR_E1H 0x14 /* write single register */
+-#define OP_IW_E1H 0x15 /* write single register using mailbox */
+-#define OP_SW_E1H 0x16 /* copy a string to the device */
+-#define OP_SI_E1H 0x17 /* copy a string using mailbox */
+-#define OP_ZR_E1H 0x18 /* clear memory */
+-#define OP_ZP_E1H 0x19 /* unzip then copy with DMAE */
+-#define OP_WR_64_E1H 0x1a /* write 64 bit pattern on E1H */
+-#define OP_WB_E1H 0x1b /* copy a string using DMAE */
+-
+ /* FPGA and EMUL specific operations */
+-#define OP_WR_EMUL_E1H 0x1c /* write single register on E1H Emul */
+-#define OP_WR_EMUL 0x1d /* write single register on Emulation */
+-#define OP_WR_FPGA 0x1e /* write single register on FPGA */
+-#define OP_WR_ASIC 0x1f /* write single register on ASIC */
++#define OP_WR_EMUL 0xa /* write single register on Emulation */
++#define OP_WR_FPGA 0xb /* write single register on FPGA */
++#define OP_WR_ASIC 0xc /* write single register on ASIC */
++
++/* Init stages */
++#define COMMON_STAGE 0
++#define PORT0_STAGE 1
++#define PORT1_STAGE 2
++/* Never reorder FUNCx stages !!! */
++#define FUNC0_STAGE 3
++#define FUNC1_STAGE 4
++#define FUNC2_STAGE 5
++#define FUNC3_STAGE 6
++#define FUNC4_STAGE 7
++#define FUNC5_STAGE 8
++#define FUNC6_STAGE 9
++#define FUNC7_STAGE 10
++#define STAGE_IDX_MAX 11
++
++#define STAGE_START 0
++#define STAGE_END 1
++
++
++/* Indices of blocks */
++#define PRS_BLOCK 0
++#define SRCH_BLOCK 1
++#define TSDM_BLOCK 2
++#define TCM_BLOCK 3
++#define BRB1_BLOCK 4
++#define TSEM_BLOCK 5
++#define PXPCS_BLOCK 6
++#define EMAC0_BLOCK 7
++#define EMAC1_BLOCK 8
++#define DBU_BLOCK 9
++#define MISC_BLOCK 10
++#define DBG_BLOCK 11
++#define NIG_BLOCK 12
++#define MCP_BLOCK 13
++#define UPB_BLOCK 14
++#define CSDM_BLOCK 15
++#define USDM_BLOCK 16
++#define CCM_BLOCK 17
++#define UCM_BLOCK 18
++#define USEM_BLOCK 19
++#define CSEM_BLOCK 20
++#define XPB_BLOCK 21
++#define DQ_BLOCK 22
++#define TIMERS_BLOCK 23
++#define XSDM_BLOCK 24
++#define QM_BLOCK 25
++#define PBF_BLOCK 26
++#define XCM_BLOCK 27
++#define XSEM_BLOCK 28
++#define CDU_BLOCK 29
++#define DMAE_BLOCK 30
++#define PXP_BLOCK 31
++#define CFC_BLOCK 32
++#define HC_BLOCK 33
++#define PXP2_BLOCK 34
++#define MISC_AEU_BLOCK 35
++
++/* Returns the index of start or end of a specific block stage in ops array*/
++#define BLOCK_OPS_IDX(block, stage, end) \
++ (2*(((block)*STAGE_IDX_MAX) + (stage)) + (end))
+
+
+ struct raw_op {
+@@ -115,290 +155,6 @@ union init_op {
+ struct raw_op raw;
+ };
+
+-#include "bnx2x_init_values.h"
+-
+-static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val);
+-static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len);
+-
+-static void bnx2x_init_str_wr(struct bnx2x *bp, u32 addr, const u32 *data,
+- u32 len)
+-{
+- int i;
+-
+- for (i = 0; i < len; i++) {
+- REG_WR(bp, addr + i*4, data[i]);
+- if (!(i % 10000)) {
+- touch_softlockup_watchdog();
+- cpu_relax();
+- }
+- }
+-}
+-
+-static void bnx2x_init_ind_wr(struct bnx2x *bp, u32 addr, const u32 *data,
+- u16 len)
+-{
+- int i;
+-
+- for (i = 0; i < len; i++) {
+- REG_WR_IND(bp, addr + i*4, data[i]);
+- if (!(i % 10000)) {
+- touch_softlockup_watchdog();
+- cpu_relax();
+- }
+- }
+-}
+-
+-static void bnx2x_write_big_buf(struct bnx2x *bp, u32 addr, u32 len)
+-{
+-#ifdef USE_DMAE
+- int offset = 0;
+-
+- if (bp->dmae_ready) {
+- while (len > DMAE_LEN32_WR_MAX) {
+- bnx2x_write_dmae(bp, bp->gunzip_mapping + offset,
+- addr + offset, DMAE_LEN32_WR_MAX);
+- offset += DMAE_LEN32_WR_MAX * 4;
+- len -= DMAE_LEN32_WR_MAX;
+- }
+- bnx2x_write_dmae(bp, bp->gunzip_mapping + offset,
+- addr + offset, len);
+- } else
+- bnx2x_init_str_wr(bp, addr, bp->gunzip_buf, len);
+-#else
+- bnx2x_init_str_wr(bp, addr, bp->gunzip_buf, len);
+-#endif
+-}
+-
+-static void bnx2x_init_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
+-{
+- if ((len * 4) > FW_BUF_SIZE) {
+- BNX2X_ERR("LARGE DMAE OPERATION ! addr 0x%x len 0x%x\n",
+- addr, len*4);
+- return;
+- }
+- memset(bp->gunzip_buf, fill, len * 4);
+-
+- bnx2x_write_big_buf(bp, addr, len);
+-}
+-
+-static void bnx2x_init_wr_64(struct bnx2x *bp, u32 addr, const u32 *data,
+- u32 len64)
+-{
+- u32 buf_len32 = FW_BUF_SIZE/4;
+- u32 len = len64*2;
+- u64 data64 = 0;
+- int i;
+-
+- /* 64 bit value is in a blob: first low DWORD, then high DWORD */
+- data64 = HILO_U64((*(data + 1)), (*data));
+- len64 = min((u32)(FW_BUF_SIZE/8), len64);
+- for (i = 0; i < len64; i++) {
+- u64 *pdata = ((u64 *)(bp->gunzip_buf)) + i;
+-
+- *pdata = data64;
+- }
+-
+- for (i = 0; i < len; i += buf_len32) {
+- u32 cur_len = min(buf_len32, len - i);
+-
+- bnx2x_write_big_buf(bp, addr + i * 4, cur_len);
+- }
+-}
+-
+-/*********************************************************
+- There are different blobs for each PRAM section.
+- In addition, each blob write operation is divided into a few operations
+- in order to decrease the amount of phys. contiguous buffer needed.
+- Thus, when we select a blob the address may be with some offset
+- from the beginning of PRAM section.
+- The same holds for the INT_TABLE sections.
+-**********************************************************/
+-#define IF_IS_INT_TABLE_ADDR(base, addr) \
+- if (((base) <= (addr)) && ((base) + 0x400 >= (addr)))
+-
+-#define IF_IS_PRAM_ADDR(base, addr) \
+- if (((base) <= (addr)) && ((base) + 0x40000 >= (addr)))
+-
+-static const u32 *bnx2x_sel_blob(u32 addr, const u32 *data, int is_e1)
+-{
+- IF_IS_INT_TABLE_ADDR(TSEM_REG_INT_TABLE, addr)
+- data = is_e1 ? tsem_int_table_data_e1 :
+- tsem_int_table_data_e1h;
+- else
+- IF_IS_INT_TABLE_ADDR(CSEM_REG_INT_TABLE, addr)
+- data = is_e1 ? csem_int_table_data_e1 :
+- csem_int_table_data_e1h;
+- else
+- IF_IS_INT_TABLE_ADDR(USEM_REG_INT_TABLE, addr)
+- data = is_e1 ? usem_int_table_data_e1 :
+- usem_int_table_data_e1h;
+- else
+- IF_IS_INT_TABLE_ADDR(XSEM_REG_INT_TABLE, addr)
+- data = is_e1 ? xsem_int_table_data_e1 :
+- xsem_int_table_data_e1h;
+- else
+- IF_IS_PRAM_ADDR(TSEM_REG_PRAM, addr)
+- data = is_e1 ? tsem_pram_data_e1 : tsem_pram_data_e1h;
+- else
+- IF_IS_PRAM_ADDR(CSEM_REG_PRAM, addr)
+- data = is_e1 ? csem_pram_data_e1 : csem_pram_data_e1h;
+- else
+- IF_IS_PRAM_ADDR(USEM_REG_PRAM, addr)
+- data = is_e1 ? usem_pram_data_e1 : usem_pram_data_e1h;
+- else
+- IF_IS_PRAM_ADDR(XSEM_REG_PRAM, addr)
+- data = is_e1 ? xsem_pram_data_e1 : xsem_pram_data_e1h;
+-
+- return data;
+-}
+-
+-static void bnx2x_init_wr_wb(struct bnx2x *bp, u32 addr, const u32 *data,
+- u32 len, int gunzip, int is_e1, u32 blob_off)
+-{
+- int offset = 0;
+-
+- data = bnx2x_sel_blob(addr, data, is_e1) + blob_off;
+-
+- if (gunzip) {
+- int rc;
+-#ifdef __BIG_ENDIAN
+- int i, size;
+- u32 *temp;
+-
+- temp = kmalloc(len, GFP_KERNEL);
+- size = (len / 4) + ((len % 4) ? 1 : 0);
+- for (i = 0; i < size; i++)
+- temp[i] = swab32(data[i]);
+- data = temp;
+-#endif
+- rc = bnx2x_gunzip(bp, (u8 *)data, len);
+- if (rc) {
+- BNX2X_ERR("gunzip failed ! rc %d\n", rc);
+- return;
+- }
+- len = bp->gunzip_outlen;
+-#ifdef __BIG_ENDIAN
+- kfree(temp);
+- for (i = 0; i < len; i++)
+- ((u32 *)bp->gunzip_buf)[i] =
+- swab32(((u32 *)bp->gunzip_buf)[i]);
+-#endif
+- } else {
+- if ((len * 4) > FW_BUF_SIZE) {
+- BNX2X_ERR("LARGE DMAE OPERATION ! "
+- "addr 0x%x len 0x%x\n", addr, len*4);
+- return;
+- }
+- memcpy(bp->gunzip_buf, data, len * 4);
+- }
+-
+- if (bp->dmae_ready) {
+- while (len > DMAE_LEN32_WR_MAX) {
+- bnx2x_write_dmae(bp, bp->gunzip_mapping + offset,
+- addr + offset, DMAE_LEN32_WR_MAX);
+- offset += DMAE_LEN32_WR_MAX * 4;
+- len -= DMAE_LEN32_WR_MAX;
+- }
+- bnx2x_write_dmae(bp, bp->gunzip_mapping + offset,
+- addr + offset, len);
+- } else
+- bnx2x_init_ind_wr(bp, addr, bp->gunzip_buf, len);
+-}
+-
+-static void bnx2x_init_block(struct bnx2x *bp, u32 op_start, u32 op_end)
+-{
+- int is_e1 = CHIP_IS_E1(bp);
+- int is_e1h = CHIP_IS_E1H(bp);
+- int is_emul_e1h = (CHIP_REV_IS_EMUL(bp) && is_e1h);
+- int hw_wr, i;
+- union init_op *op;
+- u32 op_type, addr, len;
+- const u32 *data, *data_base;
+-
+- if (CHIP_REV_IS_FPGA(bp))
+- hw_wr = OP_WR_FPGA;
+- else if (CHIP_REV_IS_EMUL(bp))
+- hw_wr = OP_WR_EMUL;
+- else
+- hw_wr = OP_WR_ASIC;
+-
+- if (is_e1)
+- data_base = init_data_e1;
+- else /* CHIP_IS_E1H(bp) */
+- data_base = init_data_e1h;
+-
+- for (i = op_start; i < op_end; i++) {
+-
+- op = (union init_op *)&(init_ops[i]);
+-
+- op_type = op->str_wr.op;
+- addr = op->str_wr.offset;
+- len = op->str_wr.data_len;
+- data = data_base + op->str_wr.data_off;
+-
+- /* careful! it must be in order */
+- if (unlikely(op_type > OP_WB)) {
+-
+- /* If E1 only */
+- if (op_type <= OP_WB_E1) {
+- if (is_e1)
+- op_type -= (OP_RD_E1 - OP_RD);
+-
+- /* If E1H only */
+- } else if (op_type <= OP_WB_E1H) {
+- if (is_e1h)
+- op_type -= (OP_RD_E1H - OP_RD);
+- }
+-
+- /* HW/EMUL specific */
+- if (op_type == hw_wr)
+- op_type = OP_WR;
+-
+- /* EMUL on E1H is special */
+- if ((op_type == OP_WR_EMUL_E1H) && is_emul_e1h)
+- op_type = OP_WR;
+- }
+-
+- switch (op_type) {
+- case OP_RD:
+- REG_RD(bp, addr);
+- break;
+- case OP_WR:
+- REG_WR(bp, addr, op->write.val);
+- break;
+- case OP_SW:
+- bnx2x_init_str_wr(bp, addr, data, len);
+- break;
+- case OP_WB:
+- bnx2x_init_wr_wb(bp, addr, data, len, 0, is_e1, 0);
+- break;
+- case OP_SI:
+- bnx2x_init_ind_wr(bp, addr, data, len);
+- break;
+- case OP_ZR:
+- bnx2x_init_fill(bp, addr, 0, op->zero.len);
+- break;
+- case OP_ZP:
+- bnx2x_init_wr_wb(bp, addr, data, len, 1, is_e1,
+- op->str_wr.data_off);
+- break;
+- case OP_WR_64:
+- bnx2x_init_wr_64(bp, addr, data, len);
+- break;
+- default:
+- /* happens whenever an op is of a diff HW */
+-#if 0
+- DP(NETIF_MSG_HW, "skipping init operation "
+- "index %d[%d:%d]: type %d addr 0x%x "
+- "len %d(0x%x)\n",
+- i, op_start, op_end, op_type, addr, len, len);
+-#endif
+- break;
+- }
+- }
+-}
+-
+-
+ /****************************************************************************
+ * PXP
+ ****************************************************************************/
+@@ -562,111 +318,6 @@ static const struct arb_line write_arb_a
+ PXP2_REG_RQ_BW_WR_UBOUND30}
+ };
+
+-static void bnx2x_init_pxp(struct bnx2x *bp)
+-{
+- u16 devctl;
+- int r_order, w_order;
+- u32 val, i;
+-
+- pci_read_config_word(bp->pdev,
+- bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
+- DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
+- w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
+- if (bp->mrrs == -1)
+- r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
+- else {
+- DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
+- r_order = bp->mrrs;
+- }
+-
+- if (r_order > MAX_RD_ORD) {
+- DP(NETIF_MSG_HW, "read order of %d order adjusted to %d\n",
+- r_order, MAX_RD_ORD);
+- r_order = MAX_RD_ORD;
+- }
+- if (w_order > MAX_WR_ORD) {
+- DP(NETIF_MSG_HW, "write order of %d order adjusted to %d\n",
+- w_order, MAX_WR_ORD);
+- w_order = MAX_WR_ORD;
+- }
+- if (CHIP_REV_IS_FPGA(bp)) {
+- DP(NETIF_MSG_HW, "write order adjusted to 1 for FPGA\n");
+- w_order = 0;
+- }
+- DP(NETIF_MSG_HW, "read order %d write order %d\n", r_order, w_order);
+-
+- for (i = 0; i < NUM_RD_Q-1; i++) {
+- REG_WR(bp, read_arb_addr[i].l, read_arb_data[i][r_order].l);
+- REG_WR(bp, read_arb_addr[i].add,
+- read_arb_data[i][r_order].add);
+- REG_WR(bp, read_arb_addr[i].ubound,
+- read_arb_data[i][r_order].ubound);
+- }
+-
+- for (i = 0; i < NUM_WR_Q-1; i++) {
+- if ((write_arb_addr[i].l == PXP2_REG_RQ_BW_WR_L29) ||
+- (write_arb_addr[i].l == PXP2_REG_RQ_BW_WR_L30)) {
+-
+- REG_WR(bp, write_arb_addr[i].l,
+- write_arb_data[i][w_order].l);
+-
+- REG_WR(bp, write_arb_addr[i].add,
+- write_arb_data[i][w_order].add);
+-
+- REG_WR(bp, write_arb_addr[i].ubound,
+- write_arb_data[i][w_order].ubound);
+- } else {
+-
+- val = REG_RD(bp, write_arb_addr[i].l);
+- REG_WR(bp, write_arb_addr[i].l,
+- val | (write_arb_data[i][w_order].l << 10));
+-
+- val = REG_RD(bp, write_arb_addr[i].add);
+- REG_WR(bp, write_arb_addr[i].add,
+- val | (write_arb_data[i][w_order].add << 10));
+-
+- val = REG_RD(bp, write_arb_addr[i].ubound);
+- REG_WR(bp, write_arb_addr[i].ubound,
+- val | (write_arb_data[i][w_order].ubound << 7));
+- }
+- }
+-
+- val = write_arb_data[NUM_WR_Q-1][w_order].add;
+- val += write_arb_data[NUM_WR_Q-1][w_order].ubound << 10;
+- val += write_arb_data[NUM_WR_Q-1][w_order].l << 17;
+- REG_WR(bp, PXP2_REG_PSWRQ_BW_RD, val);
+-
+- val = read_arb_data[NUM_RD_Q-1][r_order].add;
+- val += read_arb_data[NUM_RD_Q-1][r_order].ubound << 10;
+- val += read_arb_data[NUM_RD_Q-1][r_order].l << 17;
+- REG_WR(bp, PXP2_REG_PSWRQ_BW_WR, val);
+-
+- REG_WR(bp, PXP2_REG_RQ_WR_MBS0, w_order);
+- REG_WR(bp, PXP2_REG_RQ_WR_MBS1, w_order);
+- REG_WR(bp, PXP2_REG_RQ_RD_MBS0, r_order);
+- REG_WR(bp, PXP2_REG_RQ_RD_MBS1, r_order);
+-
+- if (r_order == MAX_RD_ORD)
+- REG_WR(bp, PXP2_REG_RQ_PDR_LIMIT, 0xe00);
+-
+- REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x18 << w_order));
+-
+- if (CHIP_IS_E1H(bp)) {
+- REG_WR(bp, PXP2_REG_WR_HC_MPS, w_order+1);
+- REG_WR(bp, PXP2_REG_WR_USDM_MPS, w_order+1);
+- REG_WR(bp, PXP2_REG_WR_CSDM_MPS, w_order+1);
+- REG_WR(bp, PXP2_REG_WR_TSDM_MPS, w_order+1);
+- REG_WR(bp, PXP2_REG_WR_XSDM_MPS, w_order+1);
+- REG_WR(bp, PXP2_REG_WR_QM_MPS, w_order+1);
+- REG_WR(bp, PXP2_REG_WR_TM_MPS, w_order+1);
+- REG_WR(bp, PXP2_REG_WR_SRC_MPS, w_order+1);
+- REG_WR(bp, PXP2_REG_WR_DBG_MPS, w_order+1);
+- REG_WR(bp, PXP2_REG_WR_DMAE_MPS, 2); /* DMAE is special */
+- REG_WR(bp, PXP2_REG_WR_CDU_MPS, w_order+1);
+- }
+-}
+-
+-
+ /****************************************************************************
+ * CDU
+ ****************************************************************************/
+@@ -689,128 +340,12 @@ static void bnx2x_init_pxp(struct bnx2x
+ (0x80 | ((_type) & 0xf << 3) | (CDU_CRC8(_cid, _region, _type) & 0x7))
+ #define CDU_RSRVD_INVALIDATE_CONTEXT_VALUE(_val) ((_val) & ~0x80)
+
+-/*****************************************************************************
+- * Description:
+- * Calculates crc 8 on a word value: polynomial 0-1-2-8
+- * Code was translated from Verilog.
+- ****************************************************************************/
+-static u8 calc_crc8(u32 data, u8 crc)
+-{
+- u8 D[32];
+- u8 NewCRC[8];
+- u8 C[8];
+- u8 crc_res;
+- u8 i;
+-
+- /* split the data into 31 bits */
+- for (i = 0; i < 32; i++) {
+- D[i] = data & 1;
+- data = data >> 1;
+- }
+-
+- /* split the crc into 8 bits */
+- for (i = 0; i < 8; i++) {
+- C[i] = crc & 1;
+- crc = crc >> 1;
+- }
+-
+- NewCRC[0] = D[31] ^ D[30] ^ D[28] ^ D[23] ^ D[21] ^ D[19] ^ D[18] ^
+- D[16] ^ D[14] ^ D[12] ^ D[8] ^ D[7] ^ D[6] ^ D[0] ^ C[4] ^
+- C[6] ^ C[7];
+- NewCRC[1] = D[30] ^ D[29] ^ D[28] ^ D[24] ^ D[23] ^ D[22] ^ D[21] ^
+- D[20] ^ D[18] ^ D[17] ^ D[16] ^ D[15] ^ D[14] ^ D[13] ^
+- D[12] ^ D[9] ^ D[6] ^ D[1] ^ D[0] ^ C[0] ^ C[4] ^ C[5] ^ C[6];
+- NewCRC[2] = D[29] ^ D[28] ^ D[25] ^ D[24] ^ D[22] ^ D[17] ^ D[15] ^
+- D[13] ^ D[12] ^ D[10] ^ D[8] ^ D[6] ^ D[2] ^ D[1] ^ D[0] ^
+- C[0] ^ C[1] ^ C[4] ^ C[5];
+- NewCRC[3] = D[30] ^ D[29] ^ D[26] ^ D[25] ^ D[23] ^ D[18] ^ D[16] ^
+- D[14] ^ D[13] ^ D[11] ^ D[9] ^ D[7] ^ D[3] ^ D[2] ^ D[1] ^
+- C[1] ^ C[2] ^ C[5] ^ C[6];
+- NewCRC[4] = D[31] ^ D[30] ^ D[27] ^ D[26] ^ D[24] ^ D[19] ^ D[17] ^
+- D[15] ^ D[14] ^ D[12] ^ D[10] ^ D[8] ^ D[4] ^ D[3] ^ D[2] ^
+- C[0] ^ C[2] ^ C[3] ^ C[6] ^ C[7];
+- NewCRC[5] = D[31] ^ D[28] ^ D[27] ^ D[25] ^ D[20] ^ D[18] ^ D[16] ^
+- D[15] ^ D[13] ^ D[11] ^ D[9] ^ D[5] ^ D[4] ^ D[3] ^ C[1] ^
+- C[3] ^ C[4] ^ C[7];
+- NewCRC[6] = D[29] ^ D[28] ^ D[26] ^ D[21] ^ D[19] ^ D[17] ^ D[16] ^
+- D[14] ^ D[12] ^ D[10] ^ D[6] ^ D[5] ^ D[4] ^ C[2] ^ C[4] ^
+- C[5];
+- NewCRC[7] = D[30] ^ D[29] ^ D[27] ^ D[22] ^ D[20] ^ D[18] ^ D[17] ^
+- D[15] ^ D[13] ^ D[11] ^ D[7] ^ D[6] ^ D[5] ^ C[3] ^ C[5] ^
+- C[6];
+-
+- crc_res = 0;
+- for (i = 0; i < 8; i++)
+- crc_res |= (NewCRC[i] << i);
+-
+- return crc_res;
+-}
+
+ /* registers addresses are not in order
+ so these arrays help simplify the code */
+-static const int cm_start[E1H_FUNC_MAX][9] = {
+- {MISC_FUNC0_START, TCM_FUNC0_START, UCM_FUNC0_START, CCM_FUNC0_START,
+- XCM_FUNC0_START, TSEM_FUNC0_START, USEM_FUNC0_START, CSEM_FUNC0_START,
+- XSEM_FUNC0_START},
+- {MISC_FUNC1_START, TCM_FUNC1_START, UCM_FUNC1_START, CCM_FUNC1_START,
+- XCM_FUNC1_START, TSEM_FUNC1_START, USEM_FUNC1_START, CSEM_FUNC1_START,
+- XSEM_FUNC1_START},
+- {MISC_FUNC2_START, TCM_FUNC2_START, UCM_FUNC2_START, CCM_FUNC2_START,
+- XCM_FUNC2_START, TSEM_FUNC2_START, USEM_FUNC2_START, CSEM_FUNC2_START,
+- XSEM_FUNC2_START},
+- {MISC_FUNC3_START, TCM_FUNC3_START, UCM_FUNC3_START, CCM_FUNC3_START,
+- XCM_FUNC3_START, TSEM_FUNC3_START, USEM_FUNC3_START, CSEM_FUNC3_START,
+- XSEM_FUNC3_START},
+- {MISC_FUNC4_START, TCM_FUNC4_START, UCM_FUNC4_START, CCM_FUNC4_START,
+- XCM_FUNC4_START, TSEM_FUNC4_START, USEM_FUNC4_START, CSEM_FUNC4_START,
+- XSEM_FUNC4_START},
+- {MISC_FUNC5_START, TCM_FUNC5_START, UCM_FUNC5_START, CCM_FUNC5_START,
+- XCM_FUNC5_START, TSEM_FUNC5_START, USEM_FUNC5_START, CSEM_FUNC5_START,
+- XSEM_FUNC5_START},
+- {MISC_FUNC6_START, TCM_FUNC6_START, UCM_FUNC6_START, CCM_FUNC6_START,
+- XCM_FUNC6_START, TSEM_FUNC6_START, USEM_FUNC6_START, CSEM_FUNC6_START,
+- XSEM_FUNC6_START},
+- {MISC_FUNC7_START, TCM_FUNC7_START, UCM_FUNC7_START, CCM_FUNC7_START,
+- XCM_FUNC7_START, TSEM_FUNC7_START, USEM_FUNC7_START, CSEM_FUNC7_START,
+- XSEM_FUNC7_START}
+-};
+-
+-static const int cm_end[E1H_FUNC_MAX][9] = {
+- {MISC_FUNC0_END, TCM_FUNC0_END, UCM_FUNC0_END, CCM_FUNC0_END,
+- XCM_FUNC0_END, TSEM_FUNC0_END, USEM_FUNC0_END, CSEM_FUNC0_END,
+- XSEM_FUNC0_END},
+- {MISC_FUNC1_END, TCM_FUNC1_END, UCM_FUNC1_END, CCM_FUNC1_END,
+- XCM_FUNC1_END, TSEM_FUNC1_END, USEM_FUNC1_END, CSEM_FUNC1_END,
+- XSEM_FUNC1_END},
+- {MISC_FUNC2_END, TCM_FUNC2_END, UCM_FUNC2_END, CCM_FUNC2_END,
+- XCM_FUNC2_END, TSEM_FUNC2_END, USEM_FUNC2_END, CSEM_FUNC2_END,
+- XSEM_FUNC2_END},
+- {MISC_FUNC3_END, TCM_FUNC3_END, UCM_FUNC3_END, CCM_FUNC3_END,
+- XCM_FUNC3_END, TSEM_FUNC3_END, USEM_FUNC3_END, CSEM_FUNC3_END,
+- XSEM_FUNC3_END},
+- {MISC_FUNC4_END, TCM_FUNC4_END, UCM_FUNC4_END, CCM_FUNC4_END,
+- XCM_FUNC4_END, TSEM_FUNC4_END, USEM_FUNC4_END, CSEM_FUNC4_END,
+- XSEM_FUNC4_END},
+- {MISC_FUNC5_END, TCM_FUNC5_END, UCM_FUNC5_END, CCM_FUNC5_END,
+- XCM_FUNC5_END, TSEM_FUNC5_END, USEM_FUNC5_END, CSEM_FUNC5_END,
+- XSEM_FUNC5_END},
+- {MISC_FUNC6_END, TCM_FUNC6_END, UCM_FUNC6_END, CCM_FUNC6_END,
+- XCM_FUNC6_END, TSEM_FUNC6_END, USEM_FUNC6_END, CSEM_FUNC6_END,
+- XSEM_FUNC6_END},
+- {MISC_FUNC7_END, TCM_FUNC7_END, UCM_FUNC7_END, CCM_FUNC7_END,
+- XCM_FUNC7_END, TSEM_FUNC7_END, USEM_FUNC7_END, CSEM_FUNC7_END,
+- XSEM_FUNC7_END},
+-};
+-
+-static const int hc_limits[E1H_FUNC_MAX][2] = {
+- {HC_FUNC0_START, HC_FUNC0_END},
+- {HC_FUNC1_START, HC_FUNC1_END},
+- {HC_FUNC2_START, HC_FUNC2_END},
+- {HC_FUNC3_START, HC_FUNC3_END},
+- {HC_FUNC4_START, HC_FUNC4_END},
+- {HC_FUNC5_START, HC_FUNC5_END},
+- {HC_FUNC6_START, HC_FUNC6_END},
+- {HC_FUNC7_START, HC_FUNC7_END}
++static const int cm_blocks[9] = {
++ MISC_BLOCK, TCM_BLOCK, UCM_BLOCK, CCM_BLOCK, XCM_BLOCK,
++ TSEM_BLOCK, USEM_BLOCK, CSEM_BLOCK, XSEM_BLOCK
+ };
+
+ #endif /* BNX2X_INIT_H */
+diff -urpN linux-source-2.6.26.orig/drivers/net/bnx2x_init_ops.h linux-source-2.6.26/drivers/net/bnx2x_init_ops.h
+--- linux-source-2.6.26.orig/drivers/net/bnx2x_init_ops.h 1969-12-31 17:00:00.000000000 -0700
++++ linux-source-2.6.26/drivers/net/bnx2x_init_ops.h 2009-07-24 16:42:53.000000000 -0600
+@@ -0,0 +1,442 @@
++/* bnx2x_init_ops.h: Broadcom Everest network driver.
++ * Static functions needed during the initialization.
++ * This file is "included" in bnx2x_main.c.
++ *
++ * Copyright (c) 2007-2009 Broadcom Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation.
++ *
++ * Maintained by: Eilon Greenstein <eilong at broadcom.com>
++ * Written by: Vladislav Zolotarov <vladz at broadcom.com>
++ */
++#ifndef BNX2X_INIT_OPS_H
++#define BNX2X_INIT_OPS_H
++
++static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val);
++static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len);
++
++static void bnx2x_init_str_wr(struct bnx2x *bp, u32 addr, const u32 *data,
++ u32 len)
++{
++ int i;
++
++ for (i = 0; i < len; i++) {
++ REG_WR(bp, addr + i*4, data[i]);
++ if (!(i % 10000)) {
++ touch_softlockup_watchdog();
++ cpu_relax();
++ }
++ }
++}
++
++static void bnx2x_init_ind_wr(struct bnx2x *bp, u32 addr, const u32 *data,
++ u16 len)
++{
++ int i;
++
++ for (i = 0; i < len; i++) {
++ REG_WR_IND(bp, addr + i*4, data[i]);
++ if (!(i % 10000)) {
++ touch_softlockup_watchdog();
++ cpu_relax();
++ }
++ }
++}
++
++static void bnx2x_write_big_buf(struct bnx2x *bp, u32 addr, u32 len)
++{
++ int offset = 0;
++
++ if (bp->dmae_ready) {
++ while (len > DMAE_LEN32_WR_MAX) {
++ bnx2x_write_dmae(bp, bp->gunzip_mapping + offset,
++ addr + offset, DMAE_LEN32_WR_MAX);
++ offset += DMAE_LEN32_WR_MAX * 4;
++ len -= DMAE_LEN32_WR_MAX;
++ }
++ bnx2x_write_dmae(bp, bp->gunzip_mapping + offset,
++ addr + offset, len);
++ } else
++ bnx2x_init_str_wr(bp, addr, bp->gunzip_buf, len);
++}
++
++static void bnx2x_init_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
++{
++ u32 buf_len = (((len * 4) > FW_BUF_SIZE) ? FW_BUF_SIZE : (len * 4));
++ u32 buf_len32 = buf_len / 4;
++ int i;
++
++ memset(bp->gunzip_buf, fill, buf_len);
++
++ for (i = 0; i < len; i += buf_len32) {
++ u32 cur_len = min(buf_len32, len - i);
++
++ bnx2x_write_big_buf(bp, addr + i * 4, cur_len);
++ }
++}
++
++static void bnx2x_init_wr_64(struct bnx2x *bp, u32 addr, const u32 *data,
++ u32 len64)
++{
++ u32 buf_len32 = FW_BUF_SIZE / 4;
++ u32 len = len64 * 2;
++ u64 data64 = 0;
++ int i;
++
++ /* 64 bit value is in a blob: first low DWORD, then high DWORD */
++ data64 = HILO_U64((*(data + 1)), (*data));
++ len64 = min((u32)(FW_BUF_SIZE/8), len64);
++ for (i = 0; i < len64; i++) {
++ u64 *pdata = ((u64 *)(bp->gunzip_buf)) + i;
++
++ *pdata = data64;
++ }
++
++ for (i = 0; i < len; i += buf_len32) {
++ u32 cur_len = min(buf_len32, len - i);
++
++ bnx2x_write_big_buf(bp, addr + i * 4, cur_len);
++ }
++}
++
++/*********************************************************
++ There are different blobs for each PRAM section.
++ In addition, each blob write operation is divided into a few operations
++ in order to decrease the amount of phys. contiguous buffer needed.
++ Thus, when we select a blob the address may be with some offset
++ from the beginning of PRAM section.
++ The same holds for the INT_TABLE sections.
++**********************************************************/
++#define IF_IS_INT_TABLE_ADDR(base, addr) \
++ if (((base) <= (addr)) && ((base) + 0x400 >= (addr)))
++
++#define IF_IS_PRAM_ADDR(base, addr) \
++ if (((base) <= (addr)) && ((base) + 0x40000 >= (addr)))
++
++static const u8 *bnx2x_sel_blob(struct bnx2x *bp, u32 addr, const u8 *data)
++{
++ IF_IS_INT_TABLE_ADDR(TSEM_REG_INT_TABLE, addr)
++ data = bp->tsem_int_table_data;
++ else IF_IS_INT_TABLE_ADDR(CSEM_REG_INT_TABLE, addr)
++ data = bp->csem_int_table_data;
++ else IF_IS_INT_TABLE_ADDR(USEM_REG_INT_TABLE, addr)
++ data = bp->usem_int_table_data;
++ else IF_IS_INT_TABLE_ADDR(XSEM_REG_INT_TABLE, addr)
++ data = bp->xsem_int_table_data;
++ else IF_IS_PRAM_ADDR(TSEM_REG_PRAM, addr)
++ data = bp->tsem_pram_data;
++ else IF_IS_PRAM_ADDR(CSEM_REG_PRAM, addr)
++ data = bp->csem_pram_data;
++ else IF_IS_PRAM_ADDR(USEM_REG_PRAM, addr)
++ data = bp->usem_pram_data;
++ else IF_IS_PRAM_ADDR(XSEM_REG_PRAM, addr)
++ data = bp->xsem_pram_data;
++
++ return data;
++}
++
++static void bnx2x_write_big_buf_wb(struct bnx2x *bp, u32 addr, u32 len)
++{
++ int offset = 0;
++
++ if (bp->dmae_ready) {
++ while (len > DMAE_LEN32_WR_MAX) {
++ bnx2x_write_dmae(bp, bp->gunzip_mapping + offset,
++ addr + offset, DMAE_LEN32_WR_MAX);
++ offset += DMAE_LEN32_WR_MAX * 4;
++ len -= DMAE_LEN32_WR_MAX;
++ }
++ bnx2x_write_dmae(bp, bp->gunzip_mapping + offset,
++ addr + offset, len);
++ } else
++ bnx2x_init_ind_wr(bp, addr, bp->gunzip_buf, len);
++}
++
++static void bnx2x_init_wr_wb(struct bnx2x *bp, u32 addr, const u32 *data,
++ u32 len)
++{
++ /* This is needed for NO_ZIP mode, currently supported
++ in little endian mode only */
++ data = (const u32*)bnx2x_sel_blob(bp, addr, (const u8*)data);
++
++ if ((len * 4) > FW_BUF_SIZE) {
++ BNX2X_ERR("LARGE DMAE OPERATION ! "
++ "addr 0x%x len 0x%x\n", addr, len*4);
++ return;
++ }
++ memcpy(bp->gunzip_buf, data, len * 4);
++
++ bnx2x_write_big_buf_wb(bp, addr, len);
++}
++
++static void bnx2x_init_wr_zp(struct bnx2x *bp, u32 addr,
++ u32 len, u32 blob_off)
++{
++ int rc, i;
++ const u8 *data = NULL;
++
++ data = bnx2x_sel_blob(bp, addr, data) + 4*blob_off;
++
++ if (data == NULL) {
++ panic("Blob not found for addr 0x%x\n", addr);
++ return;
++ }
++
++ rc = bnx2x_gunzip(bp, data, len);
++ if (rc) {
++ BNX2X_ERR("gunzip failed ! addr 0x%x rc %d\n", addr, rc);
++ BNX2X_ERR("blob_offset=0x%x\n", blob_off);
++ return;
++ }
++
++ /* gunzip_outlen is in dwords */
++ len = bp->gunzip_outlen;
++ for (i = 0; i < len; i++)
++ ((u32 *)bp->gunzip_buf)[i] =
++ cpu_to_le32(((u32 *)bp->gunzip_buf)[i]);
++
++ bnx2x_write_big_buf_wb(bp, addr, len);
++}
++
++static void bnx2x_init_block(struct bnx2x *bp, u32 block, u32 stage)
++{
++ int hw_wr, i;
++ u16 op_start =
++ bp->init_ops_offsets[BLOCK_OPS_IDX(block,stage,STAGE_START)];
++ u16 op_end =
++ bp->init_ops_offsets[BLOCK_OPS_IDX(block,stage,STAGE_END)];
++ union init_op *op;
++ u32 op_type, addr, len;
++ const u32 *data, *data_base;
++
++ /* If empty block */
++ if (op_start == op_end)
++ return;
++
++ if (CHIP_REV_IS_FPGA(bp))
++ hw_wr = OP_WR_FPGA;
++ else if (CHIP_REV_IS_EMUL(bp))
++ hw_wr = OP_WR_EMUL;
++ else
++ hw_wr = OP_WR_ASIC;
++
++ data_base = bp->init_data;
++
++ for (i = op_start; i < op_end; i++) {
++
++ op = (union init_op *)&(bp->init_ops[i]);
++
++ op_type = op->str_wr.op;
++ addr = op->str_wr.offset;
++ len = op->str_wr.data_len;
++ data = data_base + op->str_wr.data_off;
++
++ /* HW/EMUL specific */
++ if (unlikely((op_type > OP_WB) && (op_type == hw_wr)))
++ op_type = OP_WR;
++
++ switch (op_type) {
++ case OP_RD:
++ REG_RD(bp, addr);
++ break;
++ case OP_WR:
++ REG_WR(bp, addr, op->write.val);
++ break;
++ case OP_SW:
++ bnx2x_init_str_wr(bp, addr, data, len);
++ break;
++ case OP_WB:
++ bnx2x_init_wr_wb(bp, addr, data, len);
++ break;
++ case OP_SI:
++ bnx2x_init_ind_wr(bp, addr, data, len);
++ break;
++ case OP_ZR:
++ bnx2x_init_fill(bp, addr, 0, op->zero.len);
++ break;
++ case OP_ZP:
++ bnx2x_init_wr_zp(bp, addr, len,
++ op->str_wr.data_off);
++ break;
++ case OP_WR_64:
++ bnx2x_init_wr_64(bp, addr, data, len);
++ break;
++ default:
++ /* happens whenever an op is of a diff HW */
++#if 0
++ DP(NETIF_MSG_HW, "skipping init operation "
++ "index %d[%d:%d]: type %d addr 0x%x "
++ "len %d(0x%x)\n",
++ i, op_start, op_end, op_type, addr, len, len);
++#endif
++ break;
++ }
++ }
++}
++
++/* PXP */
++static void bnx2x_init_pxp(struct bnx2x *bp)
++{
++ u16 devctl;
++ int r_order, w_order;
++ u32 val, i;
++
++ pci_read_config_word(bp->pdev,
++ bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
++ DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
++ w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
++ if (bp->mrrs == -1)
++ r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
++ else {
++ DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
++ r_order = bp->mrrs;
++ }
++
++ if (r_order > MAX_RD_ORD) {
++ DP(NETIF_MSG_HW, "read order of %d order adjusted to %d\n",
++ r_order, MAX_RD_ORD);
++ r_order = MAX_RD_ORD;
++ }
++ if (w_order > MAX_WR_ORD) {
++ DP(NETIF_MSG_HW, "write order of %d order adjusted to %d\n",
++ w_order, MAX_WR_ORD);
++ w_order = MAX_WR_ORD;
++ }
++ if (CHIP_REV_IS_FPGA(bp)) {
++ DP(NETIF_MSG_HW, "write order adjusted to 1 for FPGA\n");
++ w_order = 0;
++ }
++ DP(NETIF_MSG_HW, "read order %d write order %d\n", r_order, w_order);
++
++ for (i = 0; i < NUM_RD_Q-1; i++) {
++ REG_WR(bp, read_arb_addr[i].l, read_arb_data[i][r_order].l);
++ REG_WR(bp, read_arb_addr[i].add,
++ read_arb_data[i][r_order].add);
++ REG_WR(bp, read_arb_addr[i].ubound,
++ read_arb_data[i][r_order].ubound);
++ }
++
++ for (i = 0; i < NUM_WR_Q-1; i++) {
++ if ((write_arb_addr[i].l == PXP2_REG_RQ_BW_WR_L29) ||
++ (write_arb_addr[i].l == PXP2_REG_RQ_BW_WR_L30)) {
++
++ REG_WR(bp, write_arb_addr[i].l,
++ write_arb_data[i][w_order].l);
++
++ REG_WR(bp, write_arb_addr[i].add,
++ write_arb_data[i][w_order].add);
++
++ REG_WR(bp, write_arb_addr[i].ubound,
++ write_arb_data[i][w_order].ubound);
++ } else {
++
++ val = REG_RD(bp, write_arb_addr[i].l);
++ REG_WR(bp, write_arb_addr[i].l,
++ val | (write_arb_data[i][w_order].l << 10));
++
++ val = REG_RD(bp, write_arb_addr[i].add);
++ REG_WR(bp, write_arb_addr[i].add,
++ val | (write_arb_data[i][w_order].add << 10));
++
++ val = REG_RD(bp, write_arb_addr[i].ubound);
++ REG_WR(bp, write_arb_addr[i].ubound,
++ val | (write_arb_data[i][w_order].ubound << 7));
++ }
++ }
++
++ val = write_arb_data[NUM_WR_Q-1][w_order].add;
++ val += write_arb_data[NUM_WR_Q-1][w_order].ubound << 10;
++ val += write_arb_data[NUM_WR_Q-1][w_order].l << 17;
++ REG_WR(bp, PXP2_REG_PSWRQ_BW_RD, val);
++
++ val = read_arb_data[NUM_RD_Q-1][r_order].add;
++ val += read_arb_data[NUM_RD_Q-1][r_order].ubound << 10;
++ val += read_arb_data[NUM_RD_Q-1][r_order].l << 17;
++ REG_WR(bp, PXP2_REG_PSWRQ_BW_WR, val);
++
++ REG_WR(bp, PXP2_REG_RQ_WR_MBS0, w_order);
++ REG_WR(bp, PXP2_REG_RQ_WR_MBS1, w_order);
++ REG_WR(bp, PXP2_REG_RQ_RD_MBS0, r_order);
++ REG_WR(bp, PXP2_REG_RQ_RD_MBS1, r_order);
++
++ if (r_order == MAX_RD_ORD)
++ REG_WR(bp, PXP2_REG_RQ_PDR_LIMIT, 0xe00);
++
++ REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x18 << w_order));
++
++ if (CHIP_IS_E1H(bp)) {
++ val = ((w_order == 0) ? 2 : 3);
++ REG_WR(bp, PXP2_REG_WR_HC_MPS, val);
++ REG_WR(bp, PXP2_REG_WR_USDM_MPS, val);
++ REG_WR(bp, PXP2_REG_WR_CSDM_MPS, val);
++ REG_WR(bp, PXP2_REG_WR_TSDM_MPS, val);
++ REG_WR(bp, PXP2_REG_WR_XSDM_MPS, val);
++ REG_WR(bp, PXP2_REG_WR_QM_MPS, val);
++ REG_WR(bp, PXP2_REG_WR_TM_MPS, val);
++ REG_WR(bp, PXP2_REG_WR_SRC_MPS, val);
++ REG_WR(bp, PXP2_REG_WR_DBG_MPS, val);
++ REG_WR(bp, PXP2_REG_WR_DMAE_MPS, 2); /* DMAE is special */
++ REG_WR(bp, PXP2_REG_WR_CDU_MPS, val);
++ }
++}
++
++/*****************************************************************************
++ * Description:
++ * Calculates crc 8 on a word value: polynomial 0-1-2-8
++ * Code was translated from Verilog.
++ ****************************************************************************/
++static u8 calc_crc8(u32 data, u8 crc)
++{
++ u8 D[32];
++ u8 NewCRC[8];
++ u8 C[8];
++ u8 crc_res;
++ u8 i;
++
++ /* split the data into 31 bits */
++ for (i = 0; i < 32; i++) {
++ D[i] = data & 1;
++ data = data >> 1;
++ }
++
++ /* split the crc into 8 bits */
++ for (i = 0; i < 8; i++) {
++ C[i] = crc & 1;
++ crc = crc >> 1;
++ }
++
++ NewCRC[0] = D[31] ^ D[30] ^ D[28] ^ D[23] ^ D[21] ^ D[19] ^ D[18] ^
++ D[16] ^ D[14] ^ D[12] ^ D[8] ^ D[7] ^ D[6] ^ D[0] ^ C[4] ^
++ C[6] ^ C[7];
++ NewCRC[1] = D[30] ^ D[29] ^ D[28] ^ D[24] ^ D[23] ^ D[22] ^ D[21] ^
++ D[20] ^ D[18] ^ D[17] ^ D[16] ^ D[15] ^ D[14] ^ D[13] ^
++ D[12] ^ D[9] ^ D[6] ^ D[1] ^ D[0] ^ C[0] ^ C[4] ^ C[5] ^ C[6];
++ NewCRC[2] = D[29] ^ D[28] ^ D[25] ^ D[24] ^ D[22] ^ D[17] ^ D[15] ^
++ D[13] ^ D[12] ^ D[10] ^ D[8] ^ D[6] ^ D[2] ^ D[1] ^ D[0] ^
++ C[0] ^ C[1] ^ C[4] ^ C[5];
++ NewCRC[3] = D[30] ^ D[29] ^ D[26] ^ D[25] ^ D[23] ^ D[18] ^ D[16] ^
++ D[14] ^ D[13] ^ D[11] ^ D[9] ^ D[7] ^ D[3] ^ D[2] ^ D[1] ^
++ C[1] ^ C[2] ^ C[5] ^ C[6];
++ NewCRC[4] = D[31] ^ D[30] ^ D[27] ^ D[26] ^ D[24] ^ D[19] ^ D[17] ^
++ D[15] ^ D[14] ^ D[12] ^ D[10] ^ D[8] ^ D[4] ^ D[3] ^ D[2] ^
++ C[0] ^ C[2] ^ C[3] ^ C[6] ^ C[7];
++ NewCRC[5] = D[31] ^ D[28] ^ D[27] ^ D[25] ^ D[20] ^ D[18] ^ D[16] ^
++ D[15] ^ D[13] ^ D[11] ^ D[9] ^ D[5] ^ D[4] ^ D[3] ^ C[1] ^
++ C[3] ^ C[4] ^ C[7];
++ NewCRC[6] = D[29] ^ D[28] ^ D[26] ^ D[21] ^ D[19] ^ D[17] ^ D[16] ^
++ D[14] ^ D[12] ^ D[10] ^ D[6] ^ D[5] ^ D[4] ^ C[2] ^ C[4] ^
++ C[5];
++ NewCRC[7] = D[30] ^ D[29] ^ D[27] ^ D[22] ^ D[20] ^ D[18] ^ D[17] ^
++ D[15] ^ D[13] ^ D[11] ^ D[7] ^ D[6] ^ D[5] ^ C[3] ^ C[5] ^
++ C[6];
++
++ crc_res = 0;
++ for (i = 0; i < 8; i++)
++ crc_res |= (NewCRC[i] << i);
++
++ return crc_res;
++}
++
++#endif /* BNX2X_INIT_OPS_H */
+diff -urpN linux-source-2.6.26.orig/drivers/net/bnx2x_main.c linux-source-2.6.26/drivers/net/bnx2x_main.c
+--- linux-source-2.6.26.orig/drivers/net/bnx2x_main.c 2009-07-24 16:42:24.000000000 -0600
++++ linux-source-2.6.26/drivers/net/bnx2x_main.c 2009-07-24 16:42:53.000000000 -0600
+@@ -57,11 +57,18 @@
+ #include "bnx2x_link.h"
+ #include "bnx2x.h"
+ #include "bnx2x_init.h"
++#include "bnx2x_init_ops.h"
+
+ #define DRV_MODULE_VERSION "1.45.26"
+ #define DRV_MODULE_RELDATE "2009/01/26"
+ #define BNX2X_BC_VER 0x040200
+
++#include <linux/firmware.h>
++#include "bnx2x_fw_file_hdr.h"
++/* FW files */
++#define FW_FILE_PREFIX_E1 "bnx2x-e1-"
++#define FW_FILE_PREFIX_E1H "bnx2x-e1h-"
++
+ /* Time in jiffies before concluding the transmitter is hung */
+ #define TX_TIMEOUT (5*HZ)
+
+@@ -4904,13 +4911,15 @@ static void bnx2x_gunzip_end(struct bnx2
+ }
+ }
+
+-static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
++static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
+ {
+ int n, rc;
+
+ /* check gzip header */
+- if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
++ if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
++ BNX2X_ERR("Bad gzip header\n");
+ return -EINVAL;
++ }
+
+ n = 10;
+
+@@ -4919,7 +4928,7 @@ static int bnx2x_gunzip(struct bnx2x *bp
+ if (zbuf[3] & FNAME)
+ while ((zbuf[n++] != 0) && (n < len));
+
+- bp->strm->next_in = zbuf + n;
++ bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
+ bp->strm->avail_in = len - n;
+ bp->strm->next_out = bp->gunzip_buf;
+ bp->strm->avail_out = FW_BUF_SIZE;
+@@ -5041,8 +5050,8 @@ static int bnx2x_int_mem_test(struct bnx
+ msleep(50);
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
+ msleep(50);
+- bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
+- bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
++ bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
++ bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
+
+ DP(NETIF_MSG_HW, "part2\n");
+
+@@ -5106,8 +5115,8 @@ static int bnx2x_int_mem_test(struct bnx
+ msleep(50);
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
+ msleep(50);
+- bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
+- bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
++ bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
++ bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
+ #ifndef BCM_ISCSI
+ /* set NIC mode */
+ REG_WR(bp, PRS_REG_NIC_MODE, 1);
+@@ -5182,7 +5191,7 @@ static int bnx2x_init_common(struct bnx2
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
+
+- bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
++ bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
+ if (CHIP_IS_E1H(bp))
+ REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
+
+@@ -5190,14 +5199,14 @@ static int bnx2x_init_common(struct bnx2
+ msleep(30);
+ REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
+
+- bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
++ bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
+ if (CHIP_IS_E1(bp)) {
+ /* enable HW interrupt from PXP on USDM overflow
+ bit 16 on INT_MASK_0 */
+ REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
+ }
+
+- bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
++ bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
+ bnx2x_init_pxp(bp);
+
+ #ifdef __BIG_ENDIAN
+@@ -5241,39 +5250,39 @@ static int bnx2x_init_common(struct bnx2
+ REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
+ REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
+
+- bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
++ bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
+
+ /* clean the DMAE memory */
+ bp->dmae_ready = 1;
+ bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
+
+- bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
+- bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
+- bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
+- bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
++ bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
++ bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
++ bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
++ bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
+
+ bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
+ bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
+ bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
+ bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
+
+- bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
++ bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
+ /* soft reset pulse */
+ REG_WR(bp, QM_REG_SOFT_RESET, 1);
+ REG_WR(bp, QM_REG_SOFT_RESET, 0);
+
+ #ifdef BCM_ISCSI
+- bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
++ bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
+ #endif
+
+- bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
++ bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
+ REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
+ if (!CHIP_REV_IS_SLOW(bp)) {
+ /* enable hw interrupt from doorbell Q */
+ REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
+ }
+
+- bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
++ bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
+ if (CHIP_REV_IS_SLOW(bp)) {
+ /* fix for emulation and FPGA for no pause */
+ REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
+@@ -5282,17 +5291,17 @@ static int bnx2x_init_common(struct bnx2
+ REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
+ }
+
+- bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
++ bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
+ REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
+ /* set NIC mode */
+ REG_WR(bp, PRS_REG_NIC_MODE, 1);
+ if (CHIP_IS_E1H(bp))
+ REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
+
+- bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
+- bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
+- bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
+- bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
++ bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
++ bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
++ bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
++ bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
+
+ if (CHIP_IS_E1H(bp)) {
+ bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
+@@ -5326,10 +5335,10 @@ static int bnx2x_init_common(struct bnx2
+ STORM_INTMEM_SIZE_E1);
+ }
+
+- bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
+- bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
+- bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
+- bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
++ bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
++ bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
++ bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
++ bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
+
+ /* sync semi rtc */
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
+@@ -5337,16 +5346,16 @@ static int bnx2x_init_common(struct bnx2
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
+ 0x80000000);
+
+- bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
+- bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
+- bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
++ bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
++ bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
++ bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
+
+ REG_WR(bp, SRC_REG_SOFT_RST, 1);
+ for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
+ REG_WR(bp, i, 0xc0cac01a);
+ /* TODO: replace with something meaningful */
+ }
+- bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
++ bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
+ REG_WR(bp, SRC_REG_SOFT_RST, 0);
+
+ if (sizeof(union cdu_context) != 1024)
+@@ -5354,7 +5363,7 @@ static int bnx2x_init_common(struct bnx2
+ printk(KERN_ALERT PFX "please adjust the size of"
+ " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
+
+- bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
++ bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
+ val = (4 << 24) + (0 << 12) + 1024;
+ REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
+ if (CHIP_IS_E1(bp)) {
+@@ -5363,7 +5372,7 @@ static int bnx2x_init_common(struct bnx2
+ REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
+ }
+
+- bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
++ bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
+ REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
+ /* enable context validation interrupt from CFC */
+ REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
+@@ -5371,20 +5380,25 @@ static int bnx2x_init_common(struct bnx2
+ /* set the thresholds to prevent CFC/CDU race */
+ REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
+
+- bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
+- bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
++ bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
++ bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
+
+ /* PXPCS COMMON comes here */
++ bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
+ /* Reset PCIE errors for debug */
+ REG_WR(bp, 0x2814, 0xffffffff);
+ REG_WR(bp, 0x3820, 0xffffffff);
+
+ /* EMAC0 COMMON comes here */
++ bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
+ /* EMAC1 COMMON comes here */
++ bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
+ /* DBU COMMON comes here */
++ bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
+ /* DBG COMMON comes here */
++ bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
+
+- bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
++ bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
+ if (CHIP_IS_E1H(bp)) {
+ REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
+ REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
+@@ -5463,6 +5477,7 @@ static int bnx2x_init_common(struct bnx2
+ static int bnx2x_init_port(struct bnx2x *bp)
+ {
+ int port = BP_PORT(bp);
++ int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
+ u32 val;
+
+ DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
+@@ -5470,7 +5485,9 @@ static int bnx2x_init_port(struct bnx2x
+ REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
+
+ /* Port PXP comes here */
++ bnx2x_init_block(bp, PXP_BLOCK, init_stage);
+ /* Port PXP2 comes here */
++ bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
+ #ifdef BCM_ISCSI
+ /* Port0 1
+ * Port1 385 */
+@@ -5497,37 +5514,39 @@ static int bnx2x_init_port(struct bnx2x
+ REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
+ #endif
+ /* Port CMs come here */
+- bnx2x_init_block(bp, (port ? XCM_PORT1_START : XCM_PORT0_START),
+- (port ? XCM_PORT1_END : XCM_PORT0_END));
++ bnx2x_init_block(bp, XCM_BLOCK, init_stage);
+
+ /* Port QM comes here */
+ #ifdef BCM_ISCSI
+ REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
+ REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
+
+- bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
+- func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
++ bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
+ #endif
+ /* Port DQ comes here */
++ bnx2x_init_block(bp, DQ_BLOCK, init_stage);
+ /* Port BRB1 comes here */
+ /* Port PRS comes here */
++ bnx2x_init_block(bp, PRS_BLOCK, init_stage);
+ /* Port TSDM comes here */
++ bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
+ /* Port CSDM comes here */
++ bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
+ /* Port USDM comes here */
++ bnx2x_init_block(bp, USDM_BLOCK, init_stage);
+ /* Port XSDM comes here */
+- bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
+- port ? TSEM_PORT1_END : TSEM_PORT0_END);
+- bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
+- port ? USEM_PORT1_END : USEM_PORT0_END);
+- bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
+- port ? CSEM_PORT1_END : CSEM_PORT0_END);
+- bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
+- port ? XSEM_PORT1_END : XSEM_PORT0_END);
++ bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
++
++ bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
++ bnx2x_init_block(bp, USEM_BLOCK, init_stage);
++ bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
++ bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
+ /* Port UPB comes here */
++ bnx2x_init_block(bp, UPB_BLOCK, init_stage);
+ /* Port XPB comes here */
++ bnx2x_init_block(bp, XPB_BLOCK, init_stage);
+
+- bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
+- port ? PBF_PORT1_END : PBF_PORT0_END);
++ bnx2x_init_block(bp, PBF_BLOCK, init_stage);
+
+ /* configure PBF to work without PAUSE mtu 9000 */
+ REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
+@@ -5557,18 +5576,17 @@ static int bnx2x_init_port(struct bnx2x
+ /* Port SRCH comes here */
+ #endif
+ /* Port CDU comes here */
++ bnx2x_init_block(bp, CDU_BLOCK, init_stage);
+ /* Port CFC comes here */
++ bnx2x_init_block(bp, CFC_BLOCK, init_stage);
+
+ if (CHIP_IS_E1(bp)) {
+ REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
+ REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
+ }
+- bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
+- port ? HC_PORT1_END : HC_PORT0_END);
++ bnx2x_init_block(bp, HC_BLOCK, init_stage);
+
+- bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
+- MISC_AEU_PORT0_START,
+- port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
++ bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
+ /* init aeu_mask_attn_func_0/1:
+ * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
+ * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
+@@ -5577,12 +5595,17 @@ static int bnx2x_init_port(struct bnx2x
+ (IS_E1HMF(bp) ? 0xF7 : 0x7));
+
+ /* Port PXPCS comes here */
++ bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
+ /* Port EMAC0 comes here */
++ bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
+ /* Port EMAC1 comes here */
++ bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
+ /* Port DBU comes here */
++ bnx2x_init_block(bp, DBU_BLOCK, init_stage);
+ /* Port DBG comes here */
+- bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
+- port ? NIG_PORT1_END : NIG_PORT0_END);
++ bnx2x_init_block(bp, DBG_BLOCK, init_stage);
++
++ bnx2x_init_block(bp, NIG_BLOCK, init_stage);
+
+ REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
+
+@@ -5606,7 +5629,9 @@ static int bnx2x_init_port(struct bnx2x
+ }
+
+ /* Port MCP comes here */
++ bnx2x_init_block(bp, MCP_BLOCK, init_stage);
+ /* Port DMAE comes here */
++ bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
+
+ switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
+ case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
+@@ -5674,7 +5699,7 @@ static int bnx2x_init_func(struct bnx2x
+ if (CHIP_IS_E1H(bp)) {
+ for (i = 0; i < 9; i++)
+ bnx2x_init_block(bp,
+- cm_start[func][i], cm_end[func][i]);
++ cm_blocks[i], FUNC0_STAGE + func);
+
+ REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
+ REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
+@@ -5687,7 +5712,7 @@ static int bnx2x_init_func(struct bnx2x
+ REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
+ REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
+ }
+- bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
++ bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
+
+ /* Reset PCIE errors for debug */
+ REG_WR(bp, 0x2114, 0xffffffff);
+@@ -10312,6 +10337,190 @@ static int __devinit bnx2x_get_pcie_spee
+ val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
+ return val;
+ }
++static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
++{
++ struct bnx2x_fw_file_hdr *fw_hdr;
++ struct bnx2x_fw_file_section *sections;
++ u16 *ops_offsets;
++ u32 offset, len, num_ops;
++ int i;
++ const struct firmware *firmware = bp->firmware;
++ const u8 * fw_ver;
++
++ if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
++ return -EINVAL;
++
++ fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
++ sections = (struct bnx2x_fw_file_section *)fw_hdr;
++
++ /* Make sure none of the offsets and sizes make us read beyond
++ * the end of the firmware data */
++ for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
++ offset = be32_to_cpu(sections[i].offset);
++ len = be32_to_cpu(sections[i].len);
++ if (offset + len > firmware->size) {
++ printk(KERN_ERR PFX "Section %d length is out of bounds\n", i);
++ return -EINVAL;
++ }
++ }
++
++ /* Likewise for the init_ops offsets */
++ offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
++ ops_offsets = (u16 *)(firmware->data + offset);
++ num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
++
++ for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
++ if (be16_to_cpu(ops_offsets[i]) > num_ops) {
++ printk(KERN_ERR PFX "Section offset %d is out of bounds\n", i);
++ return -EINVAL;
++ }
++ }
++
++ /* Check FW version */
++ offset = be32_to_cpu(fw_hdr->fw_version.offset);
++ fw_ver = firmware->data + offset;
++ if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
++ (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
++ (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
++ (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
++ printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
++ " Should be %d.%d.%d.%d\n",
++ fw_ver[0], fw_ver[1], fw_ver[2],
++ fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
++ BCM_5710_FW_MINOR_VERSION,
++ BCM_5710_FW_REVISION_VERSION,
++ BCM_5710_FW_ENGINEERING_VERSION);
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++static void inline be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
++{
++ u32 i;
++ const __be32 *source = (const __be32*)_source;
++ u32 *target = (u32*)_target;
++
++ for (i = 0; i < n/4; i++)
++ target[i] = be32_to_cpu(source[i]);
++}
++
++/*
++ Ops array is stored in the following format:
++ {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
++ */
++static void inline bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
++{
++ u32 i, j, tmp;
++ const __be32 *source = (const __be32*)_source;
++ struct raw_op *target = (struct raw_op*)_target;
++
++ for (i = 0, j = 0; i < n/8; i++, j+=2) {
++ tmp = be32_to_cpu(source[j]);
++ target[i].op = (tmp >> 24) & 0xff;
++ target[i].offset = tmp & 0xffffff;
++ target[i].raw_data = be32_to_cpu(source[j+1]);
++ }
++}
++static void inline be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
++{
++ u32 i;
++ u16 *target = (u16*)_target;
++ const __be16 *source = (const __be16*)_source;
++
++ for (i = 0; i < n/2; i++)
++ target[i] = be16_to_cpu(source[i]);
++}
++
++#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
++ do { \
++ u32 len = be32_to_cpu(fw_hdr->arr.len); \
++ bp->arr = kmalloc(len, GFP_KERNEL); \
++ if (!bp->arr) { \
++ printk(KERN_ERR PFX "Failed to allocate %d bytes for "#arr"\n", len); \
++ goto lbl; \
++ } \
++ func(bp->firmware->data + \
++ be32_to_cpu(fw_hdr->arr.offset), \
++ (u8*)bp->arr, len); \
++ } while (0)
++
++
++static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
++{
++ char fw_file_name[40] = {0};
++ int rc, offset;
++ struct bnx2x_fw_file_hdr *fw_hdr;
++
++ /* Create a FW file name */
++ if (CHIP_IS_E1(bp))
++ offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1);
++ else
++ offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H);
++
++ sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
++ BCM_5710_FW_MAJOR_VERSION,
++ BCM_5710_FW_MINOR_VERSION,
++ BCM_5710_FW_REVISION_VERSION,
++ BCM_5710_FW_ENGINEERING_VERSION);
++
++ printk(KERN_INFO PFX "Loading %s\n", fw_file_name);
++
++ rc = request_firmware(&bp->firmware, fw_file_name, dev);
++ if (rc) {
++ printk(KERN_ERR PFX "Can't load firmware file %s\n", fw_file_name);
++ goto request_firmware_exit;
++ }
++
++ rc = bnx2x_check_firmware(bp);
++ if (rc) {
++ printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
++ goto request_firmware_exit;
++ }
++
++ fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
++
++ /* Initialize the pointers to the init arrays */
++ /* Blob */
++ BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
++
++ /* Opcodes */
++ BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
++
++ /* Offsets */
++ BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err, be16_to_cpu_n);
++
++ /* STORMs firmware */
++ bp->tsem_int_table_data = bp->firmware->data +
++ be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
++ bp->tsem_pram_data = bp->firmware->data +
++ be32_to_cpu(fw_hdr->tsem_pram_data.offset);
++ bp->usem_int_table_data = bp->firmware->data +
++ be32_to_cpu(fw_hdr->usem_int_table_data.offset);
++ bp->usem_pram_data = bp->firmware->data +
++ be32_to_cpu(fw_hdr->usem_pram_data.offset);
++ bp->xsem_int_table_data = bp->firmware->data +
++ be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
++ bp->xsem_pram_data = bp->firmware->data +
++ be32_to_cpu(fw_hdr->xsem_pram_data.offset);
++ bp->csem_int_table_data = bp->firmware->data +
++ be32_to_cpu(fw_hdr->csem_int_table_data.offset);
++ bp->csem_pram_data = bp->firmware->data +
++ be32_to_cpu(fw_hdr->csem_pram_data.offset);
++
++ return 0;
++init_offsets_alloc_err:
++ kfree(bp->init_ops);
++init_ops_alloc_err:
++ kfree(bp->init_data);
++request_firmware_exit:
++ release_firmware(bp->firmware);
++
++ return rc;
++}
++
++
+
+ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+@@ -10346,6 +10555,13 @@ static int __devinit bnx2x_init_one(stru
+ if (rc)
+ goto init_one_exit;
+
++ /* Set init arrays */
++ rc = bnx2x_init_firmware(bp, &pdev->dev);
++ if (rc) {
++ printk(KERN_ERR PFX "Error loading firmware\n");
++ goto init_one_exit;
++ }
++
+ rc = register_netdev(dev);
+ if (rc) {
+ dev_err(&pdev->dev, "Cannot register net device\n");
+@@ -10393,6 +10609,11 @@ static void __devexit bnx2x_remove_one(s
+
+ unregister_netdev(dev);
+
++ kfree(bp->init_ops_offsets);
++ kfree(bp->init_ops);
++ kfree(bp->init_data);
++ release_firmware(bp->firmware);
++
+ if (bp->regview)
+ iounmap(bp->regview);
+
+@@ -10659,3 +10880,4 @@ static void __exit bnx2x_cleanup(void)
+ module_init(bnx2x_init);
+ module_exit(bnx2x_cleanup);
+
++
+diff -urpN linux-source-2.6.26.orig/drivers/net/Kconfig linux-source-2.6.26/drivers/net/Kconfig
+--- linux-source-2.6.26.orig/drivers/net/Kconfig 2009-07-24 16:42:24.000000000 -0600
++++ linux-source-2.6.26/drivers/net/Kconfig 2009-07-24 16:42:53.000000000 -0600
+@@ -2587,6 +2587,7 @@ config TEHUTI
+ config BNX2X
+ tristate "Broadcom NetXtremeII 10Gb support"
+ depends on PCI
++ select FW_LOADER
+ select ZLIB_INFLATE
+ select LIBCRC32C
+ help
Copied: dists/lenny/linux-2.6/debian/patches/features/all/bnx2x-update.patch (from r14057, people/dannf/lenny-bnx2x/debian/patches/features/all/bnx2x-update.patch)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/lenny/linux-2.6/debian/patches/features/all/bnx2x-update.patch Fri Jul 31 06:10:25 2009 (r14058, copy of r14057, people/dannf/lenny-bnx2x/debian/patches/features/all/bnx2x-update.patch)
@@ -0,0 +1,33861 @@
+diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
+index f4182cf..0c58943 100644
+--- a/drivers/net/Kconfig
++++ b/drivers/net/Kconfig
+@@ -2574,6 +2574,7 @@ config BNX2X
+ tristate "Broadcom NetXtremeII 10Gb support"
+ depends on PCI
+ select ZLIB_INFLATE
++ select LIBCRC32C
+ help
+ This driver supports Broadcom NetXtremeII 10 gigabit Ethernet cards.
+ To compile this driver as a module, choose M here: the module
+diff --git a/drivers/net/Makefile b/drivers/net/Makefile
+index dcbfe84..29c6697 100644
+--- a/drivers/net/Makefile
++++ b/drivers/net/Makefile
+@@ -67,6 +67,7 @@ obj-$(CONFIG_FEALNX) += fealnx.o
+ obj-$(CONFIG_TIGON3) += tg3.o
+ obj-$(CONFIG_BNX2) += bnx2.o
+ obj-$(CONFIG_BNX2X) += bnx2x.o
++bnx2x-objs := bnx2x_main.o bnx2x_link.o
+ spidernet-y += spider_net.o spider_net_ethtool.o
+ obj-$(CONFIG_SPIDER_NET) += spidernet.o sungem_phy.o
+ obj-$(CONFIG_GELIC_NET) += ps3_gelic.o
+diff --git a/drivers/net/bnx2x.c b/drivers/net/bnx2x.c
+deleted file mode 100644
+index 70cba64..0000000
+--- a/drivers/net/bnx2x.c
++++ /dev/null
+@@ -1,9988 +0,0 @@
+-/* bnx2x.c: Broadcom Everest network driver.
+- *
+- * Copyright (c) 2007-2008 Broadcom Corporation
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation.
+- *
+- * Maintained by: Eilon Greenstein <eilong at broadcom.com>
+- * Written by: Eliezer Tamir
+- * Based on code from Michael Chan's bnx2 driver
+- * UDP CSUM errata workaround by Arik Gendelman
+- * Slowpath rework by Vladislav Zolotarov
+- * Statistics and Link management by Yitchak Gertner
+- *
+- */
+-
+-/* define this to make the driver freeze on error
+- * to allow getting debug info
+- * (you will need to reboot afterwards)
+- */
+-/*#define BNX2X_STOP_ON_ERROR*/
+-
+-#include <linux/module.h>
+-#include <linux/moduleparam.h>
+-#include <linux/kernel.h>
+-#include <linux/device.h> /* for dev_info() */
+-#include <linux/timer.h>
+-#include <linux/errno.h>
+-#include <linux/ioport.h>
+-#include <linux/slab.h>
+-#include <linux/vmalloc.h>
+-#include <linux/interrupt.h>
+-#include <linux/pci.h>
+-#include <linux/init.h>
+-#include <linux/netdevice.h>
+-#include <linux/etherdevice.h>
+-#include <linux/skbuff.h>
+-#include <linux/dma-mapping.h>
+-#include <linux/bitops.h>
+-#include <linux/irq.h>
+-#include <linux/delay.h>
+-#include <asm/byteorder.h>
+-#include <linux/time.h>
+-#include <linux/ethtool.h>
+-#include <linux/mii.h>
+-#ifdef NETIF_F_HW_VLAN_TX
+- #include <linux/if_vlan.h>
+- #define BCM_VLAN 1
+-#endif
+-#include <net/ip.h>
+-#include <net/tcp.h>
+-#include <net/checksum.h>
+-#include <linux/workqueue.h>
+-#include <linux/crc32.h>
+-#include <linux/prefetch.h>
+-#include <linux/zlib.h>
+-#include <linux/version.h>
+-#include <linux/io.h>
+-
+-#include "bnx2x_reg.h"
+-#include "bnx2x_fw_defs.h"
+-#include "bnx2x_hsi.h"
+-#include "bnx2x.h"
+-#include "bnx2x_init.h"
+-
+-#define DRV_MODULE_VERSION "1.42.4"
+-#define DRV_MODULE_RELDATE "2008/4/9"
+-#define BNX2X_BC_VER 0x040200
+-
+-/* Time in jiffies before concluding the transmitter is hung. */
+-#define TX_TIMEOUT (5*HZ)
+-
+-static char version[] __devinitdata =
+- "Broadcom NetXtreme II 5771X 10Gigabit Ethernet Driver "
+- DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+-
+-MODULE_AUTHOR("Eliezer Tamir");
+-MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
+-MODULE_LICENSE("GPL");
+-MODULE_VERSION(DRV_MODULE_VERSION);
+-
+-static int use_inta;
+-static int poll;
+-static int onefunc;
+-static int nomcp;
+-static int debug;
+-static int use_multi;
+-
+-module_param(use_inta, int, 0);
+-module_param(poll, int, 0);
+-module_param(onefunc, int, 0);
+-module_param(debug, int, 0);
+-MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
+-MODULE_PARM_DESC(poll, "use polling (for debug)");
+-MODULE_PARM_DESC(onefunc, "enable only first function");
+-MODULE_PARM_DESC(nomcp, "ignore management CPU (Implies onefunc)");
+-MODULE_PARM_DESC(debug, "default debug msglevel");
+-
+-#ifdef BNX2X_MULTI
+-module_param(use_multi, int, 0);
+-MODULE_PARM_DESC(use_multi, "use per-CPU queues");
+-#endif
+-
+-enum bnx2x_board_type {
+- BCM57710 = 0,
+-};
+-
+-/* indexed by board_t, above */
+-static struct {
+- char *name;
+-} board_info[] __devinitdata = {
+- { "Broadcom NetXtreme II BCM57710 XGb" }
+-};
+-
+-static const struct pci_device_id bnx2x_pci_tbl[] = {
+- { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
+- PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
+- { 0 }
+-};
+-
+-MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
+-
+-/****************************************************************************
+-* General service functions
+-****************************************************************************/
+-
+-/* used only at init
+- * locking is done by mcp
+- */
+-static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
+-{
+- pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
+- pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
+- pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
+- PCICFG_VENDOR_ID_OFFSET);
+-}
+-
+-#ifdef BNX2X_IND_RD
+-static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
+-{
+- u32 val;
+-
+- pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
+- pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
+- pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
+- PCICFG_VENDOR_ID_OFFSET);
+-
+- return val;
+-}
+-#endif
+-
+-static const u32 dmae_reg_go_c[] = {
+- DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
+- DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
+- DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
+- DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
+-};
+-
+-/* copy command into DMAE command memory and set DMAE command go */
+-static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
+- int idx)
+-{
+- u32 cmd_offset;
+- int i;
+-
+- cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
+- for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
+- REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
+-
+-/* DP(NETIF_MSG_DMAE, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
+- idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i)); */
+- }
+- REG_WR(bp, dmae_reg_go_c[idx], 1);
+-}
+-
+-static void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr,
+- u32 dst_addr, u32 len32)
+-{
+- struct dmae_command *dmae = &bp->dmae;
+- int port = bp->port;
+- u32 *wb_comp = bnx2x_sp(bp, wb_comp);
+- int timeout = 200;
+-
+- memset(dmae, 0, sizeof(struct dmae_command));
+-
+- dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
+- DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
+- DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
+-#ifdef __BIG_ENDIAN
+- DMAE_CMD_ENDIANITY_B_DW_SWAP |
+-#else
+- DMAE_CMD_ENDIANITY_DW_SWAP |
+-#endif
+- (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
+- dmae->src_addr_lo = U64_LO(dma_addr);
+- dmae->src_addr_hi = U64_HI(dma_addr);
+- dmae->dst_addr_lo = dst_addr >> 2;
+- dmae->dst_addr_hi = 0;
+- dmae->len = len32;
+- dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
+- dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
+- dmae->comp_val = BNX2X_WB_COMP_VAL;
+-
+-/*
+- DP(NETIF_MSG_DMAE, "dmae: opcode 0x%08x\n"
+- DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
+- "dst_addr [%x:%08x (%08x)]\n"
+- DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
+- dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
+- dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
+- dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
+-*/
+-/*
+- DP(NETIF_MSG_DMAE, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
+- bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
+- bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
+-*/
+-
+- *wb_comp = 0;
+-
+- bnx2x_post_dmae(bp, dmae, port * 8);
+-
+- udelay(5);
+- /* adjust timeout for emulation/FPGA */
+- if (CHIP_REV_IS_SLOW(bp))
+- timeout *= 100;
+- while (*wb_comp != BNX2X_WB_COMP_VAL) {
+-/* DP(NETIF_MSG_DMAE, "wb_comp 0x%08x\n", *wb_comp); */
+- udelay(5);
+- if (!timeout) {
+- BNX2X_ERR("dmae timeout!\n");
+- break;
+- }
+- timeout--;
+- }
+-}
+-
+-#ifdef BNX2X_DMAE_RD
+-static void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
+-{
+- struct dmae_command *dmae = &bp->dmae;
+- int port = bp->port;
+- u32 *wb_comp = bnx2x_sp(bp, wb_comp);
+- int timeout = 200;
+-
+- memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
+- memset(dmae, 0, sizeof(struct dmae_command));
+-
+- dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
+- DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
+- DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
+-#ifdef __BIG_ENDIAN
+- DMAE_CMD_ENDIANITY_B_DW_SWAP |
+-#else
+- DMAE_CMD_ENDIANITY_DW_SWAP |
+-#endif
+- (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
+- dmae->src_addr_lo = src_addr >> 2;
+- dmae->src_addr_hi = 0;
+- dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
+- dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
+- dmae->len = len32;
+- dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
+- dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
+- dmae->comp_val = BNX2X_WB_COMP_VAL;
+-
+-/*
+- DP(NETIF_MSG_DMAE, "dmae: opcode 0x%08x\n"
+- DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
+- "dst_addr [%x:%08x (%08x)]\n"
+- DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
+- dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
+- dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
+- dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
+-*/
+-
+- *wb_comp = 0;
+-
+- bnx2x_post_dmae(bp, dmae, port * 8);
+-
+- udelay(5);
+- while (*wb_comp != BNX2X_WB_COMP_VAL) {
+- udelay(5);
+- if (!timeout) {
+- BNX2X_ERR("dmae timeout!\n");
+- break;
+- }
+- timeout--;
+- }
+-/*
+- DP(NETIF_MSG_DMAE, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
+- bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
+- bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
+-*/
+-}
+-#endif
+-
+-static int bnx2x_mc_assert(struct bnx2x *bp)
+-{
+- int i, j, rc = 0;
+- char last_idx;
+- const char storm[] = {"XTCU"};
+- const u32 intmem_base[] = {
+- BAR_XSTRORM_INTMEM,
+- BAR_TSTRORM_INTMEM,
+- BAR_CSTRORM_INTMEM,
+- BAR_USTRORM_INTMEM
+- };
+-
+- /* Go through all instances of all SEMIs */
+- for (i = 0; i < 4; i++) {
+- last_idx = REG_RD8(bp, XSTORM_ASSERT_LIST_INDEX_OFFSET +
+- intmem_base[i]);
+- if (last_idx)
+- BNX2X_LOG("DATA %cSTORM_ASSERT_LIST_INDEX 0x%x\n",
+- storm[i], last_idx);
+-
+- /* print the asserts */
+- for (j = 0; j < STROM_ASSERT_ARRAY_SIZE; j++) {
+- u32 row0, row1, row2, row3;
+-
+- row0 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) +
+- intmem_base[i]);
+- row1 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) + 4 +
+- intmem_base[i]);
+- row2 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) + 8 +
+- intmem_base[i]);
+- row3 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) + 12 +
+- intmem_base[i]);
+-
+- if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
+- BNX2X_LOG("DATA %cSTORM_ASSERT_INDEX 0x%x ="
+- " 0x%08x 0x%08x 0x%08x 0x%08x\n",
+- storm[i], j, row3, row2, row1, row0);
+- rc++;
+- } else {
+- break;
+- }
+- }
+- }
+- return rc;
+-}
+-
+-static void bnx2x_fw_dump(struct bnx2x *bp)
+-{
+- u32 mark, offset;
+- u32 data[9];
+- int word;
+-
+- mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
+- mark = ((mark + 0x3) & ~0x3);
+- printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);
+-
+- for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
+- for (word = 0; word < 8; word++)
+- data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
+- offset + 4*word));
+- data[8] = 0x0;
+- printk(KERN_CONT "%s", (char *)data);
+- }
+- for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
+- for (word = 0; word < 8; word++)
+- data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
+- offset + 4*word));
+- data[8] = 0x0;
+- printk(KERN_CONT "%s", (char *)data);
+- }
+- printk("\n" KERN_ERR PFX "end of fw dump\n");
+-}
+-
+-static void bnx2x_panic_dump(struct bnx2x *bp)
+-{
+- int i;
+- u16 j, start, end;
+-
+- BNX2X_ERR("begin crash dump -----------------\n");
+-
+- for_each_queue(bp, i) {
+- struct bnx2x_fastpath *fp = &bp->fp[i];
+- struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;
+-
+- BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
+- " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)"
+- " *rx_cons_sb(%x) rx_comp_prod(%x)"
+- " rx_comp_cons(%x) fp_c_idx(%x) fp_u_idx(%x)"
+- " bd data(%x,%x)\n",
+- i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
+- fp->tx_bd_cons, *fp->tx_cons_sb, *fp->rx_cons_sb,
+- fp->rx_comp_prod, fp->rx_comp_cons, fp->fp_c_idx,
+- fp->fp_u_idx, hw_prods->packets_prod,
+- hw_prods->bds_prod);
+-
+- start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
+- end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
+- for (j = start; j < end; j++) {
+- struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
+-
+- BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
+- sw_bd->skb, sw_bd->first_bd);
+- }
+-
+- start = TX_BD(fp->tx_bd_cons - 10);
+- end = TX_BD(fp->tx_bd_cons + 254);
+- for (j = start; j < end; j++) {
+- u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
+-
+- BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
+- j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
+- }
+-
+- start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
+- end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
+- for (j = start; j < end; j++) {
+- u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
+- struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
+-
+- BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
+- j, rx_bd[0], rx_bd[1], sw_bd->skb);
+- }
+-
+- start = RCQ_BD(fp->rx_comp_cons - 10);
+- end = RCQ_BD(fp->rx_comp_cons + 503);
+- for (j = start; j < end; j++) {
+- u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
+-
+- BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
+- j, cqe[0], cqe[1], cqe[2], cqe[3]);
+- }
+- }
+-
+- BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
+- " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
+- " spq_prod_idx(%u)\n",
+- bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
+- bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
+-
+-
+- bnx2x_mc_assert(bp);
+- BNX2X_ERR("end crash dump -----------------\n");
+-
+- bp->stats_state = STATS_STATE_DISABLE;
+- DP(BNX2X_MSG_STATS, "stats_state - DISABLE\n");
+-}
+-
+-static void bnx2x_int_enable(struct bnx2x *bp)
+-{
+- int port = bp->port;
+- u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
+- u32 val = REG_RD(bp, addr);
+- int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
+-
+- if (msix) {
+- val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
+- val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
+- HC_CONFIG_0_REG_ATTN_BIT_EN_0);
+- } else {
+- val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
+- HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
+- HC_CONFIG_0_REG_INT_LINE_EN_0 |
+- HC_CONFIG_0_REG_ATTN_BIT_EN_0);
+-
+- /* Errata A0.158 workaround */
+- DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
+- val, port, addr, msix);
+-
+- REG_WR(bp, addr, val);
+-
+- val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
+- }
+-
+- DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
+- val, port, addr, msix);
+-
+- REG_WR(bp, addr, val);
+-}
+-
+-static void bnx2x_int_disable(struct bnx2x *bp)
+-{
+- int port = bp->port;
+- u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
+- u32 val = REG_RD(bp, addr);
+-
+- val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
+- HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
+- HC_CONFIG_0_REG_INT_LINE_EN_0 |
+- HC_CONFIG_0_REG_ATTN_BIT_EN_0);
+-
+- DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
+- val, port, addr);
+-
+- REG_WR(bp, addr, val);
+- if (REG_RD(bp, addr) != val)
+- BNX2X_ERR("BUG! proper val not read from IGU!\n");
+-}
+-
+-static void bnx2x_int_disable_sync(struct bnx2x *bp)
+-{
+-
+- int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
+- int i;
+-
+- atomic_inc(&bp->intr_sem);
+- /* prevent the HW from sending interrupts */
+- bnx2x_int_disable(bp);
+-
+- /* make sure all ISRs are done */
+- if (msix) {
+- for_each_queue(bp, i)
+- synchronize_irq(bp->msix_table[i].vector);
+-
+- /* one more for the Slow Path IRQ */
+- synchronize_irq(bp->msix_table[i].vector);
+- } else
+- synchronize_irq(bp->pdev->irq);
+-
+- /* make sure sp_task is not running */
+- cancel_work_sync(&bp->sp_task);
+-
+-}
+-
+-/* fast path code */
+-
+-/*
+- * general service functions
+- */
+-
+-static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 id,
+- u8 storm, u16 index, u8 op, u8 update)
+-{
+- u32 igu_addr = (IGU_ADDR_INT_ACK + IGU_PORT_BASE * bp->port) * 8;
+- struct igu_ack_register igu_ack;
+-
+- igu_ack.status_block_index = index;
+- igu_ack.sb_id_and_flags =
+- ((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
+- (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
+- (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
+- (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
+-
+-/* DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
+- (*(u32 *)&igu_ack), BAR_IGU_INTMEM + igu_addr); */
+- REG_WR(bp, BAR_IGU_INTMEM + igu_addr, (*(u32 *)&igu_ack));
+-}
+-
+-static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
+-{
+- struct host_status_block *fpsb = fp->status_blk;
+- u16 rc = 0;
+-
+- barrier(); /* status block is written to by the chip */
+- if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
+- fp->fp_c_idx = fpsb->c_status_block.status_block_index;
+- rc |= 1;
+- }
+- if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
+- fp->fp_u_idx = fpsb->u_status_block.status_block_index;
+- rc |= 2;
+- }
+- return rc;
+-}
+-
+-static inline int bnx2x_has_work(struct bnx2x_fastpath *fp)
+-{
+- u16 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
+-
+- if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
+- rx_cons_sb++;
+-
+- if ((rx_cons_sb != fp->rx_comp_cons) ||
+- (le16_to_cpu(*fp->tx_cons_sb) != fp->tx_pkt_cons))
+- return 1;
+-
+- return 0;
+-}
+-
+-static u16 bnx2x_ack_int(struct bnx2x *bp)
+-{
+- u32 igu_addr = (IGU_ADDR_SIMD_MASK + IGU_PORT_BASE * bp->port) * 8;
+- u32 result = REG_RD(bp, BAR_IGU_INTMEM + igu_addr);
+-
+-/* DP(NETIF_MSG_INTR, "read 0x%08x from IGU addr 0x%x\n",
+- result, BAR_IGU_INTMEM + igu_addr); */
+-
+-#ifdef IGU_DEBUG
+-#warning IGU_DEBUG active
+- if (result == 0) {
+- BNX2X_ERR("read %x from IGU\n", result);
+- REG_WR(bp, TM_REG_TIMER_SOFT_RST, 0);
+- }
+-#endif
+- return result;
+-}
+-
+-
+-/*
+- * fast path service functions
+- */
+-
+-/* free skb in the packet ring at pos idx
+- * return idx of last bd freed
+- */
+-static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+- u16 idx)
+-{
+- struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
+- struct eth_tx_bd *tx_bd;
+- struct sk_buff *skb = tx_buf->skb;
+- u16 bd_idx = tx_buf->first_bd;
+- int nbd;
+-
+- DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
+- idx, tx_buf, skb);
+-
+- /* unmap first bd */
+- DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
+- tx_bd = &fp->tx_desc_ring[bd_idx];
+- pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
+- BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
+-
+- nbd = le16_to_cpu(tx_bd->nbd) - 1;
+-#ifdef BNX2X_STOP_ON_ERROR
+- if (nbd > (MAX_SKB_FRAGS + 2)) {
+- BNX2X_ERR("bad nbd!\n");
+- bnx2x_panic();
+- }
+-#endif
+-
+- /* Skip a parse bd and the TSO split header bd
+- since they have no mapping */
+- if (nbd)
+- bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
+-
+- if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
+- ETH_TX_BD_FLAGS_TCP_CSUM |
+- ETH_TX_BD_FLAGS_SW_LSO)) {
+- if (--nbd)
+- bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
+- tx_bd = &fp->tx_desc_ring[bd_idx];
+- /* is this a TSO split header bd? */
+- if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
+- if (--nbd)
+- bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
+- }
+- }
+-
+- /* now free frags */
+- while (nbd > 0) {
+-
+- DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
+- tx_bd = &fp->tx_desc_ring[bd_idx];
+- pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
+- BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
+- if (--nbd)
+- bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
+- }
+-
+- /* release skb */
+- BUG_TRAP(skb);
+- dev_kfree_skb(skb);
+- tx_buf->first_bd = 0;
+- tx_buf->skb = NULL;
+-
+- return bd_idx;
+-}
+-
+-static inline u32 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
+-{
+- u16 used;
+- u32 prod;
+- u32 cons;
+-
+- /* Tell compiler that prod and cons can change */
+- barrier();
+- prod = fp->tx_bd_prod;
+- cons = fp->tx_bd_cons;
+-
+- used = (NUM_TX_BD - NUM_TX_RINGS + prod - cons +
+- (cons / TX_DESC_CNT) - (prod / TX_DESC_CNT));
+-
+- if (prod >= cons) {
+- /* used = prod - cons - prod/size + cons/size */
+- used -= NUM_TX_BD - NUM_TX_RINGS;
+- }
+-
+- BUG_TRAP(used <= fp->bp->tx_ring_size);
+- BUG_TRAP((fp->bp->tx_ring_size - used) <= MAX_TX_AVAIL);
+-
+- return (fp->bp->tx_ring_size - used);
+-}
+-
+-static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
+-{
+- struct bnx2x *bp = fp->bp;
+- u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
+- int done = 0;
+-
+-#ifdef BNX2X_STOP_ON_ERROR
+- if (unlikely(bp->panic))
+- return;
+-#endif
+-
+- hw_cons = le16_to_cpu(*fp->tx_cons_sb);
+- sw_cons = fp->tx_pkt_cons;
+-
+- while (sw_cons != hw_cons) {
+- u16 pkt_cons;
+-
+- pkt_cons = TX_BD(sw_cons);
+-
+- /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
+-
+- DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %d\n",
+- hw_cons, sw_cons, pkt_cons);
+-
+-/* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
+- rmb();
+- prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
+- }
+-*/
+- bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
+- sw_cons++;
+- done++;
+-
+- if (done == work)
+- break;
+- }
+-
+- fp->tx_pkt_cons = sw_cons;
+- fp->tx_bd_cons = bd_cons;
+-
+- /* Need to make the tx_cons update visible to start_xmit()
+- * before checking for netif_queue_stopped(). Without the
+- * memory barrier, there is a small possibility that start_xmit()
+- * will miss it and cause the queue to be stopped forever.
+- */
+- smp_mb();
+-
+- /* TBD need a thresh? */
+- if (unlikely(netif_queue_stopped(bp->dev))) {
+-
+- netif_tx_lock(bp->dev);
+-
+- if (netif_queue_stopped(bp->dev) &&
+- (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
+- netif_wake_queue(bp->dev);
+-
+- netif_tx_unlock(bp->dev);
+-
+- }
+-}
+-
+-static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
+- union eth_rx_cqe *rr_cqe)
+-{
+- struct bnx2x *bp = fp->bp;
+- int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
+- int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
+-
+- DP(NETIF_MSG_RX_STATUS,
+- "fp %d cid %d got ramrod #%d state is %x type is %d\n",
+- fp->index, cid, command, bp->state, rr_cqe->ramrod_cqe.type);
+-
+- bp->spq_left++;
+-
+- if (fp->index) {
+- switch (command | fp->state) {
+- case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
+- BNX2X_FP_STATE_OPENING):
+- DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
+- cid);
+- fp->state = BNX2X_FP_STATE_OPEN;
+- break;
+-
+- case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
+- DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
+- cid);
+- fp->state = BNX2X_FP_STATE_HALTED;
+- break;
+-
+- default:
+- BNX2X_ERR("unexpected MC reply(%d) state is %x\n",
+- command, fp->state);
+- }
+- mb(); /* force bnx2x_wait_ramrod to see the change */
+- return;
+- }
+-
+- switch (command | bp->state) {
+- case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
+- DP(NETIF_MSG_IFUP, "got setup ramrod\n");
+- bp->state = BNX2X_STATE_OPEN;
+- break;
+-
+- case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
+- DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
+- bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
+- fp->state = BNX2X_FP_STATE_HALTED;
+- break;
+-
+- case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
+- DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n",
+- cid);
+- bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
+- break;
+-
+- case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
+- DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
+- break;
+-
+- case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
+- DP(NETIF_MSG_IFUP, "got (un)set mac ramrod\n");
+- break;
+-
+- default:
+- BNX2X_ERR("unexpected ramrod (%d) state is %x\n",
+- command, bp->state);
+- }
+-
+- mb(); /* force bnx2x_wait_ramrod to see the change */
+-}
+-
+-static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
+- struct bnx2x_fastpath *fp, u16 index)
+-{
+- struct sk_buff *skb;
+- struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
+- struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
+- dma_addr_t mapping;
+-
+- skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
+- if (unlikely(skb == NULL))
+- return -ENOMEM;
+-
+- mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
+- PCI_DMA_FROMDEVICE);
+- if (unlikely(dma_mapping_error(mapping))) {
+-
+- dev_kfree_skb(skb);
+- return -ENOMEM;
+- }
+-
+- rx_buf->skb = skb;
+- pci_unmap_addr_set(rx_buf, mapping, mapping);
+-
+- rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
+- rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
+-
+- return 0;
+-}
+-
+-/* note that we are not allocating a new skb,
+- * we are just moving one from cons to prod
+- * we are not creating a new mapping,
+- * so there is no need to check for dma_mapping_error().
+- */
+-static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
+- struct sk_buff *skb, u16 cons, u16 prod)
+-{
+- struct bnx2x *bp = fp->bp;
+- struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
+- struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
+- struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
+- struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
+-
+- pci_dma_sync_single_for_device(bp->pdev,
+- pci_unmap_addr(cons_rx_buf, mapping),
+- bp->rx_offset + RX_COPY_THRESH,
+- PCI_DMA_FROMDEVICE);
+-
+- prod_rx_buf->skb = cons_rx_buf->skb;
+- pci_unmap_addr_set(prod_rx_buf, mapping,
+- pci_unmap_addr(cons_rx_buf, mapping));
+- *prod_bd = *cons_bd;
+-}
+-
+-static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
+-{
+- struct bnx2x *bp = fp->bp;
+- u16 bd_cons, bd_prod, comp_ring_cons;
+- u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
+- int rx_pkt = 0;
+-
+-#ifdef BNX2X_STOP_ON_ERROR
+- if (unlikely(bp->panic))
+- return 0;
+-#endif
+-
+- hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
+- if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
+- hw_comp_cons++;
+-
+- bd_cons = fp->rx_bd_cons;
+- bd_prod = fp->rx_bd_prod;
+- sw_comp_cons = fp->rx_comp_cons;
+- sw_comp_prod = fp->rx_comp_prod;
+-
+- /* Memory barrier necessary as speculative reads of the rx
+- * buffer can be ahead of the index in the status block
+- */
+- rmb();
+-
+- DP(NETIF_MSG_RX_STATUS,
+- "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
+- fp->index, hw_comp_cons, sw_comp_cons);
+-
+- while (sw_comp_cons != hw_comp_cons) {
+- unsigned int len, pad;
+- struct sw_rx_bd *rx_buf;
+- struct sk_buff *skb;
+- union eth_rx_cqe *cqe;
+-
+- comp_ring_cons = RCQ_BD(sw_comp_cons);
+- bd_prod = RX_BD(bd_prod);
+- bd_cons = RX_BD(bd_cons);
+-
+- cqe = &fp->rx_comp_ring[comp_ring_cons];
+-
+- DP(NETIF_MSG_RX_STATUS, "hw_comp_cons %u sw_comp_cons %u"
+- " comp_ring (%u) bd_ring (%u,%u)\n",
+- hw_comp_cons, sw_comp_cons,
+- comp_ring_cons, bd_prod, bd_cons);
+- DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
+- " queue %x vlan %x len %x\n",
+- cqe->fast_path_cqe.type,
+- cqe->fast_path_cqe.error_type_flags,
+- cqe->fast_path_cqe.status_flags,
+- cqe->fast_path_cqe.rss_hash_result,
+- cqe->fast_path_cqe.vlan_tag, cqe->fast_path_cqe.pkt_len);
+-
+- /* is this a slowpath msg? */
+- if (unlikely(cqe->fast_path_cqe.type)) {
+- bnx2x_sp_event(fp, cqe);
+- goto next_cqe;
+-
+- /* this is an rx packet */
+- } else {
+- rx_buf = &fp->rx_buf_ring[bd_cons];
+- skb = rx_buf->skb;
+-
+- len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
+- pad = cqe->fast_path_cqe.placement_offset;
+-
+- pci_dma_sync_single_for_device(bp->pdev,
+- pci_unmap_addr(rx_buf, mapping),
+- pad + RX_COPY_THRESH,
+- PCI_DMA_FROMDEVICE);
+- prefetch(skb);
+- prefetch(((char *)(skb)) + 128);
+-
+- /* is this an error packet? */
+- if (unlikely(cqe->fast_path_cqe.error_type_flags &
+- ETH_RX_ERROR_FALGS)) {
+- /* do we sometimes forward error packets anyway? */
+- DP(NETIF_MSG_RX_ERR,
+- "ERROR flags(%u) Rx packet(%u)\n",
+- cqe->fast_path_cqe.error_type_flags,
+- sw_comp_cons);
+- /* TBD make sure MC counts this as a drop */
+- goto reuse_rx;
+- }
+-
+- /* Since we don't have a jumbo ring
+- * copy small packets if mtu > 1500
+- */
+- if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
+- (len <= RX_COPY_THRESH)) {
+- struct sk_buff *new_skb;
+-
+- new_skb = netdev_alloc_skb(bp->dev,
+- len + pad);
+- if (new_skb == NULL) {
+- DP(NETIF_MSG_RX_ERR,
+- "ERROR packet dropped "
+- "because of alloc failure\n");
+- /* TBD count this as a drop? */
+- goto reuse_rx;
+- }
+-
+- /* aligned copy */
+- skb_copy_from_linear_data_offset(skb, pad,
+- new_skb->data + pad, len);
+- skb_reserve(new_skb, pad);
+- skb_put(new_skb, len);
+-
+- bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
+-
+- skb = new_skb;
+-
+- } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
+- pci_unmap_single(bp->pdev,
+- pci_unmap_addr(rx_buf, mapping),
+- bp->rx_buf_use_size,
+- PCI_DMA_FROMDEVICE);
+- skb_reserve(skb, pad);
+- skb_put(skb, len);
+-
+- } else {
+- DP(NETIF_MSG_RX_ERR,
+- "ERROR packet dropped because "
+- "of alloc failure\n");
+-reuse_rx:
+- bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
+- goto next_rx;
+- }
+-
+- skb->protocol = eth_type_trans(skb, bp->dev);
+-
+- skb->ip_summed = CHECKSUM_NONE;
+- if (bp->rx_csum && BNX2X_RX_SUM_OK(cqe))
+- skb->ip_summed = CHECKSUM_UNNECESSARY;
+-
+- /* TBD do we pass bad csum packets in promisc */
+- }
+-
+-#ifdef BCM_VLAN
+- if ((le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags)
+- & PARSING_FLAGS_NUMBER_OF_NESTED_VLANS)
+- && (bp->vlgrp != NULL))
+- vlan_hwaccel_receive_skb(skb, bp->vlgrp,
+- le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
+- else
+-#endif
+- netif_receive_skb(skb);
+-
+- bp->dev->last_rx = jiffies;
+-
+-next_rx:
+- rx_buf->skb = NULL;
+-
+- bd_cons = NEXT_RX_IDX(bd_cons);
+- bd_prod = NEXT_RX_IDX(bd_prod);
+-next_cqe:
+- sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
+- sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
+- rx_pkt++;
+-
+- if ((rx_pkt == budget))
+- break;
+- } /* while */
+-
+- fp->rx_bd_cons = bd_cons;
+- fp->rx_bd_prod = bd_prod;
+- fp->rx_comp_cons = sw_comp_cons;
+- fp->rx_comp_prod = sw_comp_prod;
+-
+- REG_WR(bp, BAR_TSTRORM_INTMEM +
+- TSTORM_RCQ_PROD_OFFSET(bp->port, fp->index), sw_comp_prod);
+-
+- mmiowb(); /* keep prod updates ordered */
+-
+- fp->rx_pkt += rx_pkt;
+- fp->rx_calls++;
+-
+- return rx_pkt;
+-}
+-
+-static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
+-{
+- struct bnx2x_fastpath *fp = fp_cookie;
+- struct bnx2x *bp = fp->bp;
+- struct net_device *dev = bp->dev;
+- int index = fp->index;
+-
+- DP(NETIF_MSG_INTR, "got an msix interrupt on [%d]\n", index);
+- bnx2x_ack_sb(bp, index, USTORM_ID, 0, IGU_INT_DISABLE, 0);
+-
+-#ifdef BNX2X_STOP_ON_ERROR
+- if (unlikely(bp->panic))
+- return IRQ_HANDLED;
+-#endif
+-
+- prefetch(fp->rx_cons_sb);
+- prefetch(fp->tx_cons_sb);
+- prefetch(&fp->status_blk->c_status_block.status_block_index);
+- prefetch(&fp->status_blk->u_status_block.status_block_index);
+-
+- netif_rx_schedule(dev, &bnx2x_fp(bp, index, napi));
+- return IRQ_HANDLED;
+-}
+-
+-static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
+-{
+- struct net_device *dev = dev_instance;
+- struct bnx2x *bp = netdev_priv(dev);
+- u16 status = bnx2x_ack_int(bp);
+-
+- if (unlikely(status == 0)) {
+- DP(NETIF_MSG_INTR, "not our interrupt!\n");
+- return IRQ_NONE;
+- }
+-
+- DP(NETIF_MSG_INTR, "got an interrupt status is %u\n", status);
+-
+-#ifdef BNX2X_STOP_ON_ERROR
+- if (unlikely(bp->panic))
+- return IRQ_HANDLED;
+-#endif
+-
+- /* Return here if interrupt is shared and is disabled */
+- if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
+- DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
+- return IRQ_HANDLED;
+- }
+-
+- if (status & 0x2) {
+- struct bnx2x_fastpath *fp = &bp->fp[0];
+-
+- prefetch(fp->rx_cons_sb);
+- prefetch(fp->tx_cons_sb);
+- prefetch(&fp->status_blk->c_status_block.status_block_index);
+- prefetch(&fp->status_blk->u_status_block.status_block_index);
+-
+- netif_rx_schedule(dev, &bnx2x_fp(bp, 0, napi));
+-
+- status &= ~0x2;
+- if (!status)
+- return IRQ_HANDLED;
+- }
+-
+- if (unlikely(status & 0x1)) {
+-
+- schedule_work(&bp->sp_task);
+-
+- status &= ~0x1;
+- if (!status)
+- return IRQ_HANDLED;
+- }
+-
+- DP(NETIF_MSG_INTR, "got an unknown interrupt! (status is %u)\n",
+- status);
+-
+- return IRQ_HANDLED;
+-}
+-
+-/* end of fast path */
+-
+-/* PHY/MAC */
+-
+-/*
+- * General service functions
+- */
+-
+-static void bnx2x_leds_set(struct bnx2x *bp, unsigned int speed)
+-{
+- int port = bp->port;
+-
+- NIG_WR(NIG_REG_LED_MODE_P0 + port*4,
+- ((bp->hw_config & SHARED_HW_CFG_LED_MODE_MASK) >>
+- SHARED_HW_CFG_LED_MODE_SHIFT));
+- NIG_WR(NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 0);
+-
+- /* Set blinking rate to ~15.9Hz */
+- NIG_WR(NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4,
+- LED_BLINK_RATE_VAL);
+- NIG_WR(NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 + port*4, 1);
+-
+- /* On Ax chip versions for speeds less than 10G
+- LED scheme is different */
+- if ((CHIP_REV(bp) == CHIP_REV_Ax) && (speed < SPEED_10000)) {
+- NIG_WR(NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 1);
+- NIG_WR(NIG_REG_LED_CONTROL_TRAFFIC_P0 + port*4, 0);
+- NIG_WR(NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 + port*4, 1);
+- }
+-}
+-
+-static void bnx2x_leds_unset(struct bnx2x *bp)
+-{
+- int port = bp->port;
+-
+- NIG_WR(NIG_REG_LED_10G_P0 + port*4, 0);
+- NIG_WR(NIG_REG_LED_MODE_P0 + port*4, SHARED_HW_CFG_LED_MAC1);
+-}
+-
+-static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits)
+-{
+- u32 val = REG_RD(bp, reg);
+-
+- val |= bits;
+- REG_WR(bp, reg, val);
+- return val;
+-}
+-
+-static u32 bnx2x_bits_dis(struct bnx2x *bp, u32 reg, u32 bits)
+-{
+- u32 val = REG_RD(bp, reg);
+-
+- val &= ~bits;
+- REG_WR(bp, reg, val);
+- return val;
+-}
+-
+-static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource)
+-{
+- u32 cnt;
+- u32 lock_status;
+- u32 resource_bit = (1 << resource);
+- u8 func = bp->port;
+-
+- /* Validating that the resource is within range */
+- if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
+- DP(NETIF_MSG_HW,
+- "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
+- resource, HW_LOCK_MAX_RESOURCE_VALUE);
+- return -EINVAL;
+- }
+-
+- /* Validating that the resource is not already taken */
+- lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + func*8);
+- if (lock_status & resource_bit) {
+- DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
+- lock_status, resource_bit);
+- return -EEXIST;
+- }
+-
+- /* Try for 1 second every 5ms */
+- for (cnt = 0; cnt < 200; cnt++) {
+- /* Try to acquire the lock */
+- REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + func*8 + 4,
+- resource_bit);
+- lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + func*8);
+- if (lock_status & resource_bit)
+- return 0;
+-
+- msleep(5);
+- }
+- DP(NETIF_MSG_HW, "Timeout\n");
+- return -EAGAIN;
+-}
+-
+-static int bnx2x_hw_unlock(struct bnx2x *bp, u32 resource)
+-{
+- u32 lock_status;
+- u32 resource_bit = (1 << resource);
+- u8 func = bp->port;
+-
+- /* Validating that the resource is within range */
+- if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
+- DP(NETIF_MSG_HW,
+- "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
+- resource, HW_LOCK_MAX_RESOURCE_VALUE);
+- return -EINVAL;
+- }
+-
+- /* Validating that the resource is currently taken */
+- lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + func*8);
+- if (!(lock_status & resource_bit)) {
+- DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
+- lock_status, resource_bit);
+- return -EFAULT;
+- }
+-
+- REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + func*8, resource_bit);
+- return 0;
+-}
+-
+-static int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
+-{
+- /* The GPIO should be swapped if swap register is set and active */
+- int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
+- REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ bp->port;
+- int gpio_shift = gpio_num +
+- (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
+- u32 gpio_mask = (1 << gpio_shift);
+- u32 gpio_reg;
+-
+- if (gpio_num > MISC_REGISTERS_GPIO_3) {
+- BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
+- return -EINVAL;
+- }
+-
+- bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
+- /* read GPIO and mask except the float bits */
+- gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
+-
+- switch (mode) {
+- case MISC_REGISTERS_GPIO_OUTPUT_LOW:
+- DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
+- gpio_num, gpio_shift);
+- /* clear FLOAT and set CLR */
+- gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
+- gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
+- break;
+-
+- case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
+- DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
+- gpio_num, gpio_shift);
+- /* clear FLOAT and set SET */
+- gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
+- gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
+- break;
+-
+- case MISC_REGISTERS_GPIO_INPUT_HI_Z :
+- DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
+- gpio_num, gpio_shift);
+- /* set FLOAT */
+- gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
+- break;
+-
+- default:
+- break;
+- }
+-
+- REG_WR(bp, MISC_REG_GPIO, gpio_reg);
+- bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_GPIO);
+-
+- return 0;
+-}
+-
+-static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
+-{
+- u32 spio_mask = (1 << spio_num);
+- u32 spio_reg;
+-
+- if ((spio_num < MISC_REGISTERS_SPIO_4) ||
+- (spio_num > MISC_REGISTERS_SPIO_7)) {
+- BNX2X_ERR("Invalid SPIO %d\n", spio_num);
+- return -EINVAL;
+- }
+-
+- bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
+- /* read SPIO and mask except the float bits */
+- spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
+-
+- switch (mode) {
+- case MISC_REGISTERS_SPIO_OUTPUT_LOW :
+- DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
+- /* clear FLOAT and set CLR */
+- spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
+- spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
+- break;
+-
+- case MISC_REGISTERS_SPIO_OUTPUT_HIGH :
+- DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
+- /* clear FLOAT and set SET */
+- spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
+- spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
+- break;
+-
+- case MISC_REGISTERS_SPIO_INPUT_HI_Z:
+- DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
+- /* set FLOAT */
+- spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
+- break;
+-
+- default:
+- break;
+- }
+-
+- REG_WR(bp, MISC_REG_SPIO, spio_reg);
+- bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_SPIO);
+-
+- return 0;
+-}
+-
+-static int bnx2x_mdio22_write(struct bnx2x *bp, u32 reg, u32 val)
+-{
+- int port = bp->port;
+- u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+- u32 tmp;
+- int i, rc;
+-
+-/* DP(NETIF_MSG_HW, "phy_addr 0x%x reg 0x%x val 0x%08x\n",
+- bp->phy_addr, reg, val); */
+-
+- if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
+-
+- tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
+- tmp &= ~EMAC_MDIO_MODE_AUTO_POLL;
+- EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp);
+- REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
+- udelay(40);
+- }
+-
+- tmp = ((bp->phy_addr << 21) | (reg << 16) |
+- (val & EMAC_MDIO_COMM_DATA) |
+- EMAC_MDIO_COMM_COMMAND_WRITE_22 |
+- EMAC_MDIO_COMM_START_BUSY);
+- EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, tmp);
+-
+- for (i = 0; i < 50; i++) {
+- udelay(10);
+-
+- tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM);
+- if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
+- udelay(5);
+- break;
+- }
+- }
+-
+- if (tmp & EMAC_MDIO_COMM_START_BUSY) {
+- BNX2X_ERR("write phy register failed\n");
+-
+- rc = -EBUSY;
+- } else {
+- rc = 0;
+- }
+-
+- if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
+-
+- tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
+- tmp |= EMAC_MDIO_MODE_AUTO_POLL;
+- EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp);
+- }
+-
+- return rc;
+-}
+-
+-static int bnx2x_mdio22_read(struct bnx2x *bp, u32 reg, u32 *ret_val)
+-{
+- int port = bp->port;
+- u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+- u32 val;
+- int i, rc;
+-
+- if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
+-
+- val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
+- val &= ~EMAC_MDIO_MODE_AUTO_POLL;
+- EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val);
+- REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
+- udelay(40);
+- }
+-
+- val = ((bp->phy_addr << 21) | (reg << 16) |
+- EMAC_MDIO_COMM_COMMAND_READ_22 |
+- EMAC_MDIO_COMM_START_BUSY);
+- EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, val);
+-
+- for (i = 0; i < 50; i++) {
+- udelay(10);
+-
+- val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM);
+- if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
+- val &= EMAC_MDIO_COMM_DATA;
+- break;
+- }
+- }
+-
+- if (val & EMAC_MDIO_COMM_START_BUSY) {
+- BNX2X_ERR("read phy register failed\n");
+-
+- *ret_val = 0x0;
+- rc = -EBUSY;
+- } else {
+- *ret_val = val;
+- rc = 0;
+- }
+-
+- if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
+-
+- val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
+- val |= EMAC_MDIO_MODE_AUTO_POLL;
+- EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val);
+- }
+-
+-/* DP(NETIF_MSG_HW, "phy_addr 0x%x reg 0x%x ret_val 0x%08x\n",
+- bp->phy_addr, reg, *ret_val); */
+-
+- return rc;
+-}
+-
+-static int bnx2x_mdio45_ctrl_write(struct bnx2x *bp, u32 mdio_ctrl,
+- u32 phy_addr, u32 reg, u32 addr, u32 val)
+-{
+- u32 tmp;
+- int i, rc = 0;
+-
+- /* set clause 45 mode, slow down the MDIO clock to 2.5MHz
+- * (a value of 49==0x31) and make sure that the AUTO poll is off
+- */
+- tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
+- tmp &= ~(EMAC_MDIO_MODE_AUTO_POLL | EMAC_MDIO_MODE_CLOCK_CNT);
+- tmp |= (EMAC_MDIO_MODE_CLAUSE_45 |
+- (49 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT));
+- REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, tmp);
+- REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
+- udelay(40);
+-
+- /* address */
+- tmp = ((phy_addr << 21) | (reg << 16) | addr |
+- EMAC_MDIO_COMM_COMMAND_ADDRESS |
+- EMAC_MDIO_COMM_START_BUSY);
+- REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
+-
+- for (i = 0; i < 50; i++) {
+- udelay(10);
+-
+- tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
+- if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
+- udelay(5);
+- break;
+- }
+- }
+- if (tmp & EMAC_MDIO_COMM_START_BUSY) {
+- BNX2X_ERR("write phy register failed\n");
+-
+- rc = -EBUSY;
+-
+- } else {
+- /* data */
+- tmp = ((phy_addr << 21) | (reg << 16) | val |
+- EMAC_MDIO_COMM_COMMAND_WRITE_45 |
+- EMAC_MDIO_COMM_START_BUSY);
+- REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
+-
+- for (i = 0; i < 50; i++) {
+- udelay(10);
+-
+- tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
+- if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
+- udelay(5);
+- break;
+- }
+- }
+-
+- if (tmp & EMAC_MDIO_COMM_START_BUSY) {
+- BNX2X_ERR("write phy register failed\n");
+-
+- rc = -EBUSY;
+- }
+- }
+-
+- /* unset clause 45 mode, set the MDIO clock to a faster value
+- * (0x13 => 6.25Mhz) and restore the AUTO poll if needed
+- */
+- tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
+- tmp &= ~(EMAC_MDIO_MODE_CLAUSE_45 | EMAC_MDIO_MODE_CLOCK_CNT);
+- tmp |= (0x13 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT);
+- if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG)
+- tmp |= EMAC_MDIO_MODE_AUTO_POLL;
+- REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, tmp);
+-
+- return rc;
+-}
+-
+-static int bnx2x_mdio45_write(struct bnx2x *bp, u32 phy_addr, u32 reg,
+- u32 addr, u32 val)
+-{
+- u32 emac_base = bp->port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+-
+- return bnx2x_mdio45_ctrl_write(bp, emac_base, phy_addr,
+- reg, addr, val);
+-}
+-
+-static int bnx2x_mdio45_ctrl_read(struct bnx2x *bp, u32 mdio_ctrl,
+- u32 phy_addr, u32 reg, u32 addr,
+- u32 *ret_val)
+-{
+- u32 val;
+- int i, rc = 0;
+-
+- /* set clause 45 mode, slow down the MDIO clock to 2.5MHz
+- * (a value of 49==0x31) and make sure that the AUTO poll is off
+- */
+- val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
+- val &= ~(EMAC_MDIO_MODE_AUTO_POLL | EMAC_MDIO_MODE_CLOCK_CNT);
+- val |= (EMAC_MDIO_MODE_CLAUSE_45 |
+- (49 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT));
+- REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, val);
+- REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
+- udelay(40);
+-
+- /* address */
+- val = ((phy_addr << 21) | (reg << 16) | addr |
+- EMAC_MDIO_COMM_COMMAND_ADDRESS |
+- EMAC_MDIO_COMM_START_BUSY);
+- REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
+-
+- for (i = 0; i < 50; i++) {
+- udelay(10);
+-
+- val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
+- if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
+- udelay(5);
+- break;
+- }
+- }
+- if (val & EMAC_MDIO_COMM_START_BUSY) {
+- BNX2X_ERR("read phy register failed\n");
+-
+- *ret_val = 0;
+- rc = -EBUSY;
+-
+- } else {
+- /* data */
+- val = ((phy_addr << 21) | (reg << 16) |
+- EMAC_MDIO_COMM_COMMAND_READ_45 |
+- EMAC_MDIO_COMM_START_BUSY);
+- REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
+-
+- for (i = 0; i < 50; i++) {
+- udelay(10);
+-
+- val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
+- if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
+- val &= EMAC_MDIO_COMM_DATA;
+- break;
+- }
+- }
+-
+- if (val & EMAC_MDIO_COMM_START_BUSY) {
+- BNX2X_ERR("read phy register failed\n");
+-
+- val = 0;
+- rc = -EBUSY;
+- }
+-
+- *ret_val = val;
+- }
+-
+- /* unset clause 45 mode, set the MDIO clock to a faster value
+- * (0x13 => 6.25Mhz) and restore the AUTO poll if needed
+- */
+- val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
+- val &= ~(EMAC_MDIO_MODE_CLAUSE_45 | EMAC_MDIO_MODE_CLOCK_CNT);
+- val |= (0x13 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT);
+- if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG)
+- val |= EMAC_MDIO_MODE_AUTO_POLL;
+- REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, val);
+-
+- return rc;
+-}
+-
+-static int bnx2x_mdio45_read(struct bnx2x *bp, u32 phy_addr, u32 reg,
+- u32 addr, u32 *ret_val)
+-{
+- u32 emac_base = bp->port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+-
+- return bnx2x_mdio45_ctrl_read(bp, emac_base, phy_addr,
+- reg, addr, ret_val);
+-}
+-
+-static int bnx2x_mdio45_vwrite(struct bnx2x *bp, u32 phy_addr, u32 reg,
+- u32 addr, u32 val)
+-{
+- int i;
+- u32 rd_val;
+-
+- might_sleep();
+- for (i = 0; i < 10; i++) {
+- bnx2x_mdio45_write(bp, phy_addr, reg, addr, val);
+- msleep(5);
+- bnx2x_mdio45_read(bp, phy_addr, reg, addr, &rd_val);
+- /* if the read value is not the same as the value we wrote,
+- we should write it again */
+- if (rd_val == val)
+- return 0;
+- }
+- BNX2X_ERR("MDIO write in CL45 failed\n");
+- return -EBUSY;
+-}
+-
+-/*
+- * link management
+- */
+-
+-static void bnx2x_pause_resolve(struct bnx2x *bp, u32 pause_result)
+-{
+- switch (pause_result) { /* ASYM P ASYM P */
+- case 0xb: /* 1 0 1 1 */
+- bp->flow_ctrl = FLOW_CTRL_TX;
+- break;
+-
+- case 0xe: /* 1 1 1 0 */
+- bp->flow_ctrl = FLOW_CTRL_RX;
+- break;
+-
+- case 0x5: /* 0 1 0 1 */
+- case 0x7: /* 0 1 1 1 */
+- case 0xd: /* 1 1 0 1 */
+- case 0xf: /* 1 1 1 1 */
+- bp->flow_ctrl = FLOW_CTRL_BOTH;
+- break;
+-
+- default:
+- break;
+- }
+-}
+-
+-static u8 bnx2x_ext_phy_resove_fc(struct bnx2x *bp)
+-{
+- u32 ext_phy_addr;
+- u32 ld_pause; /* local */
+- u32 lp_pause; /* link partner */
+- u32 an_complete; /* AN complete */
+- u32 pause_result;
+- u8 ret = 0;
+-
+- ext_phy_addr = ((bp->ext_phy_config &
+- PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
+- PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
+-
+- /* read twice */
+- bnx2x_mdio45_read(bp, ext_phy_addr,
+- EXT_PHY_KR_AUTO_NEG_DEVAD,
+- EXT_PHY_KR_STATUS, &an_complete);
+- bnx2x_mdio45_read(bp, ext_phy_addr,
+- EXT_PHY_KR_AUTO_NEG_DEVAD,
+- EXT_PHY_KR_STATUS, &an_complete);
+-
+- if (an_complete & EXT_PHY_KR_AUTO_NEG_COMPLETE) {
+- ret = 1;
+- bnx2x_mdio45_read(bp, ext_phy_addr,
+- EXT_PHY_KR_AUTO_NEG_DEVAD,
+- EXT_PHY_KR_AUTO_NEG_ADVERT, &ld_pause);
+- bnx2x_mdio45_read(bp, ext_phy_addr,
+- EXT_PHY_KR_AUTO_NEG_DEVAD,
+- EXT_PHY_KR_LP_AUTO_NEG, &lp_pause);
+- pause_result = (ld_pause &
+- EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_MASK) >> 8;
+- pause_result |= (lp_pause &
+- EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_MASK) >> 10;
+- DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x \n",
+- pause_result);
+- bnx2x_pause_resolve(bp, pause_result);
+- }
+- return ret;
+-}
+-
+-static void bnx2x_flow_ctrl_resolve(struct bnx2x *bp, u32 gp_status)
+-{
+- u32 ld_pause; /* local driver */
+- u32 lp_pause; /* link partner */
+- u32 pause_result;
+-
+- bp->flow_ctrl = 0;
+-
+- /* resolve from gp_status in case of AN complete and not sgmii */
+- if ((bp->req_autoneg & AUTONEG_FLOW_CTRL) &&
+- (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) &&
+- (!(bp->phy_flags & PHY_SGMII_FLAG)) &&
+- (XGXS_EXT_PHY_TYPE(bp) == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)) {
+-
+- MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
+- bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_AUTO_NEG_ADV,
+- &ld_pause);
+- bnx2x_mdio22_read(bp,
+- MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1,
+- &lp_pause);
+- pause_result = (ld_pause &
+- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>5;
+- pause_result |= (lp_pause &
+- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7;
+- DP(NETIF_MSG_LINK, "pause_result 0x%x\n", pause_result);
+- bnx2x_pause_resolve(bp, pause_result);
+- } else if (!(bp->req_autoneg & AUTONEG_FLOW_CTRL) ||
+- !(bnx2x_ext_phy_resove_fc(bp))) {
+- /* forced speed */
+- if (bp->req_autoneg & AUTONEG_FLOW_CTRL) {
+- switch (bp->req_flow_ctrl) {
+- case FLOW_CTRL_AUTO:
+- if (bp->dev->mtu <= 4500)
+- bp->flow_ctrl = FLOW_CTRL_BOTH;
+- else
+- bp->flow_ctrl = FLOW_CTRL_TX;
+- break;
+-
+- case FLOW_CTRL_TX:
+- bp->flow_ctrl = FLOW_CTRL_TX;
+- break;
+-
+- case FLOW_CTRL_RX:
+- if (bp->dev->mtu <= 4500)
+- bp->flow_ctrl = FLOW_CTRL_RX;
+- break;
+-
+- case FLOW_CTRL_BOTH:
+- if (bp->dev->mtu <= 4500)
+- bp->flow_ctrl = FLOW_CTRL_BOTH;
+- else
+- bp->flow_ctrl = FLOW_CTRL_TX;
+- break;
+-
+- case FLOW_CTRL_NONE:
+- default:
+- break;
+- }
+- } else { /* forced mode */
+- switch (bp->req_flow_ctrl) {
+- case FLOW_CTRL_AUTO:
+- DP(NETIF_MSG_LINK, "req_flow_ctrl 0x%x while"
+- " req_autoneg 0x%x\n",
+- bp->req_flow_ctrl, bp->req_autoneg);
+- break;
+-
+- case FLOW_CTRL_TX:
+- case FLOW_CTRL_RX:
+- case FLOW_CTRL_BOTH:
+- bp->flow_ctrl = bp->req_flow_ctrl;
+- break;
+-
+- case FLOW_CTRL_NONE:
+- default:
+- break;
+- }
+- }
+- }
+- DP(NETIF_MSG_LINK, "flow_ctrl 0x%x\n", bp->flow_ctrl);
+-}
+-
+-static void bnx2x_link_settings_status(struct bnx2x *bp, u32 gp_status)
+-{
+- bp->link_status = 0;
+-
+- if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) {
+- DP(NETIF_MSG_LINK, "phy link up\n");
+-
+- bp->phy_link_up = 1;
+- bp->link_status |= LINK_STATUS_LINK_UP;
+-
+- if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS)
+- bp->duplex = DUPLEX_FULL;
+- else
+- bp->duplex = DUPLEX_HALF;
+-
+- bnx2x_flow_ctrl_resolve(bp, gp_status);
+-
+- switch (gp_status & GP_STATUS_SPEED_MASK) {
+- case GP_STATUS_10M:
+- bp->line_speed = SPEED_10;
+- if (bp->duplex == DUPLEX_FULL)
+- bp->link_status |= LINK_10TFD;
+- else
+- bp->link_status |= LINK_10THD;
+- break;
+-
+- case GP_STATUS_100M:
+- bp->line_speed = SPEED_100;
+- if (bp->duplex == DUPLEX_FULL)
+- bp->link_status |= LINK_100TXFD;
+- else
+- bp->link_status |= LINK_100TXHD;
+- break;
+-
+- case GP_STATUS_1G:
+- case GP_STATUS_1G_KX:
+- bp->line_speed = SPEED_1000;
+- if (bp->duplex == DUPLEX_FULL)
+- bp->link_status |= LINK_1000TFD;
+- else
+- bp->link_status |= LINK_1000THD;
+- break;
+-
+- case GP_STATUS_2_5G:
+- bp->line_speed = SPEED_2500;
+- if (bp->duplex == DUPLEX_FULL)
+- bp->link_status |= LINK_2500TFD;
+- else
+- bp->link_status |= LINK_2500THD;
+- break;
+-
+- case GP_STATUS_5G:
+- case GP_STATUS_6G:
+- BNX2X_ERR("link speed unsupported gp_status 0x%x\n",
+- gp_status);
+- break;
+-
+- case GP_STATUS_10G_KX4:
+- case GP_STATUS_10G_HIG:
+- case GP_STATUS_10G_CX4:
+- bp->line_speed = SPEED_10000;
+- bp->link_status |= LINK_10GTFD;
+- break;
+-
+- case GP_STATUS_12G_HIG:
+- bp->line_speed = SPEED_12000;
+- bp->link_status |= LINK_12GTFD;
+- break;
+-
+- case GP_STATUS_12_5G:
+- bp->line_speed = SPEED_12500;
+- bp->link_status |= LINK_12_5GTFD;
+- break;
+-
+- case GP_STATUS_13G:
+- bp->line_speed = SPEED_13000;
+- bp->link_status |= LINK_13GTFD;
+- break;
+-
+- case GP_STATUS_15G:
+- bp->line_speed = SPEED_15000;
+- bp->link_status |= LINK_15GTFD;
+- break;
+-
+- case GP_STATUS_16G:
+- bp->line_speed = SPEED_16000;
+- bp->link_status |= LINK_16GTFD;
+- break;
+-
+- default:
+- BNX2X_ERR("link speed unsupported gp_status 0x%x\n",
+- gp_status);
+- break;
+- }
+-
+- bp->link_status |= LINK_STATUS_SERDES_LINK;
+-
+- if (bp->req_autoneg & AUTONEG_SPEED) {
+- bp->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED;
+-
+- if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE)
+- bp->link_status |=
+- LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
+-
+- if (bp->autoneg & AUTONEG_PARALLEL)
+- bp->link_status |=
+- LINK_STATUS_PARALLEL_DETECTION_USED;
+- }
+-
+- if (bp->flow_ctrl & FLOW_CTRL_TX)
+- bp->link_status |= LINK_STATUS_TX_FLOW_CONTROL_ENABLED;
+-
+- if (bp->flow_ctrl & FLOW_CTRL_RX)
+- bp->link_status |= LINK_STATUS_RX_FLOW_CONTROL_ENABLED;
+-
+- } else { /* link_down */
+- DP(NETIF_MSG_LINK, "phy link down\n");
+-
+- bp->phy_link_up = 0;
+-
+- bp->line_speed = 0;
+- bp->duplex = DUPLEX_FULL;
+- bp->flow_ctrl = 0;
+- }
+-
+- DP(NETIF_MSG_LINK, "gp_status 0x%x phy_link_up %d\n"
+- DP_LEVEL " line_speed %d duplex %d flow_ctrl 0x%x"
+- " link_status 0x%x\n",
+- gp_status, bp->phy_link_up, bp->line_speed, bp->duplex,
+- bp->flow_ctrl, bp->link_status);
+-}
+-
+-static void bnx2x_link_int_ack(struct bnx2x *bp, int is_10g)
+-{
+- int port = bp->port;
+-
+- /* first reset all status
+- * we assume only one line will be change at a time */
+- bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
+- (NIG_STATUS_XGXS0_LINK10G |
+- NIG_STATUS_XGXS0_LINK_STATUS |
+- NIG_STATUS_SERDES0_LINK_STATUS));
+- if (bp->phy_link_up) {
+- if (is_10g) {
+- /* Disable the 10G link interrupt
+- * by writing 1 to the status register
+- */
+- DP(NETIF_MSG_LINK, "10G XGXS phy link up\n");
+- bnx2x_bits_en(bp,
+- NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
+- NIG_STATUS_XGXS0_LINK10G);
+-
+- } else if (bp->phy_flags & PHY_XGXS_FLAG) {
+- /* Disable the link interrupt
+- * by writing 1 to the relevant lane
+- * in the status register
+- */
+- DP(NETIF_MSG_LINK, "1G XGXS phy link up\n");
+- bnx2x_bits_en(bp,
+- NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
+- ((1 << bp->ser_lane) <<
+- NIG_STATUS_XGXS0_LINK_STATUS_SIZE));
+-
+- } else { /* SerDes */
+- DP(NETIF_MSG_LINK, "SerDes phy link up\n");
+- /* Disable the link interrupt
+- * by writing 1 to the status register
+- */
+- bnx2x_bits_en(bp,
+- NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
+- NIG_STATUS_SERDES0_LINK_STATUS);
+- }
+-
+- } else { /* link_down */
+- }
+-}
+-
+-static int bnx2x_ext_phy_is_link_up(struct bnx2x *bp)
+-{
+- u32 ext_phy_type;
+- u32 ext_phy_addr;
+- u32 val1 = 0, val2;
+- u32 rx_sd, pcs_status;
+-
+- if (bp->phy_flags & PHY_XGXS_FLAG) {
+- ext_phy_addr = ((bp->ext_phy_config &
+- PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
+- PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
+-
+- ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
+- switch (ext_phy_type) {
+- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
+- DP(NETIF_MSG_LINK, "XGXS Direct\n");
+- val1 = 1;
+- break;
+-
+- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
+- DP(NETIF_MSG_LINK, "XGXS 8705\n");
+- bnx2x_mdio45_read(bp, ext_phy_addr,
+- EXT_PHY_OPT_WIS_DEVAD,
+- EXT_PHY_OPT_LASI_STATUS, &val1);
+- DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);
+-
+- bnx2x_mdio45_read(bp, ext_phy_addr,
+- EXT_PHY_OPT_WIS_DEVAD,
+- EXT_PHY_OPT_LASI_STATUS, &val1);
+- DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);
+-
+- bnx2x_mdio45_read(bp, ext_phy_addr,
+- EXT_PHY_OPT_PMA_PMD_DEVAD,
+- EXT_PHY_OPT_PMD_RX_SD, &rx_sd);
+- DP(NETIF_MSG_LINK, "8705 rx_sd 0x%x\n", rx_sd);
+- val1 = (rx_sd & 0x1);
+- break;
+-
+- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
+- DP(NETIF_MSG_LINK, "XGXS 8706\n");
+- bnx2x_mdio45_read(bp, ext_phy_addr,
+- EXT_PHY_OPT_PMA_PMD_DEVAD,
+- EXT_PHY_OPT_LASI_STATUS, &val1);
+- DP(NETIF_MSG_LINK, "8706 LASI status 0x%x\n", val1);
+-
+- bnx2x_mdio45_read(bp, ext_phy_addr,
+- EXT_PHY_OPT_PMA_PMD_DEVAD,
+- EXT_PHY_OPT_LASI_STATUS, &val1);
+- DP(NETIF_MSG_LINK, "8706 LASI status 0x%x\n", val1);
+-
+- bnx2x_mdio45_read(bp, ext_phy_addr,
+- EXT_PHY_OPT_PMA_PMD_DEVAD,
+- EXT_PHY_OPT_PMD_RX_SD, &rx_sd);
+- bnx2x_mdio45_read(bp, ext_phy_addr,
+- EXT_PHY_OPT_PCS_DEVAD,
+- EXT_PHY_OPT_PCS_STATUS, &pcs_status);
+- bnx2x_mdio45_read(bp, ext_phy_addr,
+- EXT_PHY_AUTO_NEG_DEVAD,
+- EXT_PHY_OPT_AN_LINK_STATUS, &val2);
+-
+- DP(NETIF_MSG_LINK, "8706 rx_sd 0x%x"
+- " pcs_status 0x%x 1Gbps link_status 0x%x 0x%x\n",
+- rx_sd, pcs_status, val2, (val2 & (1<<1)));
+- /* link is up if both bit 0 of pmd_rx_sd and
+- * bit 0 of pcs_status are set, or if the autoneg bit
+- 1 is set
+- */
+- val1 = ((rx_sd & pcs_status & 0x1) || (val2 & (1<<1)));
+- break;
+-
+- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
+- bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
+-
+- /* clear the interrupt LASI status register */
+- bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
+- ext_phy_addr,
+- EXT_PHY_KR_PCS_DEVAD,
+- EXT_PHY_KR_LASI_STATUS, &val2);
+- bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
+- ext_phy_addr,
+- EXT_PHY_KR_PCS_DEVAD,
+- EXT_PHY_KR_LASI_STATUS, &val1);
+- DP(NETIF_MSG_LINK, "KR LASI status 0x%x->0x%x\n",
+- val2, val1);
+- /* Check the LASI */
+- bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
+- ext_phy_addr,
+- EXT_PHY_KR_PMA_PMD_DEVAD,
+- 0x9003, &val2);
+- bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
+- ext_phy_addr,
+- EXT_PHY_KR_PMA_PMD_DEVAD,
+- 0x9003, &val1);
+- DP(NETIF_MSG_LINK, "KR 0x9003 0x%x->0x%x\n",
+- val2, val1);
+- /* Check the link status */
+- bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
+- ext_phy_addr,
+- EXT_PHY_KR_PCS_DEVAD,
+- EXT_PHY_KR_PCS_STATUS, &val2);
+- DP(NETIF_MSG_LINK, "KR PCS status 0x%x\n", val2);
+- /* Check the link status on 1.1.2 */
+- bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
+- ext_phy_addr,
+- EXT_PHY_OPT_PMA_PMD_DEVAD,
+- EXT_PHY_KR_STATUS, &val2);
+- bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
+- ext_phy_addr,
+- EXT_PHY_OPT_PMA_PMD_DEVAD,
+- EXT_PHY_KR_STATUS, &val1);
+- DP(NETIF_MSG_LINK,
+- "KR PMA status 0x%x->0x%x\n", val2, val1);
+- val1 = ((val1 & 4) == 4);
+- /* If 1G was requested assume the link is up */
+- if (!(bp->req_autoneg & AUTONEG_SPEED) &&
+- (bp->req_line_speed == SPEED_1000))
+- val1 = 1;
+- bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);
+- break;
+-
+- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
+- bnx2x_mdio45_read(bp, ext_phy_addr,
+- EXT_PHY_OPT_PMA_PMD_DEVAD,
+- EXT_PHY_OPT_LASI_STATUS, &val2);
+- bnx2x_mdio45_read(bp, ext_phy_addr,
+- EXT_PHY_OPT_PMA_PMD_DEVAD,
+- EXT_PHY_OPT_LASI_STATUS, &val1);
+- DP(NETIF_MSG_LINK,
+- "10G-base-T LASI status 0x%x->0x%x\n", val2, val1);
+- bnx2x_mdio45_read(bp, ext_phy_addr,
+- EXT_PHY_OPT_PMA_PMD_DEVAD,
+- EXT_PHY_KR_STATUS, &val2);
+- bnx2x_mdio45_read(bp, ext_phy_addr,
+- EXT_PHY_OPT_PMA_PMD_DEVAD,
+- EXT_PHY_KR_STATUS, &val1);
+- DP(NETIF_MSG_LINK,
+- "10G-base-T PMA status 0x%x->0x%x\n", val2, val1);
+- val1 = ((val1 & 4) == 4);
+- /* if link is up
+- * print the AN outcome of the SFX7101 PHY
+- */
+- if (val1) {
+- bnx2x_mdio45_read(bp, ext_phy_addr,
+- EXT_PHY_KR_AUTO_NEG_DEVAD,
+- 0x21, &val2);
+- DP(NETIF_MSG_LINK,
+- "SFX7101 AN status 0x%x->%s\n", val2,
+- (val2 & (1<<14)) ? "Master" : "Slave");
+- }
+- break;
+-
+- default:
+- DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
+- bp->ext_phy_config);
+- val1 = 0;
+- break;
+- }
+-
+- } else { /* SerDes */
+- ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
+- switch (ext_phy_type) {
+- case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
+- DP(NETIF_MSG_LINK, "SerDes Direct\n");
+- val1 = 1;
+- break;
+-
+- case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
+- DP(NETIF_MSG_LINK, "SerDes 5482\n");
+- val1 = 1;
+- break;
+-
+- default:
+- DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n",
+- bp->ext_phy_config);
+- val1 = 0;
+- break;
+- }
+- }
+-
+- return val1;
+-}
+-
+-static void bnx2x_bmac_enable(struct bnx2x *bp, int is_lb)
+-{
+- int port = bp->port;
+- u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
+- NIG_REG_INGRESS_BMAC0_MEM;
+- u32 wb_write[2];
+- u32 val;
+-
+- DP(NETIF_MSG_LINK, "enabling BigMAC\n");
+- /* reset and unreset the BigMac */
+- REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+- (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+- msleep(5);
+- REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
+- (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+-
+- /* enable access for bmac registers */
+- NIG_WR(NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
+-
+- /* XGXS control */
+- wb_write[0] = 0x3c;
+- wb_write[1] = 0;
+- REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_XGXS_CONTROL,
+- wb_write, 2);
+-
+- /* tx MAC SA */
+- wb_write[0] = ((bp->dev->dev_addr[2] << 24) |
+- (bp->dev->dev_addr[3] << 16) |
+- (bp->dev->dev_addr[4] << 8) |
+- bp->dev->dev_addr[5]);
+- wb_write[1] = ((bp->dev->dev_addr[0] << 8) |
+- bp->dev->dev_addr[1]);
+- REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR,
+- wb_write, 2);
+-
+- /* tx control */
+- val = 0xc0;
+- if (bp->flow_ctrl & FLOW_CTRL_TX)
+- val |= 0x800000;
+- wb_write[0] = val;
+- wb_write[1] = 0;
+- REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_CONTROL, wb_write, 2);
+-
+- /* set tx mtu */
+- wb_write[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; /* -CRC */
+- wb_write[1] = 0;
+- REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, wb_write, 2);
+-
+- /* mac control */
+- val = 0x3;
+- if (is_lb) {
+- val |= 0x4;
+- DP(NETIF_MSG_LINK, "enable bmac loopback\n");
+- }
+- wb_write[0] = val;
+- wb_write[1] = 0;
+- REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL,
+- wb_write, 2);
+-
+- /* rx control set to don't strip crc */
+- val = 0x14;
+- if (bp->flow_ctrl & FLOW_CTRL_RX)
+- val |= 0x20;
+- wb_write[0] = val;
+- wb_write[1] = 0;
+- REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_CONTROL, wb_write, 2);
+-
+- /* set rx mtu */
+- wb_write[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
+- wb_write[1] = 0;
+- REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, wb_write, 2);
+-
+- /* set cnt max size */
+- wb_write[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; /* -VLAN */
+- wb_write[1] = 0;
+- REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE,
+- wb_write, 2);
+-
+- /* configure safc */
+- wb_write[0] = 0x1000200;
+- wb_write[1] = 0;
+- REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS,
+- wb_write, 2);
+-
+- /* fix for emulation */
+- if (CHIP_REV(bp) == CHIP_REV_EMUL) {
+- wb_write[0] = 0xf000;
+- wb_write[1] = 0;
+- REG_WR_DMAE(bp,
+- bmac_addr + BIGMAC_REGISTER_TX_PAUSE_THRESHOLD,
+- wb_write, 2);
+- }
+-
+- /* reset old bmac stats */
+- memset(&bp->old_bmac, 0, sizeof(struct bmac_stats));
+-
+- NIG_WR(NIG_REG_XCM0_OUT_EN + port*4, 0x0);
+-
+- /* select XGXS */
+- NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0x1);
+- NIG_WR(NIG_REG_XGXS_LANE_SEL_P0 + port*4, 0x0);
+-
+- /* disable the NIG in/out to the emac */
+- NIG_WR(NIG_REG_EMAC0_IN_EN + port*4, 0x0);
+- NIG_WR(NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, 0x0);
+- NIG_WR(NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x0);
+-
+- /* enable the NIG in/out to the bmac */
+- NIG_WR(NIG_REG_EGRESS_EMAC0_PORT + port*4, 0x0);
+-
+- NIG_WR(NIG_REG_BMAC0_IN_EN + port*4, 0x1);
+- val = 0;
+- if (bp->flow_ctrl & FLOW_CTRL_TX)
+- val = 1;
+- NIG_WR(NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, val);
+- NIG_WR(NIG_REG_BMAC0_OUT_EN + port*4, 0x1);
+-
+- bp->phy_flags |= PHY_BMAC_FLAG;
+-
+- bp->stats_state = STATS_STATE_ENABLE;
+-}
+-
+-static void bnx2x_bmac_rx_disable(struct bnx2x *bp)
+-{
+- int port = bp->port;
+- u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
+- NIG_REG_INGRESS_BMAC0_MEM;
+- u32 wb_write[2];
+-
+- /* Only if the bmac is out of reset */
+- if (REG_RD(bp, MISC_REG_RESET_REG_2) &
+- (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)) {
+- /* Clear Rx Enable bit in BMAC_CONTROL register */
+-#ifdef BNX2X_DMAE_RD
+- bnx2x_read_dmae(bp, bmac_addr +
+- BIGMAC_REGISTER_BMAC_CONTROL, 2);
+- wb_write[0] = *bnx2x_sp(bp, wb_data[0]);
+- wb_write[1] = *bnx2x_sp(bp, wb_data[1]);
+-#else
+- wb_write[0] = REG_RD(bp,
+- bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL);
+- wb_write[1] = REG_RD(bp,
+- bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL + 4);
+-#endif
+- wb_write[0] &= ~BMAC_CONTROL_RX_ENABLE;
+- REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL,
+- wb_write, 2);
+- msleep(1);
+- }
+-}
+-
+-static void bnx2x_emac_enable(struct bnx2x *bp)
+-{
+- int port = bp->port;
+- u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+- u32 val;
+- int timeout;
+-
+- DP(NETIF_MSG_LINK, "enabling EMAC\n");
+- /* reset and unreset the emac core */
+- REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+- (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
+- msleep(5);
+- REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
+- (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
+-
+- /* enable emac and not bmac */
+- NIG_WR(NIG_REG_EGRESS_EMAC0_PORT + port*4, 1);
+-
+- /* for paladium */
+- if (CHIP_REV(bp) == CHIP_REV_EMUL) {
+- /* Use lane 1 (of lanes 0-3) */
+- NIG_WR(NIG_REG_XGXS_LANE_SEL_P0 + port*4, 1);
+- NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
+- }
+- /* for fpga */
+- else if (CHIP_REV(bp) == CHIP_REV_FPGA) {
+- /* Use lane 1 (of lanes 0-3) */
+- NIG_WR(NIG_REG_XGXS_LANE_SEL_P0 + port*4, 1);
+- NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0);
+- }
+- /* ASIC */
+- else {
+- if (bp->phy_flags & PHY_XGXS_FLAG) {
+- DP(NETIF_MSG_LINK, "XGXS\n");
+- /* select the master lanes (out of 0-3) */
+- NIG_WR(NIG_REG_XGXS_LANE_SEL_P0 + port*4,
+- bp->ser_lane);
+- /* select XGXS */
+- NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
+-
+- } else { /* SerDes */
+- DP(NETIF_MSG_LINK, "SerDes\n");
+- /* select SerDes */
+- NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0);
+- }
+- }
+-
+- /* enable emac */
+- NIG_WR(NIG_REG_NIG_EMAC0_EN + port*4, 1);
+-
+- /* init emac - use read-modify-write */
+- /* self clear reset */
+- val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
+- EMAC_WR(EMAC_REG_EMAC_MODE, (val | EMAC_MODE_RESET));
+-
+- timeout = 200;
+- while (val & EMAC_MODE_RESET) {
+- val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
+- DP(NETIF_MSG_LINK, "EMAC reset reg is %u\n", val);
+- if (!timeout) {
+- BNX2X_ERR("EMAC timeout!\n");
+- break;
+- }
+- timeout--;
+- }
+-
+- /* reset tx part */
+- EMAC_WR(EMAC_REG_EMAC_TX_MODE, EMAC_TX_MODE_RESET);
+-
+- timeout = 200;
+- while (val & EMAC_TX_MODE_RESET) {
+- val = REG_RD(bp, emac_base + EMAC_REG_EMAC_TX_MODE);
+- DP(NETIF_MSG_LINK, "EMAC reset reg is %u\n", val);
+- if (!timeout) {
+- BNX2X_ERR("EMAC timeout!\n");
+- break;
+- }
+- timeout--;
+- }
+-
+- if (CHIP_REV_IS_SLOW(bp)) {
+- /* config GMII mode */
+- val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
+- EMAC_WR(EMAC_REG_EMAC_MODE, (val | EMAC_MODE_PORT_GMII));
+-
+- } else { /* ASIC */
+- /* pause enable/disable */
+- bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
+- EMAC_RX_MODE_FLOW_EN);
+- if (bp->flow_ctrl & FLOW_CTRL_RX)
+- bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
+- EMAC_RX_MODE_FLOW_EN);
+-
+- bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
+- EMAC_TX_MODE_EXT_PAUSE_EN);
+- if (bp->flow_ctrl & FLOW_CTRL_TX)
+- bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
+- EMAC_TX_MODE_EXT_PAUSE_EN);
+- }
+-
+- /* KEEP_VLAN_TAG, promiscuous */
+- val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
+- val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS;
+- EMAC_WR(EMAC_REG_EMAC_RX_MODE, val);
+-
+- /* identify magic packets */
+- val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
+- EMAC_WR(EMAC_REG_EMAC_MODE, (val | EMAC_MODE_MPKT));
+-
+- /* enable emac for jumbo packets */
+- EMAC_WR(EMAC_REG_EMAC_RX_MTU_SIZE,
+- (EMAC_RX_MTU_SIZE_JUMBO_ENA |
+- (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD))); /* -VLAN */
+-
+- /* strip CRC */
+- NIG_WR(NIG_REG_NIG_INGRESS_EMAC0_NO_CRC + port*4, 0x1);
+-
+- val = ((bp->dev->dev_addr[0] << 8) |
+- bp->dev->dev_addr[1]);
+- EMAC_WR(EMAC_REG_EMAC_MAC_MATCH, val);
+-
+- val = ((bp->dev->dev_addr[2] << 24) |
+- (bp->dev->dev_addr[3] << 16) |
+- (bp->dev->dev_addr[4] << 8) |
+- bp->dev->dev_addr[5]);
+- EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + 4, val);
+-
+- /* disable the NIG in/out to the bmac */
+- NIG_WR(NIG_REG_BMAC0_IN_EN + port*4, 0x0);
+- NIG_WR(NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, 0x0);
+- NIG_WR(NIG_REG_BMAC0_OUT_EN + port*4, 0x0);
+-
+- /* enable the NIG in/out to the emac */
+- NIG_WR(NIG_REG_EMAC0_IN_EN + port*4, 0x1);
+- val = 0;
+- if (bp->flow_ctrl & FLOW_CTRL_TX)
+- val = 1;
+- NIG_WR(NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, val);
+- NIG_WR(NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x1);
+-
+- if (CHIP_REV(bp) == CHIP_REV_FPGA) {
+- /* take the BigMac out of reset */
+- REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
+- (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+-
+- /* enable access for bmac registers */
+- NIG_WR(NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
+- }
+-
+- bp->phy_flags |= PHY_EMAC_FLAG;
+-
+- bp->stats_state = STATS_STATE_ENABLE;
+-}
+-
+-static void bnx2x_emac_program(struct bnx2x *bp)
+-{
+- u16 mode = 0;
+- int port = bp->port;
+-
+- DP(NETIF_MSG_LINK, "setting link speed & duplex\n");
+- bnx2x_bits_dis(bp, GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
+- (EMAC_MODE_25G_MODE |
+- EMAC_MODE_PORT_MII_10M |
+- EMAC_MODE_HALF_DUPLEX));
+- switch (bp->line_speed) {
+- case SPEED_10:
+- mode |= EMAC_MODE_PORT_MII_10M;
+- break;
+-
+- case SPEED_100:
+- mode |= EMAC_MODE_PORT_MII;
+- break;
+-
+- case SPEED_1000:
+- mode |= EMAC_MODE_PORT_GMII;
+- break;
+-
+- case SPEED_2500:
+- mode |= (EMAC_MODE_25G_MODE | EMAC_MODE_PORT_GMII);
+- break;
+-
+- default:
+- /* 10G not valid for EMAC */
+- BNX2X_ERR("Invalid line_speed 0x%x\n", bp->line_speed);
+- break;
+- }
+-
+- if (bp->duplex == DUPLEX_HALF)
+- mode |= EMAC_MODE_HALF_DUPLEX;
+- bnx2x_bits_en(bp, GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
+- mode);
+-
+- bnx2x_leds_set(bp, bp->line_speed);
+-}
+-
+-static void bnx2x_set_sgmii_tx_driver(struct bnx2x *bp)
+-{
+- u32 lp_up2;
+- u32 tx_driver;
+-
+- /* read precomp */
+- MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_OVER_1G);
+- bnx2x_mdio22_read(bp, MDIO_OVER_1G_LP_UP2, &lp_up2);
+-
+- MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_TX0);
+- bnx2x_mdio22_read(bp, MDIO_TX0_TX_DRIVER, &tx_driver);
+-
+- /* bits [10:7] at lp_up2, positioned at [15:12] */
+- lp_up2 = (((lp_up2 & MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK) >>
+- MDIO_OVER_1G_LP_UP2_PREEMPHASIS_SHIFT) <<
+- MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT);
+-
+- if ((lp_up2 != 0) &&
+- (lp_up2 != (tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK))) {
+- /* replace tx_driver bits [15:12] */
+- tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK;
+- tx_driver |= lp_up2;
+- bnx2x_mdio22_write(bp, MDIO_TX0_TX_DRIVER, tx_driver);
+- }
+-}
+-
+-static void bnx2x_pbf_update(struct bnx2x *bp)
+-{
+- int port = bp->port;
+- u32 init_crd, crd;
+- u32 count = 1000;
+- u32 pause = 0;
+-
+- /* disable port */
+- REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x1);
+-
+- /* wait for init credit */
+- init_crd = REG_RD(bp, PBF_REG_P0_INIT_CRD + port*4);
+- crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
+- DP(NETIF_MSG_LINK, "init_crd 0x%x crd 0x%x\n", init_crd, crd);
+-
+- while ((init_crd != crd) && count) {
+- msleep(5);
+-
+- crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
+- count--;
+- }
+- crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
+- if (init_crd != crd)
+- BNX2X_ERR("BUG! init_crd 0x%x != crd 0x%x\n", init_crd, crd);
+-
+- if (bp->flow_ctrl & FLOW_CTRL_RX)
+- pause = 1;
+- REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, pause);
+- if (pause) {
+- /* update threshold */
+- REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0);
+- /* update init credit */
+- init_crd = 778; /* (800-18-4) */
+-
+- } else {
+- u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD)/16;
+-
+- /* update threshold */
+- REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, thresh);
+- /* update init credit */
+- switch (bp->line_speed) {
+- case SPEED_10:
+- case SPEED_100:
+- case SPEED_1000:
+- init_crd = thresh + 55 - 22;
+- break;
+-
+- case SPEED_2500:
+- init_crd = thresh + 138 - 22;
+- break;
+-
+- case SPEED_10000:
+- init_crd = thresh + 553 - 22;
+- break;
+-
+- default:
+- BNX2X_ERR("Invalid line_speed 0x%x\n",
+- bp->line_speed);
+- break;
+- }
+- }
+- REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, init_crd);
+- DP(NETIF_MSG_LINK, "PBF updated to speed %d credit %d\n",
+- bp->line_speed, init_crd);
+-
+- /* probe the credit changes */
+- REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x1);
+- msleep(5);
+- REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x0);
+-
+- /* enable port */
+- REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x0);
+-}
+-
+-static void bnx2x_update_mng(struct bnx2x *bp)
+-{
+- if (!nomcp)
+- SHMEM_WR(bp, port_mb[bp->port].link_status,
+- bp->link_status);
+-}
+-
+-static void bnx2x_link_report(struct bnx2x *bp)
+-{
+- if (bp->link_up) {
+- netif_carrier_on(bp->dev);
+- printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
+-
+- printk("%d Mbps ", bp->line_speed);
+-
+- if (bp->duplex == DUPLEX_FULL)
+- printk("full duplex");
+- else
+- printk("half duplex");
+-
+- if (bp->flow_ctrl) {
+- if (bp->flow_ctrl & FLOW_CTRL_RX) {
+- printk(", receive ");
+- if (bp->flow_ctrl & FLOW_CTRL_TX)
+- printk("& transmit ");
+- } else {
+- printk(", transmit ");
+- }
+- printk("flow control ON");
+- }
+- printk("\n");
+-
+- } else { /* link_down */
+- netif_carrier_off(bp->dev);
+- printk(KERN_INFO PFX "%s NIC Link is Down\n", bp->dev->name);
+- }
+-}
+-
+-static void bnx2x_link_up(struct bnx2x *bp)
+-{
+- int port = bp->port;
+-
+- /* PBF - link up */
+- bnx2x_pbf_update(bp);
+-
+- /* disable drain */
+- NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0);
+-
+- /* update shared memory */
+- bnx2x_update_mng(bp);
+-
+- /* indicate link up */
+- bnx2x_link_report(bp);
+-}
+-
+-static void bnx2x_link_down(struct bnx2x *bp)
+-{
+- int port = bp->port;
+-
+- /* notify stats */
+- if (bp->stats_state != STATS_STATE_DISABLE) {
+- bp->stats_state = STATS_STATE_STOP;
+- DP(BNX2X_MSG_STATS, "stats_state - STOP\n");
+- }
+-
+- /* indicate no mac active */
+- bp->phy_flags &= ~(PHY_BMAC_FLAG | PHY_EMAC_FLAG);
+-
+- /* update shared memory */
+- bnx2x_update_mng(bp);
+-
+- /* activate nig drain */
+- NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
+-
+- /* reset BigMac */
+- bnx2x_bmac_rx_disable(bp);
+- REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+- (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+-
+- /* indicate link down */
+- bnx2x_link_report(bp);
+-}
+-
+-static void bnx2x_init_mac_stats(struct bnx2x *bp);
+-
+-/* This function is called upon link interrupt */
+-static void bnx2x_link_update(struct bnx2x *bp)
+-{
+- int port = bp->port;
+- int i;
+- u32 gp_status;
+- int link_10g;
+-
+- DP(NETIF_MSG_LINK, "port %x, %s, int_status 0x%x,"
+- " int_mask 0x%x, saved_mask 0x%x, MI_INT %x, SERDES_LINK %x,"
+- " 10G %x, XGXS_LINK %x\n", port,
+- (bp->phy_flags & PHY_XGXS_FLAG)? "XGXS":"SerDes",
+- REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4),
+- REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4), bp->nig_mask,
+- REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18),
+- REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c),
+- REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
+- REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68)
+- );
+-
+- might_sleep();
+- MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_GP_STATUS);
+- /* avoid fast toggling */
+- for (i = 0; i < 10; i++) {
+- msleep(10);
+- bnx2x_mdio22_read(bp, MDIO_GP_STATUS_TOP_AN_STATUS1,
+- &gp_status);
+- }
+-
+- bnx2x_link_settings_status(bp, gp_status);
+-
+- /* anything 10 and over uses the bmac */
+- link_10g = ((bp->line_speed >= SPEED_10000) &&
+- (bp->line_speed <= SPEED_16000));
+-
+- bnx2x_link_int_ack(bp, link_10g);
+-
+- /* link is up only if both local phy and external phy are up */
+- bp->link_up = (bp->phy_link_up && bnx2x_ext_phy_is_link_up(bp));
+- if (bp->link_up) {
+- if (link_10g) {
+- bnx2x_bmac_enable(bp, 0);
+- bnx2x_leds_set(bp, SPEED_10000);
+-
+- } else {
+- bnx2x_emac_enable(bp);
+- bnx2x_emac_program(bp);
+-
+- /* AN complete? */
+- if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) {
+- if (!(bp->phy_flags & PHY_SGMII_FLAG))
+- bnx2x_set_sgmii_tx_driver(bp);
+- }
+- }
+- bnx2x_link_up(bp);
+-
+- } else { /* link down */
+- bnx2x_leds_unset(bp);
+- bnx2x_link_down(bp);
+- }
+-
+- bnx2x_init_mac_stats(bp);
+-}
+-
+-/*
+- * Init service functions
+- */
+-
+-static void bnx2x_set_aer_mmd(struct bnx2x *bp)
+-{
+- u16 offset = (bp->phy_flags & PHY_XGXS_FLAG) ?
+- (bp->phy_addr + bp->ser_lane) : 0;
+-
+- MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_AER_BLOCK);
+- bnx2x_mdio22_write(bp, MDIO_AER_BLOCK_AER_REG, 0x3800 + offset);
+-}
+-
+-static void bnx2x_set_master_ln(struct bnx2x *bp)
+-{
+- u32 new_master_ln;
+-
+- /* set the master_ln for AN */
+- MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_XGXS_BLOCK2);
+- bnx2x_mdio22_read(bp, MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
+- &new_master_ln);
+- bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
+- (new_master_ln | bp->ser_lane));
+-}
+-
+-static void bnx2x_reset_unicore(struct bnx2x *bp)
+-{
+- u32 mii_control;
+- int i;
+-
+- MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
+- bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
+- /* reset the unicore */
+- bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
+- (mii_control | MDIO_COMBO_IEEO_MII_CONTROL_RESET));
+-
+- /* wait for the reset to self clear */
+- for (i = 0; i < MDIO_ACCESS_TIMEOUT; i++) {
+- udelay(5);
+-
+- /* the reset erased the previous bank value */
+- MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
+- bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
+- &mii_control);
+-
+- if (!(mii_control & MDIO_COMBO_IEEO_MII_CONTROL_RESET)) {
+- udelay(5);
+- return;
+- }
+- }
+-
+- BNX2X_ERR("BUG! %s (0x%x) is still in reset!\n",
+- (bp->phy_flags & PHY_XGXS_FLAG)? "XGXS":"SerDes",
+- bp->phy_addr);
+-}
+-
+-static void bnx2x_set_swap_lanes(struct bnx2x *bp)
+-{
+- /* Each two bits represents a lane number:
+- No swap is 0123 => 0x1b no need to enable the swap */
+-
+- MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_XGXS_BLOCK2);
+- if (bp->rx_lane_swap != 0x1b) {
+- bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_RX_LN_SWAP,
+- (bp->rx_lane_swap |
+- MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE |
+- MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE));
+- } else {
+- bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0);
+- }
+-
+- if (bp->tx_lane_swap != 0x1b) {
+- bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_TX_LN_SWAP,
+- (bp->tx_lane_swap |
+- MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE));
+- } else {
+- bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0);
+- }
+-}
+-
+-static void bnx2x_set_parallel_detection(struct bnx2x *bp)
+-{
+- u32 control2;
+-
+- MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_SERDES_DIGITAL);
+- bnx2x_mdio22_read(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
+- &control2);
+-
+- if (bp->autoneg & AUTONEG_PARALLEL) {
+- control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
+- } else {
+- control2 &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
+- }
+- bnx2x_mdio22_write(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
+- control2);
+-
+- if (bp->phy_flags & PHY_XGXS_FLAG) {
+- DP(NETIF_MSG_LINK, "XGXS\n");
+- MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_10G_PARALLEL_DETECT);
+-
+- bnx2x_mdio22_write(bp,
+- MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK,
+- MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT);
+-
+- bnx2x_mdio22_read(bp,
+- MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
+- &control2);
+-
+- if (bp->autoneg & AUTONEG_PARALLEL) {
+- control2 |=
+- MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN;
+- } else {
+- control2 &=
+- ~MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN;
+- }
+- bnx2x_mdio22_write(bp,
+- MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
+- control2);
+-
+- /* Disable parallel detection of HiG */
+- MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_XGXS_BLOCK2);
+- bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_UNICORE_MODE_10G,
+- MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS |
+- MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS);
+- }
+-}
+-
+-static void bnx2x_set_autoneg(struct bnx2x *bp)
+-{
+- u32 reg_val;
+-
+- /* CL37 Autoneg */
+- MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
+- bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL, ®_val);
+- if ((bp->req_autoneg & AUTONEG_SPEED) &&
+- (bp->autoneg & AUTONEG_CL37)) {
+- /* CL37 Autoneg Enabled */
+- reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_AN_EN;
+- } else {
+- /* CL37 Autoneg Disabled */
+- reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
+- MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN);
+- }
+- bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
+-
+- /* Enable/Disable Autodetection */
+- MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_SERDES_DIGITAL);
+- bnx2x_mdio22_read(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, ®_val);
+- reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN;
+-
+- if ((bp->req_autoneg & AUTONEG_SPEED) &&
+- (bp->autoneg & AUTONEG_SGMII_FIBER_AUTODET)) {
+- reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
+- } else {
+- reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
+- }
+- bnx2x_mdio22_write(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val);
+-
+- /* Enable TetonII and BAM autoneg */
+- MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_BAM_NEXT_PAGE);
+- bnx2x_mdio22_read(bp, MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
+- ®_val);
+- if ((bp->req_autoneg & AUTONEG_SPEED) &&
+- (bp->autoneg & AUTONEG_CL37) && (bp->autoneg & AUTONEG_BAM)) {
+- /* Enable BAM aneg Mode and TetonII aneg Mode */
+- reg_val |= (MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
+- MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
+- } else {
+- /* TetonII and BAM Autoneg Disabled */
+- reg_val &= ~(MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
+- MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
+- }
+- bnx2x_mdio22_write(bp, MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
+- reg_val);
+-
+- /* Enable Clause 73 Aneg */
+- if ((bp->req_autoneg & AUTONEG_SPEED) &&
+- (bp->autoneg & AUTONEG_CL73)) {
+- /* Enable BAM Station Manager */
+- MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_USERB0);
+- bnx2x_mdio22_write(bp, MDIO_CL73_USERB0_CL73_BAM_CTRL1,
+- (MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN |
+- MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_STATION_MNGR_EN |
+- MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN));
+-
+- /* Merge CL73 and CL37 aneg resolution */
+- bnx2x_mdio22_read(bp, MDIO_CL73_USERB0_CL73_BAM_CTRL3,
+- ®_val);
+- bnx2x_mdio22_write(bp, MDIO_CL73_USERB0_CL73_BAM_CTRL3,
+- (reg_val |
+- MDIO_CL73_USERB0_CL73_BAM_CTRL3_USE_CL73_HCD_MR));
+-
+- /* Set the CL73 AN speed */
+- MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_IEEEB1);
+- bnx2x_mdio22_read(bp, MDIO_CL73_IEEEB1_AN_ADV2, ®_val);
+- /* In the SerDes we support only the 1G.
+- In the XGXS we support the 10G KX4
+- but we currently do not support the KR */
+- if (bp->phy_flags & PHY_XGXS_FLAG) {
+- DP(NETIF_MSG_LINK, "XGXS\n");
+- /* 10G KX4 */
+- reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4;
+- } else {
+- DP(NETIF_MSG_LINK, "SerDes\n");
+- /* 1000M KX */
+- reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX;
+- }
+- bnx2x_mdio22_write(bp, MDIO_CL73_IEEEB1_AN_ADV2, reg_val);
+-
+- /* CL73 Autoneg Enabled */
+- reg_val = MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN;
+- } else {
+- /* CL73 Autoneg Disabled */
+- reg_val = 0;
+- }
+- MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_IEEEB0);
+- bnx2x_mdio22_write(bp, MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val);
+-}
+-
+-/* program SerDes, forced speed */
+-static void bnx2x_program_serdes(struct bnx2x *bp)
+-{
+- u32 reg_val;
+-
+- /* program duplex, disable autoneg */
+- MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
+- bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL, ®_val);
+- reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX |
+- MDIO_COMBO_IEEO_MII_CONTROL_AN_EN);
+- if (bp->req_duplex == DUPLEX_FULL)
+- reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
+- bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
+-
+- /* program speed
+- - needed only if the speed is greater than 1G (2.5G or 10G) */
+- if (bp->req_line_speed > SPEED_1000) {
+- MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_SERDES_DIGITAL);
+- bnx2x_mdio22_read(bp, MDIO_SERDES_DIGITAL_MISC1, ®_val);
+- /* clearing the speed value before setting the right speed */
+- reg_val &= ~MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK;
+- reg_val |= (MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_156_25M |
+- MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL);
+- if (bp->req_line_speed == SPEED_10000)
+- reg_val |=
+- MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4;
+- bnx2x_mdio22_write(bp, MDIO_SERDES_DIGITAL_MISC1, reg_val);
+- }
+-}
+-
+-static void bnx2x_set_brcm_cl37_advertisment(struct bnx2x *bp)
+-{
+- u32 val = 0;
+-
+- /* configure the 48 bits for BAM AN */
+- MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_OVER_1G);
+-
+- /* set extended capabilities */
+- if (bp->advertising & ADVERTISED_2500baseX_Full)
+- val |= MDIO_OVER_1G_UP1_2_5G;
+- if (bp->advertising & ADVERTISED_10000baseT_Full)
+- val |= MDIO_OVER_1G_UP1_10G;
+- bnx2x_mdio22_write(bp, MDIO_OVER_1G_UP1, val);
+-
+- bnx2x_mdio22_write(bp, MDIO_OVER_1G_UP3, 0);
+-}
+-
+-static void bnx2x_set_ieee_aneg_advertisment(struct bnx2x *bp)
+-{
+- u32 an_adv;
+-
+- /* for AN, we are always publishing full duplex */
+- an_adv = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
+-
+- /* resolve pause mode and advertisement
+- * Please refer to Table 28B-3 of the 802.3ab-1999 spec */
+- if (bp->req_autoneg & AUTONEG_FLOW_CTRL) {
+- switch (bp->req_flow_ctrl) {
+- case FLOW_CTRL_AUTO:
+- if (bp->dev->mtu <= 4500) {
+- an_adv |=
+- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
+- bp->advertising |= (ADVERTISED_Pause |
+- ADVERTISED_Asym_Pause);
+- } else {
+- an_adv |=
+- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
+- bp->advertising |= ADVERTISED_Asym_Pause;
+- }
+- break;
+-
+- case FLOW_CTRL_TX:
+- an_adv |=
+- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
+- bp->advertising |= ADVERTISED_Asym_Pause;
+- break;
+-
+- case FLOW_CTRL_RX:
+- if (bp->dev->mtu <= 4500) {
+- an_adv |=
+- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
+- bp->advertising |= (ADVERTISED_Pause |
+- ADVERTISED_Asym_Pause);
+- } else {
+- an_adv |=
+- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
+- bp->advertising &= ~(ADVERTISED_Pause |
+- ADVERTISED_Asym_Pause);
+- }
+- break;
+-
+- case FLOW_CTRL_BOTH:
+- if (bp->dev->mtu <= 4500) {
+- an_adv |=
+- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
+- bp->advertising |= (ADVERTISED_Pause |
+- ADVERTISED_Asym_Pause);
+- } else {
+- an_adv |=
+- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
+- bp->advertising |= ADVERTISED_Asym_Pause;
+- }
+- break;
+-
+- case FLOW_CTRL_NONE:
+- default:
+- an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
+- bp->advertising &= ~(ADVERTISED_Pause |
+- ADVERTISED_Asym_Pause);
+- break;
+- }
+- } else { /* forced mode */
+- switch (bp->req_flow_ctrl) {
+- case FLOW_CTRL_AUTO:
+- DP(NETIF_MSG_LINK, "req_flow_ctrl 0x%x while"
+- " req_autoneg 0x%x\n",
+- bp->req_flow_ctrl, bp->req_autoneg);
+- break;
+-
+- case FLOW_CTRL_TX:
+- an_adv |=
+- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
+- bp->advertising |= ADVERTISED_Asym_Pause;
+- break;
+-
+- case FLOW_CTRL_RX:
+- case FLOW_CTRL_BOTH:
+- an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
+- bp->advertising |= (ADVERTISED_Pause |
+- ADVERTISED_Asym_Pause);
+- break;
+-
+- case FLOW_CTRL_NONE:
+- default:
+- an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
+- bp->advertising &= ~(ADVERTISED_Pause |
+- ADVERTISED_Asym_Pause);
+- break;
+- }
+- }
+-
+- MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
+- bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_AUTO_NEG_ADV, an_adv);
+-}
+-
+-static void bnx2x_restart_autoneg(struct bnx2x *bp)
+-{
+- if (bp->autoneg & AUTONEG_CL73) {
+- /* enable and restart clause 73 aneg */
+- u32 an_ctrl;
+-
+- MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_IEEEB0);
+- bnx2x_mdio22_read(bp, MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
+- &an_ctrl);
+- bnx2x_mdio22_write(bp, MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
+- (an_ctrl |
+- MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN |
+- MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN));
+-
+- } else {
+- /* Enable and restart BAM/CL37 aneg */
+- u32 mii_control;
+-
+- MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
+- bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
+- &mii_control);
+- bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
+- (mii_control |
+- MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
+- MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN));
+- }
+-}
+-
+-static void bnx2x_initialize_sgmii_process(struct bnx2x *bp)
+-{
+- u32 control1;
+-
+- /* in SGMII mode, the unicore is always slave */
+- MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_SERDES_DIGITAL);
+- bnx2x_mdio22_read(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
+- &control1);
+- control1 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT;
+- /* set sgmii mode (and not fiber) */
+- control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE |
+- MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET |
+- MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE);
+- bnx2x_mdio22_write(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
+- control1);
+-
+- /* if forced speed */
+- if (!(bp->req_autoneg & AUTONEG_SPEED)) {
+- /* set speed, disable autoneg */
+- u32 mii_control;
+-
+- MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
+- bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
+- &mii_control);
+- mii_control &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
+- MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK |
+- MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX);
+-
+- switch (bp->req_line_speed) {
+- case SPEED_100:
+- mii_control |=
+- MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_100;
+- break;
+- case SPEED_1000:
+- mii_control |=
+- MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_1000;
+- break;
+- case SPEED_10:
+- /* there is nothing to set for 10M */
+- break;
+- default:
+- /* invalid speed for SGMII */
+- DP(NETIF_MSG_LINK, "Invalid req_line_speed 0x%x\n",
+- bp->req_line_speed);
+- break;
+- }
+-
+- /* setting the full duplex */
+- if (bp->req_duplex == DUPLEX_FULL)
+- mii_control |=
+- MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
+- bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
+- mii_control);
+-
+- } else { /* AN mode */
+- /* enable and restart AN */
+- bnx2x_restart_autoneg(bp);
+- }
+-}
+-
+-static void bnx2x_link_int_enable(struct bnx2x *bp)
+-{
+- int port = bp->port;
+- u32 ext_phy_type;
+- u32 mask;
+-
+- /* setting the status to report on link up
+- for either XGXS or SerDes */
+- bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
+- (NIG_STATUS_XGXS0_LINK10G |
+- NIG_STATUS_XGXS0_LINK_STATUS |
+- NIG_STATUS_SERDES0_LINK_STATUS));
+-
+- if (bp->phy_flags & PHY_XGXS_FLAG) {
+- mask = (NIG_MASK_XGXS0_LINK10G |
+- NIG_MASK_XGXS0_LINK_STATUS);
+- DP(NETIF_MSG_LINK, "enabled XGXS interrupt\n");
+- ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
+- if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
+- (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
+- (ext_phy_type !=
+- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)) {
+- mask |= NIG_MASK_MI_INT;
+- DP(NETIF_MSG_LINK, "enabled external phy int\n");
+- }
+-
+- } else { /* SerDes */
+- mask = NIG_MASK_SERDES0_LINK_STATUS;
+- DP(NETIF_MSG_LINK, "enabled SerDes interrupt\n");
+- ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
+- if ((ext_phy_type !=
+- PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT) &&
+- (ext_phy_type !=
+- PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN)) {
+- mask |= NIG_MASK_MI_INT;
+- DP(NETIF_MSG_LINK, "enabled external phy int\n");
+- }
+- }
+- bnx2x_bits_en(bp,
+- NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
+- mask);
+- DP(NETIF_MSG_LINK, "port %x, %s, int_status 0x%x,"
+- " int_mask 0x%x, MI_INT %x, SERDES_LINK %x,"
+- " 10G %x, XGXS_LINK %x\n", port,
+- (bp->phy_flags & PHY_XGXS_FLAG)? "XGXS":"SerDes",
+- REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4),
+- REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4),
+- REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18),
+- REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c),
+- REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
+- REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68)
+- );
+-}
+-
+-static void bnx2x_bcm8072_external_rom_boot(struct bnx2x *bp)
+-{
+- u32 ext_phy_addr = ((bp->ext_phy_config &
+- PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
+- PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
+- u32 fw_ver1, fw_ver2;
+-
+- /* Need to wait 200ms after reset */
+- msleep(200);
+- /* Boot port from external ROM
+- * Set ser_boot_ctl bit in the MISC_CTRL1 register
+- */
+- bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
+- EXT_PHY_KR_PMA_PMD_DEVAD,
+- EXT_PHY_KR_MISC_CTRL1, 0x0001);
+-
+- /* Reset internal microprocessor */
+- bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
+- EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_GEN_CTRL,
+- EXT_PHY_KR_ROM_RESET_INTERNAL_MP);
+- /* set micro reset = 0 */
+- bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
+- EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_GEN_CTRL,
+- EXT_PHY_KR_ROM_MICRO_RESET);
+- /* Reset internal microprocessor */
+- bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
+- EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_GEN_CTRL,
+- EXT_PHY_KR_ROM_RESET_INTERNAL_MP);
+- /* wait for 100ms for code download via SPI port */
+- msleep(100);
+-
+- /* Clear ser_boot_ctl bit */
+- bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
+- EXT_PHY_KR_PMA_PMD_DEVAD,
+- EXT_PHY_KR_MISC_CTRL1, 0x0000);
+- /* Wait 100ms */
+- msleep(100);
+-
+- /* Print the PHY FW version */
+- bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0, ext_phy_addr,
+- EXT_PHY_KR_PMA_PMD_DEVAD,
+- 0xca19, &fw_ver1);
+- bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0, ext_phy_addr,
+- EXT_PHY_KR_PMA_PMD_DEVAD,
+- 0xca1a, &fw_ver2);
+- DP(NETIF_MSG_LINK,
+- "8072 FW version 0x%x:0x%x\n", fw_ver1, fw_ver2);
+-}
+-
+-static void bnx2x_bcm8072_force_10G(struct bnx2x *bp)
+-{
+- u32 ext_phy_addr = ((bp->ext_phy_config &
+- PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
+- PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
+-
+- /* Force KR or KX */
+- bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
+- EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_CTRL,
+- 0x2040);
+- bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
+- EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_CTRL2,
+- 0x000b);
+- bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
+- EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_PMD_CTRL,
+- 0x0000);
+- bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
+- EXT_PHY_KR_AUTO_NEG_DEVAD, EXT_PHY_KR_CTRL,
+- 0x0000);
+-}
+-
+-static void bnx2x_ext_phy_init(struct bnx2x *bp)
+-{
+- u32 ext_phy_type;
+- u32 ext_phy_addr;
+- u32 cnt;
+- u32 ctrl;
+- u32 val = 0;
+-
+- if (bp->phy_flags & PHY_XGXS_FLAG) {
+- ext_phy_addr = ((bp->ext_phy_config &
+- PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
+- PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
+-
+- ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
+- /* Make sure that the soft reset is off (expect for the 8072:
+- * due to the lock, it will be done inside the specific
+- * handling)
+- */
+- if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
+- (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
+- (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN) &&
+- (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072)) {
+- /* Wait for soft reset to get cleared upto 1 sec */
+- for (cnt = 0; cnt < 1000; cnt++) {
+- bnx2x_mdio45_read(bp, ext_phy_addr,
+- EXT_PHY_OPT_PMA_PMD_DEVAD,
+- EXT_PHY_OPT_CNTL, &ctrl);
+- if (!(ctrl & (1<<15)))
+- break;
+- msleep(1);
+- }
+- DP(NETIF_MSG_LINK,
+- "control reg 0x%x (after %d ms)\n", ctrl, cnt);
+- }
+-
+- switch (ext_phy_type) {
+- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
+- DP(NETIF_MSG_LINK, "XGXS Direct\n");
+- break;
+-
+- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
+- DP(NETIF_MSG_LINK, "XGXS 8705\n");
+-
+- bnx2x_mdio45_vwrite(bp, ext_phy_addr,
+- EXT_PHY_OPT_PMA_PMD_DEVAD,
+- EXT_PHY_OPT_PMD_MISC_CNTL,
+- 0x8288);
+- bnx2x_mdio45_vwrite(bp, ext_phy_addr,
+- EXT_PHY_OPT_PMA_PMD_DEVAD,
+- EXT_PHY_OPT_PHY_IDENTIFIER,
+- 0x7fbf);
+- bnx2x_mdio45_vwrite(bp, ext_phy_addr,
+- EXT_PHY_OPT_PMA_PMD_DEVAD,
+- EXT_PHY_OPT_CMU_PLL_BYPASS,
+- 0x0100);
+- bnx2x_mdio45_vwrite(bp, ext_phy_addr,
+- EXT_PHY_OPT_WIS_DEVAD,
+- EXT_PHY_OPT_LASI_CNTL, 0x1);
+- break;
+-
+- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
+- DP(NETIF_MSG_LINK, "XGXS 8706\n");
+-
+- if (!(bp->req_autoneg & AUTONEG_SPEED)) {
+- /* Force speed */
+- if (bp->req_line_speed == SPEED_10000) {
+- DP(NETIF_MSG_LINK,
+- "XGXS 8706 force 10Gbps\n");
+- bnx2x_mdio45_vwrite(bp, ext_phy_addr,
+- EXT_PHY_OPT_PMA_PMD_DEVAD,
+- EXT_PHY_OPT_PMD_DIGITAL_CNT,
+- 0x400);
+- } else {
+- /* Force 1Gbps */
+- DP(NETIF_MSG_LINK,
+- "XGXS 8706 force 1Gbps\n");
+-
+- bnx2x_mdio45_vwrite(bp, ext_phy_addr,
+- EXT_PHY_OPT_PMA_PMD_DEVAD,
+- EXT_PHY_OPT_CNTL,
+- 0x0040);
+-
+- bnx2x_mdio45_vwrite(bp, ext_phy_addr,
+- EXT_PHY_OPT_PMA_PMD_DEVAD,
+- EXT_PHY_OPT_CNTL2,
+- 0x000D);
+- }
+-
+- /* Enable LASI */
+- bnx2x_mdio45_vwrite(bp, ext_phy_addr,
+- EXT_PHY_OPT_PMA_PMD_DEVAD,
+- EXT_PHY_OPT_LASI_CNTL,
+- 0x1);
+- } else {
+- /* AUTONEG */
+- /* Allow CL37 through CL73 */
+- DP(NETIF_MSG_LINK, "XGXS 8706 AutoNeg\n");
+- bnx2x_mdio45_vwrite(bp, ext_phy_addr,
+- EXT_PHY_AUTO_NEG_DEVAD,
+- EXT_PHY_OPT_AN_CL37_CL73,
+- 0x040c);
+-
+- /* Enable Full-Duplex advertisment on CL37 */
+- bnx2x_mdio45_vwrite(bp, ext_phy_addr,
+- EXT_PHY_AUTO_NEG_DEVAD,
+- EXT_PHY_OPT_AN_CL37_FD,
+- 0x0020);
+- /* Enable CL37 AN */
+- bnx2x_mdio45_vwrite(bp, ext_phy_addr,
+- EXT_PHY_AUTO_NEG_DEVAD,
+- EXT_PHY_OPT_AN_CL37_AN,
+- 0x1000);
+- /* Advertise 10G/1G support */
+- if (bp->advertising &
+- ADVERTISED_1000baseT_Full)
+- val = (1<<5);
+- if (bp->advertising &
+- ADVERTISED_10000baseT_Full)
+- val |= (1<<7);
+-
+- bnx2x_mdio45_vwrite(bp, ext_phy_addr,
+- EXT_PHY_AUTO_NEG_DEVAD,
+- EXT_PHY_OPT_AN_ADV, val);
+- /* Enable LASI */
+- bnx2x_mdio45_vwrite(bp, ext_phy_addr,
+- EXT_PHY_OPT_PMA_PMD_DEVAD,
+- EXT_PHY_OPT_LASI_CNTL,
+- 0x1);
+-
+- /* Enable clause 73 AN */
+- bnx2x_mdio45_write(bp, ext_phy_addr,
+- EXT_PHY_AUTO_NEG_DEVAD,
+- EXT_PHY_OPT_CNTL,
+- 0x1200);
+- }
+- break;
+-
+- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
+- bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
+- /* Wait for soft reset to get cleared upto 1 sec */
+- for (cnt = 0; cnt < 1000; cnt++) {
+- bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
+- ext_phy_addr,
+- EXT_PHY_OPT_PMA_PMD_DEVAD,
+- EXT_PHY_OPT_CNTL, &ctrl);
+- if (!(ctrl & (1<<15)))
+- break;
+- msleep(1);
+- }
+- DP(NETIF_MSG_LINK,
+- "8072 control reg 0x%x (after %d ms)\n",
+- ctrl, cnt);
+-
+- bnx2x_bcm8072_external_rom_boot(bp);
+- DP(NETIF_MSG_LINK, "Finshed loading 8072 KR ROM\n");
+-
+- /* enable LASI */
+- bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
+- ext_phy_addr,
+- EXT_PHY_KR_PMA_PMD_DEVAD,
+- 0x9000, 0x0400);
+- bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
+- ext_phy_addr,
+- EXT_PHY_KR_PMA_PMD_DEVAD,
+- EXT_PHY_KR_LASI_CNTL, 0x0004);
+-
+- /* If this is forced speed, set to KR or KX
+- * (all other are not supported)
+- */
+- if (!(bp->req_autoneg & AUTONEG_SPEED)) {
+- if (bp->req_line_speed == SPEED_10000) {
+- bnx2x_bcm8072_force_10G(bp);
+- DP(NETIF_MSG_LINK,
+- "Forced speed 10G on 8072\n");
+- /* unlock */
+- bnx2x_hw_unlock(bp,
+- HW_LOCK_RESOURCE_8072_MDIO);
+- break;
+- } else
+- val = (1<<5);
+- } else {
+-
+- /* Advertise 10G/1G support */
+- if (bp->advertising &
+- ADVERTISED_1000baseT_Full)
+- val = (1<<5);
+- if (bp->advertising &
+- ADVERTISED_10000baseT_Full)
+- val |= (1<<7);
+- }
+- bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
+- ext_phy_addr,
+- EXT_PHY_KR_AUTO_NEG_DEVAD,
+- 0x11, val);
+- /* Add support for CL37 ( passive mode ) I */
+- bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
+- ext_phy_addr,
+- EXT_PHY_KR_AUTO_NEG_DEVAD,
+- 0x8370, 0x040c);
+- /* Add support for CL37 ( passive mode ) II */
+- bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
+- ext_phy_addr,
+- EXT_PHY_KR_AUTO_NEG_DEVAD,
+- 0xffe4, 0x20);
+- /* Add support for CL37 ( passive mode ) III */
+- bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
+- ext_phy_addr,
+- EXT_PHY_KR_AUTO_NEG_DEVAD,
+- 0xffe0, 0x1000);
+- /* Restart autoneg */
+- msleep(500);
+- bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
+- ext_phy_addr,
+- EXT_PHY_KR_AUTO_NEG_DEVAD,
+- EXT_PHY_KR_CTRL, 0x1200);
+- DP(NETIF_MSG_LINK, "8072 Autoneg Restart: "
+- "1G %ssupported 10G %ssupported\n",
+- (val & (1<<5)) ? "" : "not ",
+- (val & (1<<7)) ? "" : "not ");
+-
+- /* unlock */
+- bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);
+- break;
+-
+- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
+- DP(NETIF_MSG_LINK,
+- "Setting the SFX7101 LASI indication\n");
+- bnx2x_mdio45_vwrite(bp, ext_phy_addr,
+- EXT_PHY_OPT_PMA_PMD_DEVAD,
+- EXT_PHY_OPT_LASI_CNTL, 0x1);
+- DP(NETIF_MSG_LINK,
+- "Setting the SFX7101 LED to blink on traffic\n");
+- bnx2x_mdio45_vwrite(bp, ext_phy_addr,
+- EXT_PHY_OPT_PMA_PMD_DEVAD,
+- 0xC007, (1<<3));
+-
+- /* read modify write pause advertizing */
+- bnx2x_mdio45_read(bp, ext_phy_addr,
+- EXT_PHY_KR_AUTO_NEG_DEVAD,
+- EXT_PHY_KR_AUTO_NEG_ADVERT, &val);
+- val &= ~EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_BOTH;
+- /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
+- if (bp->advertising & ADVERTISED_Pause)
+- val |= EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE;
+-
+- if (bp->advertising & ADVERTISED_Asym_Pause) {
+- val |=
+- EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_ASYMMETRIC;
+- }
+- DP(NETIF_MSG_LINK, "SFX7101 AN advertize 0x%x\n", val);
+- bnx2x_mdio45_vwrite(bp, ext_phy_addr,
+- EXT_PHY_KR_AUTO_NEG_DEVAD,
+- EXT_PHY_KR_AUTO_NEG_ADVERT, val);
+- /* Restart autoneg */
+- bnx2x_mdio45_read(bp, ext_phy_addr,
+- EXT_PHY_KR_AUTO_NEG_DEVAD,
+- EXT_PHY_KR_CTRL, &val);
+- val |= 0x200;
+- bnx2x_mdio45_write(bp, ext_phy_addr,
+- EXT_PHY_KR_AUTO_NEG_DEVAD,
+- EXT_PHY_KR_CTRL, val);
+- break;
+-
+- default:
+- BNX2X_ERR("BAD XGXS ext_phy_config 0x%x\n",
+- bp->ext_phy_config);
+- break;
+- }
+-
+- } else { /* SerDes */
+-/* ext_phy_addr = ((bp->ext_phy_config &
+- PORT_HW_CFG_SERDES_EXT_PHY_ADDR_MASK) >>
+- PORT_HW_CFG_SERDES_EXT_PHY_ADDR_SHIFT);
+-*/
+- ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
+- switch (ext_phy_type) {
+- case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
+- DP(NETIF_MSG_LINK, "SerDes Direct\n");
+- break;
+-
+- case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
+- DP(NETIF_MSG_LINK, "SerDes 5482\n");
+- break;
+-
+- default:
+- DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n",
+- bp->ext_phy_config);
+- break;
+- }
+- }
+-}
+-
+-static void bnx2x_ext_phy_reset(struct bnx2x *bp)
+-{
+- u32 ext_phy_type;
+- u32 ext_phy_addr = ((bp->ext_phy_config &
+- PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
+- PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
+- u32 board = (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK);
+-
+- /* The PHY reset is controled by GPIO 1
+- * Give it 1ms of reset pulse
+- */
+- if ((board != SHARED_HW_CFG_BOARD_TYPE_BCM957710T1002G) &&
+- (board != SHARED_HW_CFG_BOARD_TYPE_BCM957710T1003G)) {
+- bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
+- MISC_REGISTERS_GPIO_OUTPUT_LOW);
+- msleep(1);
+- bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
+- MISC_REGISTERS_GPIO_OUTPUT_HIGH);
+- }
+-
+- if (bp->phy_flags & PHY_XGXS_FLAG) {
+- ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
+- switch (ext_phy_type) {
+- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
+- DP(NETIF_MSG_LINK, "XGXS Direct\n");
+- break;
+-
+- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
+- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
+- DP(NETIF_MSG_LINK, "XGXS 8705/8706\n");
+- bnx2x_mdio45_write(bp, ext_phy_addr,
+- EXT_PHY_OPT_PMA_PMD_DEVAD,
+- EXT_PHY_OPT_CNTL, 0xa040);
+- break;
+-
+- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
+- DP(NETIF_MSG_LINK, "XGXS 8072\n");
+- bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
+- bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
+- ext_phy_addr,
+- EXT_PHY_KR_PMA_PMD_DEVAD,
+- 0, 1<<15);
+- bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);
+- break;
+-
+- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
+- DP(NETIF_MSG_LINK, "XGXS SFX7101\n");
+- break;
+-
+- default:
+- DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
+- bp->ext_phy_config);
+- break;
+- }
+-
+- } else { /* SerDes */
+- ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
+- switch (ext_phy_type) {
+- case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
+- DP(NETIF_MSG_LINK, "SerDes Direct\n");
+- break;
+-
+- case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
+- DP(NETIF_MSG_LINK, "SerDes 5482\n");
+- break;
+-
+- default:
+- DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n",
+- bp->ext_phy_config);
+- break;
+- }
+- }
+-}
+-
+-static void bnx2x_link_initialize(struct bnx2x *bp)
+-{
+- int port = bp->port;
+-
+- /* disable attentions */
+- bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
+- (NIG_MASK_XGXS0_LINK_STATUS |
+- NIG_MASK_XGXS0_LINK10G |
+- NIG_MASK_SERDES0_LINK_STATUS |
+- NIG_MASK_MI_INT));
+-
+- /* Activate the external PHY */
+- bnx2x_ext_phy_reset(bp);
+-
+- bnx2x_set_aer_mmd(bp);
+-
+- if (bp->phy_flags & PHY_XGXS_FLAG)
+- bnx2x_set_master_ln(bp);
+-
+- /* reset the SerDes and wait for reset bit return low */
+- bnx2x_reset_unicore(bp);
+-
+- bnx2x_set_aer_mmd(bp);
+-
+- /* setting the masterLn_def again after the reset */
+- if (bp->phy_flags & PHY_XGXS_FLAG) {
+- bnx2x_set_master_ln(bp);
+- bnx2x_set_swap_lanes(bp);
+- }
+-
+- /* Set Parallel Detect */
+- if (bp->req_autoneg & AUTONEG_SPEED)
+- bnx2x_set_parallel_detection(bp);
+-
+- if (bp->phy_flags & PHY_XGXS_FLAG) {
+- if (bp->req_line_speed &&
+- bp->req_line_speed < SPEED_1000) {
+- bp->phy_flags |= PHY_SGMII_FLAG;
+- } else {
+- bp->phy_flags &= ~PHY_SGMII_FLAG;
+- }
+- }
+-
+- if (!(bp->phy_flags & PHY_SGMII_FLAG)) {
+- u16 bank, rx_eq;
+-
+- rx_eq = ((bp->serdes_config &
+- PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_MASK) >>
+- PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT);
+-
+- DP(NETIF_MSG_LINK, "setting rx eq to %d\n", rx_eq);
+- for (bank = MDIO_REG_BANK_RX0; bank <= MDIO_REG_BANK_RX_ALL;
+- bank += (MDIO_REG_BANK_RX1 - MDIO_REG_BANK_RX0)) {
+- MDIO_SET_REG_BANK(bp, bank);
+- bnx2x_mdio22_write(bp, MDIO_RX0_RX_EQ_BOOST,
+- ((rx_eq &
+- MDIO_RX0_RX_EQ_BOOST_EQUALIZER_CTRL_MASK) |
+- MDIO_RX0_RX_EQ_BOOST_OFFSET_CTRL));
+- }
+-
+- /* forced speed requested? */
+- if (!(bp->req_autoneg & AUTONEG_SPEED)) {
+- DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
+-
+- /* disable autoneg */
+- bnx2x_set_autoneg(bp);
+-
+- /* program speed and duplex */
+- bnx2x_program_serdes(bp);
+-
+- } else { /* AN_mode */
+- DP(NETIF_MSG_LINK, "not SGMII, AN\n");
+-
+- /* AN enabled */
+- bnx2x_set_brcm_cl37_advertisment(bp);
+-
+- /* program duplex & pause advertisement (for aneg) */
+- bnx2x_set_ieee_aneg_advertisment(bp);
+-
+- /* enable autoneg */
+- bnx2x_set_autoneg(bp);
+-
+- /* enable and restart AN */
+- bnx2x_restart_autoneg(bp);
+- }
+-
+- } else { /* SGMII mode */
+- DP(NETIF_MSG_LINK, "SGMII\n");
+-
+- bnx2x_initialize_sgmii_process(bp);
+- }
+-
+- /* init ext phy and enable link state int */
+- bnx2x_ext_phy_init(bp);
+-
+- /* enable the interrupt */
+- bnx2x_link_int_enable(bp);
+-}
+-
+-static void bnx2x_phy_deassert(struct bnx2x *bp)
+-{
+- int port = bp->port;
+- u32 val;
+-
+- if (bp->phy_flags & PHY_XGXS_FLAG) {
+- DP(NETIF_MSG_LINK, "XGXS\n");
+- val = XGXS_RESET_BITS;
+-
+- } else { /* SerDes */
+- DP(NETIF_MSG_LINK, "SerDes\n");
+- val = SERDES_RESET_BITS;
+- }
+-
+- val = val << (port*16);
+-
+- /* reset and unreset the SerDes/XGXS */
+- REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val);
+- msleep(5);
+- REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
+-}
+-
+-static int bnx2x_phy_init(struct bnx2x *bp)
+-{
+- DP(NETIF_MSG_LINK, "started\n");
+- if (CHIP_REV(bp) == CHIP_REV_FPGA) {
+- bp->phy_flags |= PHY_EMAC_FLAG;
+- bp->link_up = 1;
+- bp->line_speed = SPEED_10000;
+- bp->duplex = DUPLEX_FULL;
+- NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + bp->port*4, 0);
+- bnx2x_emac_enable(bp);
+- bnx2x_link_report(bp);
+- return 0;
+-
+- } else if (CHIP_REV(bp) == CHIP_REV_EMUL) {
+- bp->phy_flags |= PHY_BMAC_FLAG;
+- bp->link_up = 1;
+- bp->line_speed = SPEED_10000;
+- bp->duplex = DUPLEX_FULL;
+- NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + bp->port*4, 0);
+- bnx2x_bmac_enable(bp, 0);
+- bnx2x_link_report(bp);
+- return 0;
+-
+- } else {
+- bnx2x_phy_deassert(bp);
+- bnx2x_link_initialize(bp);
+- }
+-
+- return 0;
+-}
+-
+-static void bnx2x_link_reset(struct bnx2x *bp)
+-{
+- int port = bp->port;
+- u32 board = (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK);
+-
+- /* update shared memory */
+- bp->link_status = 0;
+- bnx2x_update_mng(bp);
+-
+- /* disable attentions */
+- bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
+- (NIG_MASK_XGXS0_LINK_STATUS |
+- NIG_MASK_XGXS0_LINK10G |
+- NIG_MASK_SERDES0_LINK_STATUS |
+- NIG_MASK_MI_INT));
+-
+- /* activate nig drain */
+- NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
+-
+- /* disable nig egress interface */
+- NIG_WR(NIG_REG_BMAC0_OUT_EN + port*4, 0);
+- NIG_WR(NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0);
+-
+- /* Stop BigMac rx */
+- bnx2x_bmac_rx_disable(bp);
+-
+- /* disable emac */
+- NIG_WR(NIG_REG_NIG_EMAC0_EN + port*4, 0);
+-
+- msleep(10);
+-
+- /* The PHY reset is controled by GPIO 1
+- * Hold it as output low
+- */
+- if ((board != SHARED_HW_CFG_BOARD_TYPE_BCM957710T1002G) &&
+- (board != SHARED_HW_CFG_BOARD_TYPE_BCM957710T1003G)) {
+- bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
+- MISC_REGISTERS_GPIO_OUTPUT_LOW);
+- DP(NETIF_MSG_LINK, "reset external PHY\n");
+- }
+-
+- /* reset the SerDes/XGXS */
+- REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR,
+- (0x1ff << (port*16)));
+-
+- /* reset BigMac */
+- REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+- (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+-
+- /* disable nig ingress interface */
+- NIG_WR(NIG_REG_BMAC0_IN_EN + port*4, 0);
+- NIG_WR(NIG_REG_EMAC0_IN_EN + port*4, 0);
+-
+- /* set link down */
+- bp->link_up = 0;
+-}
+-
+-#ifdef BNX2X_XGXS_LB
+-static void bnx2x_set_xgxs_loopback(struct bnx2x *bp, int is_10g)
+-{
+- int port = bp->port;
+-
+- if (is_10g) {
+- u32 md_devad;
+-
+- DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n");
+-
+- /* change the uni_phy_addr in the nig */
+- REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18),
+- &md_devad);
+- NIG_WR(NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, 0x5);
+-
+- /* change the aer mmd */
+- MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_AER_BLOCK);
+- bnx2x_mdio22_write(bp, MDIO_AER_BLOCK_AER_REG, 0x2800);
+-
+- /* config combo IEEE0 control reg for loopback */
+- MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_IEEEB0);
+- bnx2x_mdio22_write(bp, MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
+- 0x6041);
+-
+- /* set aer mmd back */
+- bnx2x_set_aer_mmd(bp);
+-
+- /* and md_devad */
+- NIG_WR(NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, md_devad);
+-
+- } else {
+- u32 mii_control;
+-
+- DP(NETIF_MSG_LINK, "XGXS 1G loopback enable\n");
+-
+- MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
+- bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
+- &mii_control);
+- bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
+- (mii_control |
+- MDIO_COMBO_IEEO_MII_CONTROL_LOOPBACK));
+- }
+-}
+-#endif
+-
+-/* end of PHY/MAC */
+-
+-/* slow path */
+-
+-/*
+- * General service functions
+- */
+-
+-/* the slow path queue is odd since completions arrive on the fastpath ring */
+-static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
+- u32 data_hi, u32 data_lo, int common)
+-{
+- int port = bp->port;
+-
+- DP(NETIF_MSG_TIMER,
+- "spe (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
+- (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
+- (void *)bp->spq_prod_bd - (void *)bp->spq), command,
+- HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
+-
+-#ifdef BNX2X_STOP_ON_ERROR
+- if (unlikely(bp->panic))
+- return -EIO;
+-#endif
+-
+- spin_lock(&bp->spq_lock);
+-
+- if (!bp->spq_left) {
+- BNX2X_ERR("BUG! SPQ ring full!\n");
+- spin_unlock(&bp->spq_lock);
+- bnx2x_panic();
+- return -EBUSY;
+- }
+-
+- /* CID needs port number to be encoded int it */
+- bp->spq_prod_bd->hdr.conn_and_cmd_data =
+- cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
+- HW_CID(bp, cid)));
+- bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
+- if (common)
+- bp->spq_prod_bd->hdr.type |=
+- cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
+-
+- bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
+- bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
+-
+- bp->spq_left--;
+-
+- if (bp->spq_prod_bd == bp->spq_last_bd) {
+- bp->spq_prod_bd = bp->spq;
+- bp->spq_prod_idx = 0;
+- DP(NETIF_MSG_TIMER, "end of spq\n");
+-
+- } else {
+- bp->spq_prod_bd++;
+- bp->spq_prod_idx++;
+- }
+-
+- REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(port),
+- bp->spq_prod_idx);
+-
+- spin_unlock(&bp->spq_lock);
+- return 0;
+-}
+-
+-/* acquire split MCP access lock register */
+-static int bnx2x_lock_alr(struct bnx2x *bp)
+-{
+- int rc = 0;
+- u32 i, j, val;
+-
+- might_sleep();
+- i = 100;
+- for (j = 0; j < i*10; j++) {
+- val = (1UL << 31);
+- REG_WR(bp, GRCBASE_MCP + 0x9c, val);
+- val = REG_RD(bp, GRCBASE_MCP + 0x9c);
+- if (val & (1L << 31))
+- break;
+-
+- msleep(5);
+- }
+-
+- if (!(val & (1L << 31))) {
+- BNX2X_ERR("Cannot acquire nvram interface\n");
+-
+- rc = -EBUSY;
+- }
+-
+- return rc;
+-}
+-
+-/* Release split MCP access lock register */
+-static void bnx2x_unlock_alr(struct bnx2x *bp)
+-{
+- u32 val = 0;
+-
+- REG_WR(bp, GRCBASE_MCP + 0x9c, val);
+-}
+-
+-static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
+-{
+- struct host_def_status_block *def_sb = bp->def_status_blk;
+- u16 rc = 0;
+-
+- barrier(); /* status block is written to by the chip */
+-
+- if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
+- bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
+- rc |= 1;
+- }
+- if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
+- bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
+- rc |= 2;
+- }
+- if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
+- bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
+- rc |= 4;
+- }
+- if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
+- bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
+- rc |= 8;
+- }
+- if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
+- bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
+- rc |= 16;
+- }
+- return rc;
+-}
+-
+-/*
+- * slow path service functions
+- */
+-
+-static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
+-{
+- int port = bp->port;
+- u32 igu_addr = (IGU_ADDR_ATTN_BITS_SET + IGU_PORT_BASE * port) * 8;
+- u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
+- MISC_REG_AEU_MASK_ATTN_FUNC_0;
+- u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
+- NIG_REG_MASK_INTERRUPT_PORT0;
+-
+- if (~bp->aeu_mask & (asserted & 0xff))
+- BNX2X_ERR("IGU ERROR\n");
+- if (bp->attn_state & asserted)
+- BNX2X_ERR("IGU ERROR\n");
+-
+- DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
+- bp->aeu_mask, asserted);
+- bp->aeu_mask &= ~(asserted & 0xff);
+- DP(NETIF_MSG_HW, "after masking: aeu_mask %x\n", bp->aeu_mask);
+-
+- REG_WR(bp, aeu_addr, bp->aeu_mask);
+-
+- bp->attn_state |= asserted;
+-
+- if (asserted & ATTN_HARD_WIRED_MASK) {
+- if (asserted & ATTN_NIG_FOR_FUNC) {
+-
+- /* save nig interrupt mask */
+- bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
+- REG_WR(bp, nig_int_mask_addr, 0);
+-
+- bnx2x_link_update(bp);
+-
+- /* handle unicore attn? */
+- }
+- if (asserted & ATTN_SW_TIMER_4_FUNC)
+- DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
+-
+- if (asserted & GPIO_2_FUNC)
+- DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
+-
+- if (asserted & GPIO_3_FUNC)
+- DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
+-
+- if (asserted & GPIO_4_FUNC)
+- DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
+-
+- if (port == 0) {
+- if (asserted & ATTN_GENERAL_ATTN_1) {
+- DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
+- REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
+- }
+- if (asserted & ATTN_GENERAL_ATTN_2) {
+- DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
+- REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
+- }
+- if (asserted & ATTN_GENERAL_ATTN_3) {
+- DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
+- REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
+- }
+- } else {
+- if (asserted & ATTN_GENERAL_ATTN_4) {
+- DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
+- REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
+- }
+- if (asserted & ATTN_GENERAL_ATTN_5) {
+- DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
+- REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
+- }
+- if (asserted & ATTN_GENERAL_ATTN_6) {
+- DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
+- REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
+- }
+- }
+-
+- } /* if hardwired */
+-
+- DP(NETIF_MSG_HW, "about to mask 0x%08x at IGU addr 0x%x\n",
+- asserted, BAR_IGU_INTMEM + igu_addr);
+- REG_WR(bp, BAR_IGU_INTMEM + igu_addr, asserted);
+-
+- /* now set back the mask */
+- if (asserted & ATTN_NIG_FOR_FUNC)
+- REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
+-}
+-
+-static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
+-{
+- int port = bp->port;
+- int reg_offset;
+- u32 val;
+-
+- if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
+-
+- reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
+- MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
+-
+- val = REG_RD(bp, reg_offset);
+- val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
+- REG_WR(bp, reg_offset, val);
+-
+- BNX2X_ERR("SPIO5 hw attention\n");
+-
+- switch (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
+- case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
+- /* Fan failure attention */
+-
+- /* The PHY reset is controled by GPIO 1 */
+- bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
+- MISC_REGISTERS_GPIO_OUTPUT_LOW);
+- /* Low power mode is controled by GPIO 2 */
+- bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+- MISC_REGISTERS_GPIO_OUTPUT_LOW);
+- /* mark the failure */
+- bp->ext_phy_config &=
+- ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
+- bp->ext_phy_config |=
+- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
+- SHMEM_WR(bp,
+- dev_info.port_hw_config[port].
+- external_phy_config,
+- bp->ext_phy_config);
+- /* log the failure */
+- printk(KERN_ERR PFX "Fan Failure on Network"
+- " Controller %s has caused the driver to"
+- " shutdown the card to prevent permanent"
+- " damage. Please contact Dell Support for"
+- " assistance\n", bp->dev->name);
+- break;
+-
+- default:
+- break;
+- }
+- }
+-}
+-
+-static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
+-{
+- u32 val;
+-
+- if (attn & BNX2X_DOORQ_ASSERT) {
+-
+- val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
+- BNX2X_ERR("DB hw attention 0x%x\n", val);
+- /* DORQ discard attention */
+- if (val & 0x2)
+- BNX2X_ERR("FATAL error from DORQ\n");
+- }
+-}
+-
+-static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
+-{
+- u32 val;
+-
+- if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
+-
+- val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
+- BNX2X_ERR("CFC hw attention 0x%x\n", val);
+- /* CFC error attention */
+- if (val & 0x2)
+- BNX2X_ERR("FATAL error from CFC\n");
+- }
+-
+- if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
+-
+- val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
+- BNX2X_ERR("PXP hw attention 0x%x\n", val);
+- /* RQ_USDMDP_FIFO_OVERFLOW */
+- if (val & 0x18000)
+- BNX2X_ERR("FATAL error from PXP\n");
+- }
+-}
+-
+-static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
+-{
+- if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
+-
+- if (attn & BNX2X_MC_ASSERT_BITS) {
+-
+- BNX2X_ERR("MC assert!\n");
+- REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
+- REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
+- REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
+- REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
+- bnx2x_panic();
+-
+- } else if (attn & BNX2X_MCP_ASSERT) {
+-
+- BNX2X_ERR("MCP assert!\n");
+- REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
+- bnx2x_mc_assert(bp);
+-
+- } else
+- BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
+- }
+-
+- if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
+-
+- REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
+- BNX2X_ERR("LATCHED attention 0x%x (masked)\n", attn);
+- }
+-}
+-
+-static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
+-{
+- struct attn_route attn;
+- struct attn_route group_mask;
+- int port = bp->port;
+- int index;
+- u32 reg_addr;
+- u32 val;
+-
+- /* need to take HW lock because MCP or other port might also
+- try to handle this event */
+- bnx2x_lock_alr(bp);
+-
+- attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
+- attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
+- attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
+- attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
+- DP(NETIF_MSG_HW, "attn %llx\n", (unsigned long long)attn.sig[0]);
+-
+- for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
+- if (deasserted & (1 << index)) {
+- group_mask = bp->attn_group[index];
+-
+- DP(NETIF_MSG_HW, "group[%d]: %llx\n", index,
+- (unsigned long long)group_mask.sig[0]);
+-
+- bnx2x_attn_int_deasserted3(bp,
+- attn.sig[3] & group_mask.sig[3]);
+- bnx2x_attn_int_deasserted1(bp,
+- attn.sig[1] & group_mask.sig[1]);
+- bnx2x_attn_int_deasserted2(bp,
+- attn.sig[2] & group_mask.sig[2]);
+- bnx2x_attn_int_deasserted0(bp,
+- attn.sig[0] & group_mask.sig[0]);
+-
+- if ((attn.sig[0] & group_mask.sig[0] &
+- HW_INTERRUT_ASSERT_SET_0) ||
+- (attn.sig[1] & group_mask.sig[1] &
+- HW_INTERRUT_ASSERT_SET_1) ||
+- (attn.sig[2] & group_mask.sig[2] &
+- HW_INTERRUT_ASSERT_SET_2))
+- BNX2X_ERR("FATAL HW block attention"
+- " set0 0x%x set1 0x%x"
+- " set2 0x%x\n",
+- (attn.sig[0] & group_mask.sig[0] &
+- HW_INTERRUT_ASSERT_SET_0),
+- (attn.sig[1] & group_mask.sig[1] &
+- HW_INTERRUT_ASSERT_SET_1),
+- (attn.sig[2] & group_mask.sig[2] &
+- HW_INTERRUT_ASSERT_SET_2));
+-
+- if ((attn.sig[0] & group_mask.sig[0] &
+- HW_PRTY_ASSERT_SET_0) ||
+- (attn.sig[1] & group_mask.sig[1] &
+- HW_PRTY_ASSERT_SET_1) ||
+- (attn.sig[2] & group_mask.sig[2] &
+- HW_PRTY_ASSERT_SET_2))
+- BNX2X_ERR("FATAL HW block parity attention\n");
+- }
+- }
+-
+- bnx2x_unlock_alr(bp);
+-
+- reg_addr = (IGU_ADDR_ATTN_BITS_CLR + IGU_PORT_BASE * port) * 8;
+-
+- val = ~deasserted;
+-/* DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
+- val, BAR_IGU_INTMEM + reg_addr); */
+- REG_WR(bp, BAR_IGU_INTMEM + reg_addr, val);
+-
+- if (bp->aeu_mask & (deasserted & 0xff))
+- BNX2X_ERR("IGU BUG\n");
+- if (~bp->attn_state & deasserted)
+- BNX2X_ERR("IGU BUG\n");
+-
+- reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
+- MISC_REG_AEU_MASK_ATTN_FUNC_0;
+-
+- DP(NETIF_MSG_HW, "aeu_mask %x\n", bp->aeu_mask);
+- bp->aeu_mask |= (deasserted & 0xff);
+-
+- DP(NETIF_MSG_HW, "new mask %x\n", bp->aeu_mask);
+- REG_WR(bp, reg_addr, bp->aeu_mask);
+-
+- DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
+- bp->attn_state &= ~deasserted;
+- DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
+-}
+-
+-static void bnx2x_attn_int(struct bnx2x *bp)
+-{
+- /* read local copy of bits */
+- u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
+- u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
+- u32 attn_state = bp->attn_state;
+-
+- /* look for changed bits */
+- u32 asserted = attn_bits & ~attn_ack & ~attn_state;
+- u32 deasserted = ~attn_bits & attn_ack & attn_state;
+-
+- DP(NETIF_MSG_HW,
+- "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
+- attn_bits, attn_ack, asserted, deasserted);
+-
+- if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
+- BNX2X_ERR("bad attention state\n");
+-
+- /* handle bits that were raised */
+- if (asserted)
+- bnx2x_attn_int_asserted(bp, asserted);
+-
+- if (deasserted)
+- bnx2x_attn_int_deasserted(bp, deasserted);
+-}
+-
+-static void bnx2x_sp_task(struct work_struct *work)
+-{
+- struct bnx2x *bp = container_of(work, struct bnx2x, sp_task);
+- u16 status;
+-
+- /* Return here if interrupt is disabled */
+- if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
+- DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
+- return;
+- }
+-
+- status = bnx2x_update_dsb_idx(bp);
+- if (status == 0)
+- BNX2X_ERR("spurious slowpath interrupt!\n");
+-
+- DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
+-
+- /* HW attentions */
+- if (status & 0x1)
+- bnx2x_attn_int(bp);
+-
+- /* CStorm events: query_stats, port delete ramrod */
+- if (status & 0x2)
+- bp->stat_pending = 0;
+-
+- bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
+- IGU_INT_NOP, 1);
+- bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
+- IGU_INT_NOP, 1);
+- bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
+- IGU_INT_NOP, 1);
+- bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
+- IGU_INT_NOP, 1);
+- bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
+- IGU_INT_ENABLE, 1);
+-
+-}
+-
+-static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
+-{
+- struct net_device *dev = dev_instance;
+- struct bnx2x *bp = netdev_priv(dev);
+-
+- /* Return here if interrupt is disabled */
+- if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
+- DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
+- return IRQ_HANDLED;
+- }
+-
+- bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
+-
+-#ifdef BNX2X_STOP_ON_ERROR
+- if (unlikely(bp->panic))
+- return IRQ_HANDLED;
+-#endif
+-
+- schedule_work(&bp->sp_task);
+-
+- return IRQ_HANDLED;
+-}
+-
+-/* end of slow path */
+-
+-/* Statistics */
+-
+-/****************************************************************************
+-* Macros
+-****************************************************************************/
+-
+-#define UPDATE_STAT(s, t) \
+- do { \
+- estats->t += new->s - old->s; \
+- old->s = new->s; \
+- } while (0)
+-
+-/* sum[hi:lo] += add[hi:lo] */
+-#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
+- do { \
+- s_lo += a_lo; \
+- s_hi += a_hi + (s_lo < a_lo) ? 1 : 0; \
+- } while (0)
+-
+-/* difference = minuend - subtrahend */
+-#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
+- do { \
+- if (m_lo < s_lo) { /* underflow */ \
+- d_hi = m_hi - s_hi; \
+- if (d_hi > 0) { /* we can 'loan' 1 */ \
+- d_hi--; \
+- d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
+- } else { /* m_hi <= s_hi */ \
+- d_hi = 0; \
+- d_lo = 0; \
+- } \
+- } else { /* m_lo >= s_lo */ \
+- if (m_hi < s_hi) { \
+- d_hi = 0; \
+- d_lo = 0; \
+- } else { /* m_hi >= s_hi */ \
+- d_hi = m_hi - s_hi; \
+- d_lo = m_lo - s_lo; \
+- } \
+- } \
+- } while (0)
+-
+-/* minuend -= subtrahend */
+-#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
+- do { \
+- DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
+- } while (0)
+-
+-#define UPDATE_STAT64(s_hi, t_hi, s_lo, t_lo) \
+- do { \
+- DIFF_64(diff.hi, new->s_hi, old->s_hi, \
+- diff.lo, new->s_lo, old->s_lo); \
+- old->s_hi = new->s_hi; \
+- old->s_lo = new->s_lo; \
+- ADD_64(estats->t_hi, diff.hi, \
+- estats->t_lo, diff.lo); \
+- } while (0)
+-
+-/* sum[hi:lo] += add */
+-#define ADD_EXTEND_64(s_hi, s_lo, a) \
+- do { \
+- s_lo += a; \
+- s_hi += (s_lo < a) ? 1 : 0; \
+- } while (0)
+-
+-#define UPDATE_EXTEND_STAT(s, t_hi, t_lo) \
+- do { \
+- ADD_EXTEND_64(estats->t_hi, estats->t_lo, new->s); \
+- } while (0)
+-
+-#define UPDATE_EXTEND_TSTAT(s, t_hi, t_lo) \
+- do { \
+- diff = le32_to_cpu(tclient->s) - old_tclient->s; \
+- old_tclient->s = le32_to_cpu(tclient->s); \
+- ADD_EXTEND_64(estats->t_hi, estats->t_lo, diff); \
+- } while (0)
+-
+-/*
+- * General service functions
+- */
+-
+-static inline long bnx2x_hilo(u32 *hiref)
+-{
+- u32 lo = *(hiref + 1);
+-#if (BITS_PER_LONG == 64)
+- u32 hi = *hiref;
+-
+- return HILO_U64(hi, lo);
+-#else
+- return lo;
+-#endif
+-}
+-
+-/*
+- * Init service functions
+- */
+-
+-static void bnx2x_init_mac_stats(struct bnx2x *bp)
+-{
+- struct dmae_command *dmae;
+- int port = bp->port;
+- int loader_idx = port * 8;
+- u32 opcode;
+- u32 mac_addr;
+-
+- bp->executer_idx = 0;
+- if (bp->fw_mb) {
+- /* MCP */
+- opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
+- DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
+-#ifdef __BIG_ENDIAN
+- DMAE_CMD_ENDIANITY_B_DW_SWAP |
+-#else
+- DMAE_CMD_ENDIANITY_DW_SWAP |
+-#endif
+- (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
+-
+- if (bp->link_up)
+- opcode |= (DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE);
+-
+- dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+- dmae->opcode = opcode;
+- dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, eth_stats) +
+- sizeof(u32));
+- dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, eth_stats) +
+- sizeof(u32));
+- dmae->dst_addr_lo = bp->fw_mb >> 2;
+- dmae->dst_addr_hi = 0;
+- dmae->len = (offsetof(struct bnx2x_eth_stats, mac_stx_end) -
+- sizeof(u32)) >> 2;
+- if (bp->link_up) {
+- dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+- dmae->comp_addr_hi = 0;
+- dmae->comp_val = 1;
+- } else {
+- dmae->comp_addr_lo = 0;
+- dmae->comp_addr_hi = 0;
+- dmae->comp_val = 0;
+- }
+- }
+-
+- if (!bp->link_up) {
+- /* no need to collect statistics in link down */
+- return;
+- }
+-
+- opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
+- DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
+- DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
+-#ifdef __BIG_ENDIAN
+- DMAE_CMD_ENDIANITY_B_DW_SWAP |
+-#else
+- DMAE_CMD_ENDIANITY_DW_SWAP |
+-#endif
+- (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
+-
+- if (bp->phy_flags & PHY_BMAC_FLAG) {
+-
+- mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
+- NIG_REG_INGRESS_BMAC0_MEM);
+-
+- /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
+- BIGMAC_REGISTER_TX_STAT_GTBYT */
+- dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+- dmae->opcode = opcode;
+- dmae->src_addr_lo = (mac_addr +
+- BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
+- dmae->src_addr_hi = 0;
+- dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
+- dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
+- dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
+- BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
+- dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+- dmae->comp_addr_hi = 0;
+- dmae->comp_val = 1;
+-
+- /* BIGMAC_REGISTER_RX_STAT_GR64 ..
+- BIGMAC_REGISTER_RX_STAT_GRIPJ */
+- dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+- dmae->opcode = opcode;
+- dmae->src_addr_lo = (mac_addr +
+- BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
+- dmae->src_addr_hi = 0;
+- dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
+- offsetof(struct bmac_stats, rx_gr64));
+- dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
+- offsetof(struct bmac_stats, rx_gr64));
+- dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
+- BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
+- dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+- dmae->comp_addr_hi = 0;
+- dmae->comp_val = 1;
+-
+- } else if (bp->phy_flags & PHY_EMAC_FLAG) {
+-
+- mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
+-
+- /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
+- dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+- dmae->opcode = opcode;
+- dmae->src_addr_lo = (mac_addr +
+- EMAC_REG_EMAC_RX_STAT_AC) >> 2;
+- dmae->src_addr_hi = 0;
+- dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
+- dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
+- dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
+- dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+- dmae->comp_addr_hi = 0;
+- dmae->comp_val = 1;
+-
+- /* EMAC_REG_EMAC_RX_STAT_AC_28 */
+- dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+- dmae->opcode = opcode;
+- dmae->src_addr_lo = (mac_addr +
+- EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
+- dmae->src_addr_hi = 0;
+- dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
+- offsetof(struct emac_stats,
+- rx_falsecarriererrors));
+- dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
+- offsetof(struct emac_stats,
+- rx_falsecarriererrors));
+- dmae->len = 1;
+- dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+- dmae->comp_addr_hi = 0;
+- dmae->comp_val = 1;
+-
+- /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
+- dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+- dmae->opcode = opcode;
+- dmae->src_addr_lo = (mac_addr +
+- EMAC_REG_EMAC_TX_STAT_AC) >> 2;
+- dmae->src_addr_hi = 0;
+- dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
+- offsetof(struct emac_stats,
+- tx_ifhcoutoctets));
+- dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
+- offsetof(struct emac_stats,
+- tx_ifhcoutoctets));
+- dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
+- dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+- dmae->comp_addr_hi = 0;
+- dmae->comp_val = 1;
+- }
+-
+- /* NIG */
+- dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+- dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
+- DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
+- DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
+-#ifdef __BIG_ENDIAN
+- DMAE_CMD_ENDIANITY_B_DW_SWAP |
+-#else
+- DMAE_CMD_ENDIANITY_DW_SWAP |
+-#endif
+- (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
+- dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
+- NIG_REG_STAT0_BRB_DISCARD) >> 2;
+- dmae->src_addr_hi = 0;
+- dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig));
+- dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig));
+- dmae->len = (sizeof(struct nig_stats) - 2*sizeof(u32)) >> 2;
+- dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig) +
+- offsetof(struct nig_stats, done));
+- dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig) +
+- offsetof(struct nig_stats, done));
+- dmae->comp_val = 0xffffffff;
+-}
+-
+-static void bnx2x_init_stats(struct bnx2x *bp)
+-{
+- int port = bp->port;
+-
+- bp->stats_state = STATS_STATE_DISABLE;
+- bp->executer_idx = 0;
+-
+- bp->old_brb_discard = REG_RD(bp,
+- NIG_REG_STAT0_BRB_DISCARD + port*0x38);
+-
+- memset(&bp->old_bmac, 0, sizeof(struct bmac_stats));
+- memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
+- memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
+-
+- REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port), 1);
+- REG_WR(bp, BAR_XSTRORM_INTMEM +
+- XSTORM_STATS_FLAGS_OFFSET(port) + 4, 0);
+-
+- REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port), 1);
+- REG_WR(bp, BAR_TSTRORM_INTMEM +
+- TSTORM_STATS_FLAGS_OFFSET(port) + 4, 0);
+-
+- REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port), 0);
+- REG_WR(bp, BAR_CSTRORM_INTMEM +
+- CSTORM_STATS_FLAGS_OFFSET(port) + 4, 0);
+-
+- REG_WR(bp, BAR_XSTRORM_INTMEM +
+- XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port),
+- U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
+- REG_WR(bp, BAR_XSTRORM_INTMEM +
+- XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port) + 4,
+- U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
+-
+- REG_WR(bp, BAR_TSTRORM_INTMEM +
+- TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port),
+- U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
+- REG_WR(bp, BAR_TSTRORM_INTMEM +
+- TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port) + 4,
+- U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
+-}
+-
+-static void bnx2x_stop_stats(struct bnx2x *bp)
+-{
+- might_sleep();
+- if (bp->stats_state != STATS_STATE_DISABLE) {
+- int timeout = 10;
+-
+- bp->stats_state = STATS_STATE_STOP;
+- DP(BNX2X_MSG_STATS, "stats_state - STOP\n");
+-
+- while (bp->stats_state != STATS_STATE_DISABLE) {
+- if (!timeout) {
+- BNX2X_ERR("timeout waiting for stats stop\n");
+- break;
+- }
+- timeout--;
+- msleep(100);
+- }
+- }
+- DP(BNX2X_MSG_STATS, "stats_state - DISABLE\n");
+-}
+-
+-/*
+- * Statistics service functions
+- */
+-
+-static void bnx2x_update_bmac_stats(struct bnx2x *bp)
+-{
+- struct regp diff;
+- struct regp sum;
+- struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac);
+- struct bmac_stats *old = &bp->old_bmac;
+- struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
+-
+- sum.hi = 0;
+- sum.lo = 0;
+-
+- UPDATE_STAT64(tx_gtbyt.hi, total_bytes_transmitted_hi,
+- tx_gtbyt.lo, total_bytes_transmitted_lo);
+-
+- UPDATE_STAT64(tx_gtmca.hi, total_multicast_packets_transmitted_hi,
+- tx_gtmca.lo, total_multicast_packets_transmitted_lo);
+- ADD_64(sum.hi, diff.hi, sum.lo, diff.lo);
+-
+- UPDATE_STAT64(tx_gtgca.hi, total_broadcast_packets_transmitted_hi,
+- tx_gtgca.lo, total_broadcast_packets_transmitted_lo);
+- ADD_64(sum.hi, diff.hi, sum.lo, diff.lo);
+-
+- UPDATE_STAT64(tx_gtpkt.hi, total_unicast_packets_transmitted_hi,
+- tx_gtpkt.lo, total_unicast_packets_transmitted_lo);
+- SUB_64(estats->total_unicast_packets_transmitted_hi, sum.hi,
+- estats->total_unicast_packets_transmitted_lo, sum.lo);
+-
+- UPDATE_STAT(tx_gtxpf.lo, pause_xoff_frames_transmitted);
+- UPDATE_STAT(tx_gt64.lo, frames_transmitted_64_bytes);
+- UPDATE_STAT(tx_gt127.lo, frames_transmitted_65_127_bytes);
+- UPDATE_STAT(tx_gt255.lo, frames_transmitted_128_255_bytes);
+- UPDATE_STAT(tx_gt511.lo, frames_transmitted_256_511_bytes);
+- UPDATE_STAT(tx_gt1023.lo, frames_transmitted_512_1023_bytes);
+- UPDATE_STAT(tx_gt1518.lo, frames_transmitted_1024_1522_bytes);
+- UPDATE_STAT(tx_gt2047.lo, frames_transmitted_1523_9022_bytes);
+- UPDATE_STAT(tx_gt4095.lo, frames_transmitted_1523_9022_bytes);
+- UPDATE_STAT(tx_gt9216.lo, frames_transmitted_1523_9022_bytes);
+- UPDATE_STAT(tx_gt16383.lo, frames_transmitted_1523_9022_bytes);
+-
+- UPDATE_STAT(rx_grfcs.lo, crc_receive_errors);
+- UPDATE_STAT(rx_grund.lo, runt_packets_received);
+- UPDATE_STAT(rx_grovr.lo, stat_Dot3statsFramesTooLong);
+- UPDATE_STAT(rx_grxpf.lo, pause_xoff_frames_received);
+- UPDATE_STAT(rx_grxcf.lo, control_frames_received);
+- /* UPDATE_STAT(rx_grxpf.lo, control_frames_received); */
+- UPDATE_STAT(rx_grfrg.lo, error_runt_packets_received);
+- UPDATE_STAT(rx_grjbr.lo, error_jabber_packets_received);
+-
+- UPDATE_STAT64(rx_grerb.hi, stat_IfHCInBadOctets_hi,
+- rx_grerb.lo, stat_IfHCInBadOctets_lo);
+- UPDATE_STAT64(tx_gtufl.hi, stat_IfHCOutBadOctets_hi,
+- tx_gtufl.lo, stat_IfHCOutBadOctets_lo);
+- UPDATE_STAT(tx_gterr.lo, stat_Dot3statsInternalMacTransmitErrors);
+- /* UPDATE_STAT(rx_grxpf.lo, stat_XoffStateEntered); */
+- estats->stat_XoffStateEntered = estats->pause_xoff_frames_received;
+-}
+-
+-static void bnx2x_update_emac_stats(struct bnx2x *bp)
+-{
+- struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac);
+- struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
+-
+- UPDATE_EXTEND_STAT(tx_ifhcoutoctets, total_bytes_transmitted_hi,
+- total_bytes_transmitted_lo);
+- UPDATE_EXTEND_STAT(tx_ifhcoutucastpkts,
+- total_unicast_packets_transmitted_hi,
+- total_unicast_packets_transmitted_lo);
+- UPDATE_EXTEND_STAT(tx_ifhcoutmulticastpkts,
+- total_multicast_packets_transmitted_hi,
+- total_multicast_packets_transmitted_lo);
+- UPDATE_EXTEND_STAT(tx_ifhcoutbroadcastpkts,
+- total_broadcast_packets_transmitted_hi,
+- total_broadcast_packets_transmitted_lo);
+-
+- estats->pause_xon_frames_transmitted += new->tx_outxonsent;
+- estats->pause_xoff_frames_transmitted += new->tx_outxoffsent;
+- estats->single_collision_transmit_frames +=
+- new->tx_dot3statssinglecollisionframes;
+- estats->multiple_collision_transmit_frames +=
+- new->tx_dot3statsmultiplecollisionframes;
+- estats->late_collision_frames += new->tx_dot3statslatecollisions;
+- estats->excessive_collision_frames +=
+- new->tx_dot3statsexcessivecollisions;
+- estats->frames_transmitted_64_bytes += new->tx_etherstatspkts64octets;
+- estats->frames_transmitted_65_127_bytes +=
+- new->tx_etherstatspkts65octetsto127octets;
+- estats->frames_transmitted_128_255_bytes +=
+- new->tx_etherstatspkts128octetsto255octets;
+- estats->frames_transmitted_256_511_bytes +=
+- new->tx_etherstatspkts256octetsto511octets;
+- estats->frames_transmitted_512_1023_bytes +=
+- new->tx_etherstatspkts512octetsto1023octets;
+- estats->frames_transmitted_1024_1522_bytes +=
+- new->tx_etherstatspkts1024octetsto1522octet;
+- estats->frames_transmitted_1523_9022_bytes +=
+- new->tx_etherstatspktsover1522octets;
+-
+- estats->crc_receive_errors += new->rx_dot3statsfcserrors;
+- estats->alignment_errors += new->rx_dot3statsalignmenterrors;
+- estats->false_carrier_detections += new->rx_falsecarriererrors;
+- estats->runt_packets_received += new->rx_etherstatsundersizepkts;
+- estats->stat_Dot3statsFramesTooLong += new->rx_dot3statsframestoolong;
+- estats->pause_xon_frames_received += new->rx_xonpauseframesreceived;
+- estats->pause_xoff_frames_received += new->rx_xoffpauseframesreceived;
+- estats->control_frames_received += new->rx_maccontrolframesreceived;
+- estats->error_runt_packets_received += new->rx_etherstatsfragments;
+- estats->error_jabber_packets_received += new->rx_etherstatsjabbers;
+-
+- UPDATE_EXTEND_STAT(rx_ifhcinbadoctets, stat_IfHCInBadOctets_hi,
+- stat_IfHCInBadOctets_lo);
+- UPDATE_EXTEND_STAT(tx_ifhcoutbadoctets, stat_IfHCOutBadOctets_hi,
+- stat_IfHCOutBadOctets_lo);
+- estats->stat_Dot3statsInternalMacTransmitErrors +=
+- new->tx_dot3statsinternalmactransmiterrors;
+- estats->stat_Dot3StatsCarrierSenseErrors +=
+- new->rx_dot3statscarriersenseerrors;
+- estats->stat_Dot3StatsDeferredTransmissions +=
+- new->tx_dot3statsdeferredtransmissions;
+- estats->stat_FlowControlDone += new->tx_flowcontroldone;
+- estats->stat_XoffStateEntered += new->rx_xoffstateentered;
+-}
+-
+-static int bnx2x_update_storm_stats(struct bnx2x *bp)
+-{
+- struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
+- struct tstorm_common_stats *tstats = &stats->tstorm_common;
+- struct tstorm_per_client_stats *tclient =
+- &tstats->client_statistics[0];
+- struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
+- struct xstorm_common_stats *xstats = &stats->xstorm_common;
+- struct nig_stats *nstats = bnx2x_sp(bp, nig);
+- struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
+- u32 diff;
+-
+- /* are DMAE stats valid? */
+- if (nstats->done != 0xffffffff) {
+- DP(BNX2X_MSG_STATS, "stats not updated by dmae\n");
+- return -1;
+- }
+-
+- /* are storm stats valid? */
+- if (tstats->done.hi != 0xffffffff) {
+- DP(BNX2X_MSG_STATS, "stats not updated by tstorm\n");
+- return -2;
+- }
+- if (xstats->done.hi != 0xffffffff) {
+- DP(BNX2X_MSG_STATS, "stats not updated by xstorm\n");
+- return -3;
+- }
+-
+- estats->total_bytes_received_hi =
+- estats->valid_bytes_received_hi =
+- le32_to_cpu(tclient->total_rcv_bytes.hi);
+- estats->total_bytes_received_lo =
+- estats->valid_bytes_received_lo =
+- le32_to_cpu(tclient->total_rcv_bytes.lo);
+- ADD_64(estats->total_bytes_received_hi,
+- le32_to_cpu(tclient->rcv_error_bytes.hi),
+- estats->total_bytes_received_lo,
+- le32_to_cpu(tclient->rcv_error_bytes.lo));
+-
+- UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
+- total_unicast_packets_received_hi,
+- total_unicast_packets_received_lo);
+- UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
+- total_multicast_packets_received_hi,
+- total_multicast_packets_received_lo);
+- UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
+- total_broadcast_packets_received_hi,
+- total_broadcast_packets_received_lo);
+-
+- estats->frames_received_64_bytes = MAC_STX_NA;
+- estats->frames_received_65_127_bytes = MAC_STX_NA;
+- estats->frames_received_128_255_bytes = MAC_STX_NA;
+- estats->frames_received_256_511_bytes = MAC_STX_NA;
+- estats->frames_received_512_1023_bytes = MAC_STX_NA;
+- estats->frames_received_1024_1522_bytes = MAC_STX_NA;
+- estats->frames_received_1523_9022_bytes = MAC_STX_NA;
+-
+- estats->x_total_sent_bytes_hi =
+- le32_to_cpu(xstats->total_sent_bytes.hi);
+- estats->x_total_sent_bytes_lo =
+- le32_to_cpu(xstats->total_sent_bytes.lo);
+- estats->x_total_sent_pkts = le32_to_cpu(xstats->total_sent_pkts);
+-
+- estats->t_rcv_unicast_bytes_hi =
+- le32_to_cpu(tclient->rcv_unicast_bytes.hi);
+- estats->t_rcv_unicast_bytes_lo =
+- le32_to_cpu(tclient->rcv_unicast_bytes.lo);
+- estats->t_rcv_broadcast_bytes_hi =
+- le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
+- estats->t_rcv_broadcast_bytes_lo =
+- le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
+- estats->t_rcv_multicast_bytes_hi =
+- le32_to_cpu(tclient->rcv_multicast_bytes.hi);
+- estats->t_rcv_multicast_bytes_lo =
+- le32_to_cpu(tclient->rcv_multicast_bytes.lo);
+- estats->t_total_rcv_pkt = le32_to_cpu(tclient->total_rcv_pkts);
+-
+- estats->checksum_discard = le32_to_cpu(tclient->checksum_discard);
+- estats->packets_too_big_discard =
+- le32_to_cpu(tclient->packets_too_big_discard);
+- estats->jabber_packets_received = estats->packets_too_big_discard +
+- estats->stat_Dot3statsFramesTooLong;
+- estats->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
+- estats->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
+- estats->mac_discard = le32_to_cpu(tclient->mac_discard);
+- estats->mac_filter_discard = le32_to_cpu(tstats->mac_filter_discard);
+- estats->xxoverflow_discard = le32_to_cpu(tstats->xxoverflow_discard);
+- estats->brb_truncate_discard =
+- le32_to_cpu(tstats->brb_truncate_discard);
+-
+- estats->brb_discard += nstats->brb_discard - bp->old_brb_discard;
+- bp->old_brb_discard = nstats->brb_discard;
+-
+- estats->brb_packet = nstats->brb_packet;
+- estats->brb_truncate = nstats->brb_truncate;
+- estats->flow_ctrl_discard = nstats->flow_ctrl_discard;
+- estats->flow_ctrl_octets = nstats->flow_ctrl_octets;
+- estats->flow_ctrl_packet = nstats->flow_ctrl_packet;
+- estats->mng_discard = nstats->mng_discard;
+- estats->mng_octet_inp = nstats->mng_octet_inp;
+- estats->mng_octet_out = nstats->mng_octet_out;
+- estats->mng_packet_inp = nstats->mng_packet_inp;
+- estats->mng_packet_out = nstats->mng_packet_out;
+- estats->pbf_octets = nstats->pbf_octets;
+- estats->pbf_packet = nstats->pbf_packet;
+- estats->safc_inp = nstats->safc_inp;
+-
+- xstats->done.hi = 0;
+- tstats->done.hi = 0;
+- nstats->done = 0;
+-
+- return 0;
+-}
+-
+-static void bnx2x_update_net_stats(struct bnx2x *bp)
+-{
+- struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
+- struct net_device_stats *nstats = &bp->dev->stats;
+-
+- nstats->rx_packets =
+- bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
+- bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
+- bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
+-
+- nstats->tx_packets =
+- bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
+- bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
+- bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
+-
+- nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
+-
+- nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
+-
+- nstats->rx_dropped = estats->checksum_discard + estats->mac_discard;
+- nstats->tx_dropped = 0;
+-
+- nstats->multicast =
+- bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
+-
+- nstats->collisions = estats->single_collision_transmit_frames +
+- estats->multiple_collision_transmit_frames +
+- estats->late_collision_frames +
+- estats->excessive_collision_frames;
+-
+- nstats->rx_length_errors = estats->runt_packets_received +
+- estats->jabber_packets_received;
+- nstats->rx_over_errors = estats->brb_discard +
+- estats->brb_truncate_discard;
+- nstats->rx_crc_errors = estats->crc_receive_errors;
+- nstats->rx_frame_errors = estats->alignment_errors;
+- nstats->rx_fifo_errors = estats->no_buff_discard;
+- nstats->rx_missed_errors = estats->xxoverflow_discard;
+-
+- nstats->rx_errors = nstats->rx_length_errors +
+- nstats->rx_over_errors +
+- nstats->rx_crc_errors +
+- nstats->rx_frame_errors +
+- nstats->rx_fifo_errors +
+- nstats->rx_missed_errors;
+-
+- nstats->tx_aborted_errors = estats->late_collision_frames +
+- estats->excessive_collision_frames;
+- nstats->tx_carrier_errors = estats->false_carrier_detections;
+- nstats->tx_fifo_errors = 0;
+- nstats->tx_heartbeat_errors = 0;
+- nstats->tx_window_errors = 0;
+-
+- nstats->tx_errors = nstats->tx_aborted_errors +
+- nstats->tx_carrier_errors;
+-
+- estats->mac_stx_start = ++estats->mac_stx_end;
+-}
+-
+-static void bnx2x_update_stats(struct bnx2x *bp)
+-{
+- int i;
+-
+- if (!bnx2x_update_storm_stats(bp)) {
+-
+- if (bp->phy_flags & PHY_BMAC_FLAG) {
+- bnx2x_update_bmac_stats(bp);
+-
+- } else if (bp->phy_flags & PHY_EMAC_FLAG) {
+- bnx2x_update_emac_stats(bp);
+-
+- } else { /* unreached */
+- BNX2X_ERR("no MAC active\n");
+- return;
+- }
+-
+- bnx2x_update_net_stats(bp);
+- }
+-
+- if (bp->msglevel & NETIF_MSG_TIMER) {
+- struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
+- struct net_device_stats *nstats = &bp->dev->stats;
+-
+- printk(KERN_DEBUG "%s:\n", bp->dev->name);
+- printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
+- " tx pkt (%lx)\n",
+- bnx2x_tx_avail(bp->fp),
+- *bp->fp->tx_cons_sb, nstats->tx_packets);
+- printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
+- " rx pkt (%lx)\n",
+- (u16)(*bp->fp->rx_cons_sb - bp->fp->rx_comp_cons),
+- *bp->fp->rx_cons_sb, nstats->rx_packets);
+- printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n",
+- netif_queue_stopped(bp->dev)? "Xoff" : "Xon",
+- estats->driver_xoff, estats->brb_discard);
+- printk(KERN_DEBUG "tstats: checksum_discard %u "
+- "packets_too_big_discard %u no_buff_discard %u "
+- "mac_discard %u mac_filter_discard %u "
+- "xxovrflow_discard %u brb_truncate_discard %u "
+- "ttl0_discard %u\n",
+- estats->checksum_discard,
+- estats->packets_too_big_discard,
+- estats->no_buff_discard, estats->mac_discard,
+- estats->mac_filter_discard, estats->xxoverflow_discard,
+- estats->brb_truncate_discard, estats->ttl0_discard);
+-
+- for_each_queue(bp, i) {
+- printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
+- bnx2x_fp(bp, i, tx_pkt),
+- bnx2x_fp(bp, i, rx_pkt),
+- bnx2x_fp(bp, i, rx_calls));
+- }
+- }
+-
+- if (bp->state != BNX2X_STATE_OPEN) {
+- DP(BNX2X_MSG_STATS, "state is %x, returning\n", bp->state);
+- return;
+- }
+-
+-#ifdef BNX2X_STOP_ON_ERROR
+- if (unlikely(bp->panic))
+- return;
+-#endif
+-
+- /* loader */
+- if (bp->executer_idx) {
+- struct dmae_command *dmae = &bp->dmae;
+- int port = bp->port;
+- int loader_idx = port * 8;
+-
+- memset(dmae, 0, sizeof(struct dmae_command));
+-
+- dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
+- DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
+- DMAE_CMD_DST_RESET |
+-#ifdef __BIG_ENDIAN
+- DMAE_CMD_ENDIANITY_B_DW_SWAP |
+-#else
+- DMAE_CMD_ENDIANITY_DW_SWAP |
+-#endif
+- (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
+- dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
+- dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
+- dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
+- sizeof(struct dmae_command) *
+- (loader_idx + 1)) >> 2;
+- dmae->dst_addr_hi = 0;
+- dmae->len = sizeof(struct dmae_command) >> 2;
+- dmae->len--; /* !!! for A0/1 only */
+- dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
+- dmae->comp_addr_hi = 0;
+- dmae->comp_val = 1;
+-
+- bnx2x_post_dmae(bp, dmae, loader_idx);
+- }
+-
+- if (bp->stats_state != STATS_STATE_ENABLE) {
+- bp->stats_state = STATS_STATE_DISABLE;
+- return;
+- }
+-
+- if (bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0, 0, 0, 0) == 0) {
+- /* stats ramrod has it's own slot on the spe */
+- bp->spq_left++;
+- bp->stat_pending = 1;
+- }
+-}
+-
+-static void bnx2x_timer(unsigned long data)
+-{
+- struct bnx2x *bp = (struct bnx2x *) data;
+-
+- if (!netif_running(bp->dev))
+- return;
+-
+- if (atomic_read(&bp->intr_sem) != 0)
+- goto timer_restart;
+-
+- if (poll) {
+- struct bnx2x_fastpath *fp = &bp->fp[0];
+- int rc;
+-
+- bnx2x_tx_int(fp, 1000);
+- rc = bnx2x_rx_int(fp, 1000);
+- }
+-
+- if (!nomcp) {
+- int port = bp->port;
+- u32 drv_pulse;
+- u32 mcp_pulse;
+-
+- ++bp->fw_drv_pulse_wr_seq;
+- bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
+- /* TBD - add SYSTEM_TIME */
+- drv_pulse = bp->fw_drv_pulse_wr_seq;
+- SHMEM_WR(bp, func_mb[port].drv_pulse_mb, drv_pulse);
+-
+- mcp_pulse = (SHMEM_RD(bp, func_mb[port].mcp_pulse_mb) &
+- MCP_PULSE_SEQ_MASK);
+- /* The delta between driver pulse and mcp response
+- * should be 1 (before mcp response) or 0 (after mcp response)
+- */
+- if ((drv_pulse != mcp_pulse) &&
+- (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
+- /* someone lost a heartbeat... */
+- BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
+- drv_pulse, mcp_pulse);
+- }
+- }
+-
+- if (bp->stats_state == STATS_STATE_DISABLE)
+- goto timer_restart;
+-
+- bnx2x_update_stats(bp);
+-
+-timer_restart:
+- mod_timer(&bp->timer, jiffies + bp->current_interval);
+-}
+-
+-/* end of Statistics */
+-
+-/* nic init */
+-
+-/*
+- * nic init service functions
+- */
+-
+-static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
+- dma_addr_t mapping, int id)
+-{
+- int port = bp->port;
+- u64 section;
+- int index;
+-
+- /* USTORM */
+- section = ((u64)mapping) + offsetof(struct host_status_block,
+- u_status_block);
+- sb->u_status_block.status_block_id = id;
+-
+- REG_WR(bp, BAR_USTRORM_INTMEM +
+- USTORM_SB_HOST_SB_ADDR_OFFSET(port, id), U64_LO(section));
+- REG_WR(bp, BAR_USTRORM_INTMEM +
+- ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, id)) + 4),
+- U64_HI(section));
+-
+- for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
+- REG_WR16(bp, BAR_USTRORM_INTMEM +
+- USTORM_SB_HC_DISABLE_OFFSET(port, id, index), 0x1);
+-
+- /* CSTORM */
+- section = ((u64)mapping) + offsetof(struct host_status_block,
+- c_status_block);
+- sb->c_status_block.status_block_id = id;
+-
+- REG_WR(bp, BAR_CSTRORM_INTMEM +
+- CSTORM_SB_HOST_SB_ADDR_OFFSET(port, id), U64_LO(section));
+- REG_WR(bp, BAR_CSTRORM_INTMEM +
+- ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, id)) + 4),
+- U64_HI(section));
+-
+- for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
+- REG_WR16(bp, BAR_CSTRORM_INTMEM +
+- CSTORM_SB_HC_DISABLE_OFFSET(port, id, index), 0x1);
+-
+- bnx2x_ack_sb(bp, id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
+-}
+-
+-static void bnx2x_init_def_sb(struct bnx2x *bp,
+- struct host_def_status_block *def_sb,
+- dma_addr_t mapping, int id)
+-{
+- int port = bp->port;
+- int index, val, reg_offset;
+- u64 section;
+-
+- /* ATTN */
+- section = ((u64)mapping) + offsetof(struct host_def_status_block,
+- atten_status_block);
+- def_sb->atten_status_block.status_block_id = id;
+-
+- bp->def_att_idx = 0;
+- bp->attn_state = 0;
+-
+- reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
+- MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
+-
+- for (index = 0; index < 3; index++) {
+- bp->attn_group[index].sig[0] = REG_RD(bp,
+- reg_offset + 0x10*index);
+- bp->attn_group[index].sig[1] = REG_RD(bp,
+- reg_offset + 0x4 + 0x10*index);
+- bp->attn_group[index].sig[2] = REG_RD(bp,
+- reg_offset + 0x8 + 0x10*index);
+- bp->attn_group[index].sig[3] = REG_RD(bp,
+- reg_offset + 0xc + 0x10*index);
+- }
+-
+- bp->aeu_mask = REG_RD(bp, (port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
+- MISC_REG_AEU_MASK_ATTN_FUNC_0));
+-
+- reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
+- HC_REG_ATTN_MSG0_ADDR_L);
+-
+- REG_WR(bp, reg_offset, U64_LO(section));
+- REG_WR(bp, reg_offset + 4, U64_HI(section));
+-
+- reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
+-
+- val = REG_RD(bp, reg_offset);
+- val |= id;
+- REG_WR(bp, reg_offset, val);
+-
+- /* USTORM */
+- section = ((u64)mapping) + offsetof(struct host_def_status_block,
+- u_def_status_block);
+- def_sb->u_def_status_block.status_block_id = id;
+-
+- bp->def_u_idx = 0;
+-
+- REG_WR(bp, BAR_USTRORM_INTMEM +
+- USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
+- REG_WR(bp, BAR_USTRORM_INTMEM +
+- ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
+- U64_HI(section));
+- REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port),
+- BNX2X_BTR);
+-
+- for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
+- REG_WR16(bp, BAR_USTRORM_INTMEM +
+- USTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
+-
+- /* CSTORM */
+- section = ((u64)mapping) + offsetof(struct host_def_status_block,
+- c_def_status_block);
+- def_sb->c_def_status_block.status_block_id = id;
+-
+- bp->def_c_idx = 0;
+-
+- REG_WR(bp, BAR_CSTRORM_INTMEM +
+- CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
+- REG_WR(bp, BAR_CSTRORM_INTMEM +
+- ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
+- U64_HI(section));
+- REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port),
+- BNX2X_BTR);
+-
+- for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
+- REG_WR16(bp, BAR_CSTRORM_INTMEM +
+- CSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
+-
+- /* TSTORM */
+- section = ((u64)mapping) + offsetof(struct host_def_status_block,
+- t_def_status_block);
+- def_sb->t_def_status_block.status_block_id = id;
+-
+- bp->def_t_idx = 0;
+-
+- REG_WR(bp, BAR_TSTRORM_INTMEM +
+- TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
+- REG_WR(bp, BAR_TSTRORM_INTMEM +
+- ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
+- U64_HI(section));
+- REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port),
+- BNX2X_BTR);
+-
+- for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
+- REG_WR16(bp, BAR_TSTRORM_INTMEM +
+- TSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
+-
+- /* XSTORM */
+- section = ((u64)mapping) + offsetof(struct host_def_status_block,
+- x_def_status_block);
+- def_sb->x_def_status_block.status_block_id = id;
+-
+- bp->def_x_idx = 0;
+-
+- REG_WR(bp, BAR_XSTRORM_INTMEM +
+- XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
+- REG_WR(bp, BAR_XSTRORM_INTMEM +
+- ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
+- U64_HI(section));
+- REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port),
+- BNX2X_BTR);
+-
+- for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
+- REG_WR16(bp, BAR_XSTRORM_INTMEM +
+- XSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
+-
+- bp->stat_pending = 0;
+-
+- bnx2x_ack_sb(bp, id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
+-}
+-
+-static void bnx2x_update_coalesce(struct bnx2x *bp)
+-{
+- int port = bp->port;
+- int i;
+-
+- for_each_queue(bp, i) {
+-
+- /* HC_INDEX_U_ETH_RX_CQ_CONS */
+- REG_WR8(bp, BAR_USTRORM_INTMEM +
+- USTORM_SB_HC_TIMEOUT_OFFSET(port, i,
+- HC_INDEX_U_ETH_RX_CQ_CONS),
+- bp->rx_ticks_int/12);
+- REG_WR16(bp, BAR_USTRORM_INTMEM +
+- USTORM_SB_HC_DISABLE_OFFSET(port, i,
+- HC_INDEX_U_ETH_RX_CQ_CONS),
+- bp->rx_ticks_int ? 0 : 1);
+-
+- /* HC_INDEX_C_ETH_TX_CQ_CONS */
+- REG_WR8(bp, BAR_CSTRORM_INTMEM +
+- CSTORM_SB_HC_TIMEOUT_OFFSET(port, i,
+- HC_INDEX_C_ETH_TX_CQ_CONS),
+- bp->tx_ticks_int/12);
+- REG_WR16(bp, BAR_CSTRORM_INTMEM +
+- CSTORM_SB_HC_DISABLE_OFFSET(port, i,
+- HC_INDEX_C_ETH_TX_CQ_CONS),
+- bp->tx_ticks_int ? 0 : 1);
+- }
+-}
+-
+-static void bnx2x_init_rx_rings(struct bnx2x *bp)
+-{
+- u16 ring_prod;
+- int i, j;
+- int port = bp->port;
+-
+- bp->rx_buf_use_size = bp->dev->mtu;
+-
+- bp->rx_buf_use_size += bp->rx_offset + ETH_OVREHEAD;
+- bp->rx_buf_size = bp->rx_buf_use_size + 64;
+-
+- for_each_queue(bp, j) {
+- struct bnx2x_fastpath *fp = &bp->fp[j];
+-
+- fp->rx_bd_cons = 0;
+- fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
+-
+- for (i = 1; i <= NUM_RX_RINGS; i++) {
+- struct eth_rx_bd *rx_bd;
+-
+- rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
+- rx_bd->addr_hi =
+- cpu_to_le32(U64_HI(fp->rx_desc_mapping +
+- BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
+- rx_bd->addr_lo =
+- cpu_to_le32(U64_LO(fp->rx_desc_mapping +
+- BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
+-
+- }
+-
+- for (i = 1; i <= NUM_RCQ_RINGS; i++) {
+- struct eth_rx_cqe_next_page *nextpg;
+-
+- nextpg = (struct eth_rx_cqe_next_page *)
+- &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
+- nextpg->addr_hi =
+- cpu_to_le32(U64_HI(fp->rx_comp_mapping +
+- BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
+- nextpg->addr_lo =
+- cpu_to_le32(U64_LO(fp->rx_comp_mapping +
+- BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
+- }
+-
+- /* rx completion queue */
+- fp->rx_comp_cons = ring_prod = 0;
+-
+- for (i = 0; i < bp->rx_ring_size; i++) {
+- if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
+- BNX2X_ERR("was only able to allocate "
+- "%d rx skbs\n", i);
+- break;
+- }
+- ring_prod = NEXT_RX_IDX(ring_prod);
+- BUG_TRAP(ring_prod > i);
+- }
+-
+- fp->rx_bd_prod = fp->rx_comp_prod = ring_prod;
+- fp->rx_pkt = fp->rx_calls = 0;
+-
+- /* Warning! this will generate an interrupt (to the TSTORM) */
+- /* must only be done when chip is initialized */
+- REG_WR(bp, BAR_TSTRORM_INTMEM +
+- TSTORM_RCQ_PROD_OFFSET(port, j), ring_prod);
+- if (j != 0)
+- continue;
+-
+- REG_WR(bp, BAR_USTRORM_INTMEM +
+- USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(port),
+- U64_LO(fp->rx_comp_mapping));
+- REG_WR(bp, BAR_USTRORM_INTMEM +
+- USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(port) + 4,
+- U64_HI(fp->rx_comp_mapping));
+- }
+-}
+-
+-static void bnx2x_init_tx_ring(struct bnx2x *bp)
+-{
+- int i, j;
+-
+- for_each_queue(bp, j) {
+- struct bnx2x_fastpath *fp = &bp->fp[j];
+-
+- for (i = 1; i <= NUM_TX_RINGS; i++) {
+- struct eth_tx_bd *tx_bd =
+- &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
+-
+- tx_bd->addr_hi =
+- cpu_to_le32(U64_HI(fp->tx_desc_mapping +
+- BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
+- tx_bd->addr_lo =
+- cpu_to_le32(U64_LO(fp->tx_desc_mapping +
+- BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
+- }
+-
+- fp->tx_pkt_prod = 0;
+- fp->tx_pkt_cons = 0;
+- fp->tx_bd_prod = 0;
+- fp->tx_bd_cons = 0;
+- fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
+- fp->tx_pkt = 0;
+- }
+-}
+-
+-static void bnx2x_init_sp_ring(struct bnx2x *bp)
+-{
+- int port = bp->port;
+-
+- spin_lock_init(&bp->spq_lock);
+-
+- bp->spq_left = MAX_SPQ_PENDING;
+- bp->spq_prod_idx = 0;
+- bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
+- bp->spq_prod_bd = bp->spq;
+- bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
+-
+- REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PAGE_BASE_OFFSET(port),
+- U64_LO(bp->spq_mapping));
+- REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PAGE_BASE_OFFSET(port) + 4,
+- U64_HI(bp->spq_mapping));
+-
+- REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(port),
+- bp->spq_prod_idx);
+-}
+-
+-static void bnx2x_init_context(struct bnx2x *bp)
+-{
+- int i;
+-
+- for_each_queue(bp, i) {
+- struct eth_context *context = bnx2x_sp(bp, context[i].eth);
+- struct bnx2x_fastpath *fp = &bp->fp[i];
+-
+- context->xstorm_st_context.tx_bd_page_base_hi =
+- U64_HI(fp->tx_desc_mapping);
+- context->xstorm_st_context.tx_bd_page_base_lo =
+- U64_LO(fp->tx_desc_mapping);
+- context->xstorm_st_context.db_data_addr_hi =
+- U64_HI(fp->tx_prods_mapping);
+- context->xstorm_st_context.db_data_addr_lo =
+- U64_LO(fp->tx_prods_mapping);
+-
+- context->ustorm_st_context.rx_bd_page_base_hi =
+- U64_HI(fp->rx_desc_mapping);
+- context->ustorm_st_context.rx_bd_page_base_lo =
+- U64_LO(fp->rx_desc_mapping);
+- context->ustorm_st_context.status_block_id = i;
+- context->ustorm_st_context.sb_index_number =
+- HC_INDEX_U_ETH_RX_CQ_CONS;
+- context->ustorm_st_context.rcq_base_address_hi =
+- U64_HI(fp->rx_comp_mapping);
+- context->ustorm_st_context.rcq_base_address_lo =
+- U64_LO(fp->rx_comp_mapping);
+- context->ustorm_st_context.flags =
+- USTORM_ETH_ST_CONTEXT_ENABLE_MC_ALIGNMENT;
+- context->ustorm_st_context.mc_alignment_size = 64;
+- context->ustorm_st_context.num_rss = bp->num_queues;
+-
+- context->cstorm_st_context.sb_index_number =
+- HC_INDEX_C_ETH_TX_CQ_CONS;
+- context->cstorm_st_context.status_block_id = i;
+-
+- context->xstorm_ag_context.cdu_reserved =
+- CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
+- CDU_REGION_NUMBER_XCM_AG,
+- ETH_CONNECTION_TYPE);
+- context->ustorm_ag_context.cdu_usage =
+- CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
+- CDU_REGION_NUMBER_UCM_AG,
+- ETH_CONNECTION_TYPE);
+- }
+-}
+-
+-static void bnx2x_init_ind_table(struct bnx2x *bp)
+-{
+- int port = bp->port;
+- int i;
+-
+- if (!is_multi(bp))
+- return;
+-
+- for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
+- REG_WR8(bp, TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
+- i % bp->num_queues);
+-
+- REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
+-}
+-
+-static void bnx2x_set_client_config(struct bnx2x *bp)
+-{
+-#ifdef BCM_VLAN
+- int mode = bp->rx_mode;
+-#endif
+- int i, port = bp->port;
+- struct tstorm_eth_client_config tstorm_client = {0};
+-
+- tstorm_client.mtu = bp->dev->mtu;
+- tstorm_client.statistics_counter_id = 0;
+- tstorm_client.config_flags =
+- TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
+-#ifdef BCM_VLAN
+- if (mode && bp->vlgrp) {
+- tstorm_client.config_flags |=
+- TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
+- DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
+- }
+-#endif
+- if (mode != BNX2X_RX_MODE_PROMISC)
+- tstorm_client.drop_flags =
+- TSTORM_ETH_CLIENT_CONFIG_DROP_MAC_ERR;
+-
+- for_each_queue(bp, i) {
+- REG_WR(bp, BAR_TSTRORM_INTMEM +
+- TSTORM_CLIENT_CONFIG_OFFSET(port, i),
+- ((u32 *)&tstorm_client)[0]);
+- REG_WR(bp, BAR_TSTRORM_INTMEM +
+- TSTORM_CLIENT_CONFIG_OFFSET(port, i) + 4,
+- ((u32 *)&tstorm_client)[1]);
+- }
+-
+-/* DP(NETIF_MSG_IFUP, "tstorm_client: 0x%08x 0x%08x\n",
+- ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]); */
+-}
+-
+-static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
+-{
+- int mode = bp->rx_mode;
+- int port = bp->port;
+- struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
+- int i;
+-
+- DP(NETIF_MSG_RX_STATUS, "rx mode is %d\n", mode);
+-
+- switch (mode) {
+- case BNX2X_RX_MODE_NONE: /* no Rx */
+- tstorm_mac_filter.ucast_drop_all = 1;
+- tstorm_mac_filter.mcast_drop_all = 1;
+- tstorm_mac_filter.bcast_drop_all = 1;
+- break;
+- case BNX2X_RX_MODE_NORMAL:
+- tstorm_mac_filter.bcast_accept_all = 1;
+- break;
+- case BNX2X_RX_MODE_ALLMULTI:
+- tstorm_mac_filter.mcast_accept_all = 1;
+- tstorm_mac_filter.bcast_accept_all = 1;
+- break;
+- case BNX2X_RX_MODE_PROMISC:
+- tstorm_mac_filter.ucast_accept_all = 1;
+- tstorm_mac_filter.mcast_accept_all = 1;
+- tstorm_mac_filter.bcast_accept_all = 1;
+- break;
+- default:
+- BNX2X_ERR("bad rx mode (%d)\n", mode);
+- }
+-
+- for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
+- REG_WR(bp, BAR_TSTRORM_INTMEM +
+- TSTORM_MAC_FILTER_CONFIG_OFFSET(port) + i * 4,
+- ((u32 *)&tstorm_mac_filter)[i]);
+-
+-/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
+- ((u32 *)&tstorm_mac_filter)[i]); */
+- }
+-
+- if (mode != BNX2X_RX_MODE_NONE)
+- bnx2x_set_client_config(bp);
+-}
+-
+-static void bnx2x_init_internal(struct bnx2x *bp)
+-{
+- int port = bp->port;
+- struct tstorm_eth_function_common_config tstorm_config = {0};
+- struct stats_indication_flags stats_flags = {0};
+-
+- if (is_multi(bp)) {
+- tstorm_config.config_flags = MULTI_FLAGS;
+- tstorm_config.rss_result_mask = MULTI_MASK;
+- }
+-
+- REG_WR(bp, BAR_TSTRORM_INTMEM +
+- TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(port),
+- (*(u32 *)&tstorm_config));
+-
+-/* DP(NETIF_MSG_IFUP, "tstorm_config: 0x%08x\n",
+- (*(u32 *)&tstorm_config)); */
+-
+- bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
+- bnx2x_set_storm_rx_mode(bp);
+-
+- stats_flags.collect_eth = cpu_to_le32(1);
+-
+- REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port),
+- ((u32 *)&stats_flags)[0]);
+- REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port) + 4,
+- ((u32 *)&stats_flags)[1]);
+-
+- REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port),
+- ((u32 *)&stats_flags)[0]);
+- REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port) + 4,
+- ((u32 *)&stats_flags)[1]);
+-
+- REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port),
+- ((u32 *)&stats_flags)[0]);
+- REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port) + 4,
+- ((u32 *)&stats_flags)[1]);
+-
+-/* DP(NETIF_MSG_IFUP, "stats_flags: 0x%08x 0x%08x\n",
+- ((u32 *)&stats_flags)[0], ((u32 *)&stats_flags)[1]); */
+-}
+-
+-static void bnx2x_nic_init(struct bnx2x *bp)
+-{
+- int i;
+-
+- for_each_queue(bp, i) {
+- struct bnx2x_fastpath *fp = &bp->fp[i];
+-
+- fp->state = BNX2X_FP_STATE_CLOSED;
+- DP(NETIF_MSG_IFUP, "bnx2x_init_sb(%p,%p,%d);\n",
+- bp, fp->status_blk, i);
+- fp->index = i;
+- bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping, i);
+- }
+-
+- bnx2x_init_def_sb(bp, bp->def_status_blk,
+- bp->def_status_blk_mapping, 0x10);
+- bnx2x_update_coalesce(bp);
+- bnx2x_init_rx_rings(bp);
+- bnx2x_init_tx_ring(bp);
+- bnx2x_init_sp_ring(bp);
+- bnx2x_init_context(bp);
+- bnx2x_init_internal(bp);
+- bnx2x_init_stats(bp);
+- bnx2x_init_ind_table(bp);
+- bnx2x_int_enable(bp);
+-
+-}
+-
+-/* end of nic init */
+-
+-/*
+- * gzip service functions
+- */
+-
+-static int bnx2x_gunzip_init(struct bnx2x *bp)
+-{
+- bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
+- &bp->gunzip_mapping);
+- if (bp->gunzip_buf == NULL)
+- goto gunzip_nomem1;
+-
+- bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
+- if (bp->strm == NULL)
+- goto gunzip_nomem2;
+-
+- bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
+- GFP_KERNEL);
+- if (bp->strm->workspace == NULL)
+- goto gunzip_nomem3;
+-
+- return 0;
+-
+-gunzip_nomem3:
+- kfree(bp->strm);
+- bp->strm = NULL;
+-
+-gunzip_nomem2:
+- pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
+- bp->gunzip_mapping);
+- bp->gunzip_buf = NULL;
+-
+-gunzip_nomem1:
+- printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
+- " uncompression\n", bp->dev->name);
+- return -ENOMEM;
+-}
+-
+-static void bnx2x_gunzip_end(struct bnx2x *bp)
+-{
+- kfree(bp->strm->workspace);
+-
+- kfree(bp->strm);
+- bp->strm = NULL;
+-
+- if (bp->gunzip_buf) {
+- pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
+- bp->gunzip_mapping);
+- bp->gunzip_buf = NULL;
+- }
+-}
+-
+-static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
+-{
+- int n, rc;
+-
+- /* check gzip header */
+- if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
+- return -EINVAL;
+-
+- n = 10;
+-
+-#define FNAME 0x8
+-
+- if (zbuf[3] & FNAME)
+- while ((zbuf[n++] != 0) && (n < len));
+-
+- bp->strm->next_in = zbuf + n;
+- bp->strm->avail_in = len - n;
+- bp->strm->next_out = bp->gunzip_buf;
+- bp->strm->avail_out = FW_BUF_SIZE;
+-
+- rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
+- if (rc != Z_OK)
+- return rc;
+-
+- rc = zlib_inflate(bp->strm, Z_FINISH);
+- if ((rc != Z_OK) && (rc != Z_STREAM_END))
+- printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
+- bp->dev->name, bp->strm->msg);
+-
+- bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
+- if (bp->gunzip_outlen & 0x3)
+- printk(KERN_ERR PFX "%s: Firmware decompression error:"
+- " gunzip_outlen (%d) not aligned\n",
+- bp->dev->name, bp->gunzip_outlen);
+- bp->gunzip_outlen >>= 2;
+-
+- zlib_inflateEnd(bp->strm);
+-
+- if (rc == Z_STREAM_END)
+- return 0;
+-
+- return rc;
+-}
+-
+-/* nic load/unload */
+-
+-/*
+- * general service functions
+- */
+-
+-/* send a NIG loopback debug packet */
+-static void bnx2x_lb_pckt(struct bnx2x *bp)
+-{
+-#ifdef USE_DMAE
+- u32 wb_write[3];
+-#endif
+-
+- /* Ethernet source and destination addresses */
+-#ifdef USE_DMAE
+- wb_write[0] = 0x55555555;
+- wb_write[1] = 0x55555555;
+- wb_write[2] = 0x20; /* SOP */
+- REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
+-#else
+- REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB, 0x55555555);
+- REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 4, 0x55555555);
+- /* SOP */
+- REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 8, 0x20);
+-#endif
+-
+- /* NON-IP protocol */
+-#ifdef USE_DMAE
+- wb_write[0] = 0x09000000;
+- wb_write[1] = 0x55555555;
+- wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
+- REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
+-#else
+- REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB, 0x09000000);
+- REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 4, 0x55555555);
+- /* EOP, eop_bvalid = 0 */
+- REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 8, 0x10);
+-#endif
+-}
+-
+-/* some of the internal memories
+- * are not directly readable from the driver
+- * to test them we send debug packets
+- */
+-static int bnx2x_int_mem_test(struct bnx2x *bp)
+-{
+- int factor;
+- int count, i;
+- u32 val = 0;
+-
+- switch (CHIP_REV(bp)) {
+- case CHIP_REV_EMUL:
+- factor = 200;
+- break;
+- case CHIP_REV_FPGA:
+- factor = 120;
+- break;
+- default:
+- factor = 1;
+- break;
+- }
+-
+- DP(NETIF_MSG_HW, "start part1\n");
+-
+- /* Disable inputs of parser neighbor blocks */
+- REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
+- REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
+- REG_WR(bp, CFC_REG_DEBUG0, 0x1);
+- NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
+-
+- /* Write 0 to parser credits for CFC search request */
+- REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
+-
+- /* send Ethernet packet */
+- bnx2x_lb_pckt(bp);
+-
+- /* TODO do i reset NIG statistic? */
+- /* Wait until NIG register shows 1 packet of size 0x10 */
+- count = 1000 * factor;
+- while (count) {
+-#ifdef BNX2X_DMAE_RD
+- bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
+- val = *bnx2x_sp(bp, wb_data[0]);
+-#else
+- val = REG_RD(bp, NIG_REG_STAT2_BRB_OCTET);
+- REG_RD(bp, NIG_REG_STAT2_BRB_OCTET + 4);
+-#endif
+- if (val == 0x10)
+- break;
+-
+- msleep(10);
+- count--;
+- }
+- if (val != 0x10) {
+- BNX2X_ERR("NIG timeout val = 0x%x\n", val);
+- return -1;
+- }
+-
+- /* Wait until PRS register shows 1 packet */
+- count = 1000 * factor;
+- while (count) {
+- val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
+-
+- if (val == 1)
+- break;
+-
+- msleep(10);
+- count--;
+- }
+- if (val != 0x1) {
+- BNX2X_ERR("PRS timeout val = 0x%x\n", val);
+- return -2;
+- }
+-
+- /* Reset and init BRB, PRS */
+- REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x3);
+- msleep(50);
+- REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x3);
+- msleep(50);
+- bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
+- bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
+-
+- DP(NETIF_MSG_HW, "part2\n");
+-
+- /* Disable inputs of parser neighbor blocks */
+- REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
+- REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
+- REG_WR(bp, CFC_REG_DEBUG0, 0x1);
+- NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
+-
+- /* Write 0 to parser credits for CFC search request */
+- REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
+-
+- /* send 10 Ethernet packets */
+- for (i = 0; i < 10; i++)
+- bnx2x_lb_pckt(bp);
+-
+- /* Wait until NIG register shows 10 + 1
+- packets of size 11*0x10 = 0xb0 */
+- count = 1000 * factor;
+- while (count) {
+-#ifdef BNX2X_DMAE_RD
+- bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
+- val = *bnx2x_sp(bp, wb_data[0]);
+-#else
+- val = REG_RD(bp, NIG_REG_STAT2_BRB_OCTET);
+- REG_RD(bp, NIG_REG_STAT2_BRB_OCTET + 4);
+-#endif
+- if (val == 0xb0)
+- break;
+-
+- msleep(10);
+- count--;
+- }
+- if (val != 0xb0) {
+- BNX2X_ERR("NIG timeout val = 0x%x\n", val);
+- return -3;
+- }
+-
+- /* Wait until PRS register shows 2 packets */
+- val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
+- if (val != 2)
+- BNX2X_ERR("PRS timeout val = 0x%x\n", val);
+-
+- /* Write 1 to parser credits for CFC search request */
+- REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
+-
+- /* Wait until PRS register shows 3 packets */
+- msleep(10 * factor);
+- /* Wait until NIG register shows 1 packet of size 0x10 */
+- val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
+- if (val != 3)
+- BNX2X_ERR("PRS timeout val = 0x%x\n", val);
+-
+- /* clear NIG EOP FIFO */
+- for (i = 0; i < 11; i++)
+- REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
+- val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
+- if (val != 1) {
+- BNX2X_ERR("clear of NIG failed\n");
+- return -4;
+- }
+-
+- /* Reset and init BRB, PRS, NIG */
+- REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
+- msleep(50);
+- REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
+- msleep(50);
+- bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
+- bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
+-#ifndef BCM_ISCSI
+- /* set NIC mode */
+- REG_WR(bp, PRS_REG_NIC_MODE, 1);
+-#endif
+-
+- /* Enable inputs of parser neighbor blocks */
+- REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
+- REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
+- REG_WR(bp, CFC_REG_DEBUG0, 0x0);
+- NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x1);
+-
+- DP(NETIF_MSG_HW, "done\n");
+-
+- return 0; /* OK */
+-}
+-
+-static void enable_blocks_attention(struct bnx2x *bp)
+-{
+- REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
+- REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
+- REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
+- REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
+- REG_WR(bp, QM_REG_QM_INT_MASK, 0);
+- REG_WR(bp, TM_REG_TM_INT_MASK, 0);
+- REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
+- REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
+- REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
+-/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
+-/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
+- REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
+- REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
+- REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
+-/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
+-/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
+- REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
+- REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
+- REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
+- REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
+-/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
+-/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
+- REG_WR(bp, PXP2_REG_PXP2_INT_MASK, 0x480000);
+- REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
+- REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
+- REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
+-/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
+-/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
+- REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
+- REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
+-/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
+- REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
+-}
+-
+-static int bnx2x_function_init(struct bnx2x *bp, int mode)
+-{
+- int func = bp->port;
+- int port = func ? PORT1 : PORT0;
+- u32 val, i;
+-#ifdef USE_DMAE
+- u32 wb_write[2];
+-#endif
+-
+- DP(BNX2X_MSG_MCP, "function is %d mode is %x\n", func, mode);
+- if ((func != 0) && (func != 1)) {
+- BNX2X_ERR("BAD function number (%d)\n", func);
+- return -ENODEV;
+- }
+-
+- bnx2x_gunzip_init(bp);
+-
+- if (mode & 0x1) { /* init common */
+- DP(BNX2X_MSG_MCP, "starting common init func %d mode %x\n",
+- func, mode);
+- REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
+- 0xffffffff);
+- REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
+- 0xfffc);
+- bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
+-
+- REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
+- msleep(30);
+- REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
+-
+- bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
+- bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
+-
+- bnx2x_init_pxp(bp);
+-
+- if (CHIP_REV(bp) == CHIP_REV_Ax) {
+- /* enable HW interrupt from PXP on USDM
+- overflow bit 16 on INT_MASK_0 */
+- REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
+- }
+-
+-#ifdef __BIG_ENDIAN
+- REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
+- REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
+- REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
+- REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
+- REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
+- REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);
+-
+-/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
+- REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
+- REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
+- REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
+- REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
+-#endif
+-
+-#ifndef BCM_ISCSI
+- /* set NIC mode */
+- REG_WR(bp, PRS_REG_NIC_MODE, 1);
+-#endif
+-
+- REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 5);
+-#ifdef BCM_ISCSI
+- REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
+- REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
+- REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
+-#endif
+-
+- bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
+-
+- /* let the HW do it's magic ... */
+- msleep(100);
+- /* finish PXP init
+- (can be moved up if we want to use the DMAE) */
+- val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
+- if (val != 1) {
+- BNX2X_ERR("PXP2 CFG failed\n");
+- return -EBUSY;
+- }
+-
+- val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
+- if (val != 1) {
+- BNX2X_ERR("PXP2 RD_INIT failed\n");
+- return -EBUSY;
+- }
+-
+- REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
+- REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
+-
+- bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
+-
+- bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
+- bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
+- bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
+- bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
+-
+-#ifdef BNX2X_DMAE_RD
+- bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
+- bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
+- bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
+- bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
+-#else
+- REG_RD(bp, XSEM_REG_PASSIVE_BUFFER);
+- REG_RD(bp, XSEM_REG_PASSIVE_BUFFER + 4);
+- REG_RD(bp, XSEM_REG_PASSIVE_BUFFER + 8);
+- REG_RD(bp, CSEM_REG_PASSIVE_BUFFER);
+- REG_RD(bp, CSEM_REG_PASSIVE_BUFFER + 4);
+- REG_RD(bp, CSEM_REG_PASSIVE_BUFFER + 8);
+- REG_RD(bp, TSEM_REG_PASSIVE_BUFFER);
+- REG_RD(bp, TSEM_REG_PASSIVE_BUFFER + 4);
+- REG_RD(bp, TSEM_REG_PASSIVE_BUFFER + 8);
+- REG_RD(bp, USEM_REG_PASSIVE_BUFFER);
+- REG_RD(bp, USEM_REG_PASSIVE_BUFFER + 4);
+- REG_RD(bp, USEM_REG_PASSIVE_BUFFER + 8);
+-#endif
+- bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
+- /* soft reset pulse */
+- REG_WR(bp, QM_REG_SOFT_RESET, 1);
+- REG_WR(bp, QM_REG_SOFT_RESET, 0);
+-
+-#ifdef BCM_ISCSI
+- bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
+-#endif
+- bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
+- REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_BITS);
+- if (CHIP_REV(bp) == CHIP_REV_Ax) {
+- /* enable hw interrupt from doorbell Q */
+- REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
+- }
+-
+- bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
+-
+- if (CHIP_REV_IS_SLOW(bp)) {
+- /* fix for emulation and FPGA for no pause */
+- REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
+- REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
+- REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
+- REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
+- }
+-
+- bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
+-
+- bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
+- bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
+- bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
+- bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
+-
+- bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE);
+- bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE);
+- bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE);
+- bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE);
+-
+- bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
+- bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
+- bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
+- bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
+-
+- /* sync semi rtc */
+- REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
+- 0x80000000);
+- REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
+- 0x80000000);
+-
+- bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
+- bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
+- bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
+-
+- REG_WR(bp, SRC_REG_SOFT_RST, 1);
+- for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
+- REG_WR(bp, i, 0xc0cac01a);
+- /* TODO: replace with something meaningful */
+- }
+- /* SRCH COMMON comes here */
+- REG_WR(bp, SRC_REG_SOFT_RST, 0);
+-
+- if (sizeof(union cdu_context) != 1024) {
+- /* we currently assume that a context is 1024 bytes */
+- printk(KERN_ALERT PFX "please adjust the size of"
+- " cdu_context(%ld)\n",
+- (long)sizeof(union cdu_context));
+- }
+- val = (4 << 24) + (0 << 12) + 1024;
+- REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
+- bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
+-
+- bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
+- REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
+-
+- bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
+- bnx2x_init_block(bp, MISC_AEU_COMMON_START,
+- MISC_AEU_COMMON_END);
+- /* RXPCS COMMON comes here */
+- /* EMAC0 COMMON comes here */
+- /* EMAC1 COMMON comes here */
+- /* DBU COMMON comes here */
+- /* DBG COMMON comes here */
+- bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
+-
+- if (CHIP_REV_IS_SLOW(bp))
+- msleep(200);
+-
+- /* finish CFC init */
+- val = REG_RD(bp, CFC_REG_LL_INIT_DONE);
+- if (val != 1) {
+- BNX2X_ERR("CFC LL_INIT failed\n");
+- return -EBUSY;
+- }
+-
+- val = REG_RD(bp, CFC_REG_AC_INIT_DONE);
+- if (val != 1) {
+- BNX2X_ERR("CFC AC_INIT failed\n");
+- return -EBUSY;
+- }
+-
+- val = REG_RD(bp, CFC_REG_CAM_INIT_DONE);
+- if (val != 1) {
+- BNX2X_ERR("CFC CAM_INIT failed\n");
+- return -EBUSY;
+- }
+-
+- REG_WR(bp, CFC_REG_DEBUG0, 0);
+-
+- /* read NIG statistic
+- to see if this is our first up since powerup */
+-#ifdef BNX2X_DMAE_RD
+- bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
+- val = *bnx2x_sp(bp, wb_data[0]);
+-#else
+- val = REG_RD(bp, NIG_REG_STAT2_BRB_OCTET);
+- REG_RD(bp, NIG_REG_STAT2_BRB_OCTET + 4);
+-#endif
+- /* do internal memory self test */
+- if ((val == 0) && bnx2x_int_mem_test(bp)) {
+- BNX2X_ERR("internal mem selftest failed\n");
+- return -EBUSY;
+- }
+-
+- /* clear PXP2 attentions */
+- REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR);
+-
+- enable_blocks_attention(bp);
+- /* enable_blocks_parity(bp); */
+-
+- switch (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
+- case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
+- /* Fan failure is indicated by SPIO 5 */
+- bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
+- MISC_REGISTERS_SPIO_INPUT_HI_Z);
+-
+- /* set to active low mode */
+- val = REG_RD(bp, MISC_REG_SPIO_INT);
+- val |= ((1 << MISC_REGISTERS_SPIO_5) <<
+- MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
+- REG_WR(bp, MISC_REG_SPIO_INT, val);
+-
+- /* enable interrupt to signal the IGU */
+- val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
+- val |= (1 << MISC_REGISTERS_SPIO_5);
+- REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
+- break;
+-
+- default:
+- break;
+- }
+-
+- } /* end of common init */
+-
+- /* per port init */
+-
+- /* the phys address is shifted right 12 bits and has an added
+- 1=valid bit added to the 53rd bit
+- then since this is a wide register(TM)
+- we split it into two 32 bit writes
+- */
+-#define RQ_ONCHIP_AT_PORT_SIZE 384
+-#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
+-#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
+-#define PXP_ONE_ILT(x) ((x << 10) | x)
+-
+- DP(BNX2X_MSG_MCP, "starting per-function init port is %x\n", func);
+-
+- REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + func*4, 0);
+-
+- /* Port PXP comes here */
+- /* Port PXP2 comes here */
+-
+- /* Offset is
+- * Port0 0
+- * Port1 384 */
+- i = func * RQ_ONCHIP_AT_PORT_SIZE;
+-#ifdef USE_DMAE
+- wb_write[0] = ONCHIP_ADDR1(bnx2x_sp_mapping(bp, context));
+- wb_write[1] = ONCHIP_ADDR2(bnx2x_sp_mapping(bp, context));
+- REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
+-#else
+- REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT + i*8,
+- ONCHIP_ADDR1(bnx2x_sp_mapping(bp, context)));
+- REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT + i*8 + 4,
+- ONCHIP_ADDR2(bnx2x_sp_mapping(bp, context)));
+-#endif
+- REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4, PXP_ONE_ILT(i));
+-
+-#ifdef BCM_ISCSI
+- /* Port0 1
+- * Port1 385 */
+- i++;
+- wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
+- wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
+- REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
+- REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
+-
+- /* Port0 2
+- * Port1 386 */
+- i++;
+- wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
+- wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
+- REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
+- REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
+-
+- /* Port0 3
+- * Port1 387 */
+- i++;
+- wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
+- wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
+- REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
+- REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
+-#endif
+-
+- /* Port TCM comes here */
+- /* Port UCM comes here */
+- /* Port CCM comes here */
+- bnx2x_init_block(bp, func ? XCM_PORT1_START : XCM_PORT0_START,
+- func ? XCM_PORT1_END : XCM_PORT0_END);
+-
+-#ifdef USE_DMAE
+- wb_write[0] = 0;
+- wb_write[1] = 0;
+-#endif
+- for (i = 0; i < 32; i++) {
+- REG_WR(bp, QM_REG_BASEADDR + (func*32 + i)*4, 1024 * 4 * i);
+-#ifdef USE_DMAE
+- REG_WR_DMAE(bp, QM_REG_PTRTBL + (func*32 + i)*8, wb_write, 2);
+-#else
+- REG_WR_IND(bp, QM_REG_PTRTBL + (func*32 + i)*8, 0);
+- REG_WR_IND(bp, QM_REG_PTRTBL + (func*32 + i)*8 + 4, 0);
+-#endif
+- }
+- REG_WR(bp, QM_REG_CONNNUM_0 + func*4, 1024/16 - 1);
+-
+- /* Port QM comes here */
+-
+-#ifdef BCM_ISCSI
+- REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
+- REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
+-
+- bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
+- func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
+-#endif
+- /* Port DQ comes here */
+- /* Port BRB1 comes here */
+- bnx2x_init_block(bp, func ? PRS_PORT1_START : PRS_PORT0_START,
+- func ? PRS_PORT1_END : PRS_PORT0_END);
+- /* Port TSDM comes here */
+- /* Port CSDM comes here */
+- /* Port USDM comes here */
+- /* Port XSDM comes here */
+- bnx2x_init_block(bp, func ? TSEM_PORT1_START : TSEM_PORT0_START,
+- func ? TSEM_PORT1_END : TSEM_PORT0_END);
+- bnx2x_init_block(bp, func ? USEM_PORT1_START : USEM_PORT0_START,
+- func ? USEM_PORT1_END : USEM_PORT0_END);
+- bnx2x_init_block(bp, func ? CSEM_PORT1_START : CSEM_PORT0_START,
+- func ? CSEM_PORT1_END : CSEM_PORT0_END);
+- bnx2x_init_block(bp, func ? XSEM_PORT1_START : XSEM_PORT0_START,
+- func ? XSEM_PORT1_END : XSEM_PORT0_END);
+- /* Port UPB comes here */
+- /* Port XSDM comes here */
+- bnx2x_init_block(bp, func ? PBF_PORT1_START : PBF_PORT0_START,
+- func ? PBF_PORT1_END : PBF_PORT0_END);
+-
+- /* configure PBF to work without PAUSE mtu 9000 */
+- REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + func*4, 0);
+-
+- /* update threshold */
+- REG_WR(bp, PBF_REG_P0_ARB_THRSH + func*4, (9040/16));
+- /* update init credit */
+- REG_WR(bp, PBF_REG_P0_INIT_CRD + func*4, (9040/16) + 553 - 22);
+-
+- /* probe changes */
+- REG_WR(bp, PBF_REG_INIT_P0 + func*4, 1);
+- msleep(5);
+- REG_WR(bp, PBF_REG_INIT_P0 + func*4, 0);
+-
+-#ifdef BCM_ISCSI
+- /* tell the searcher where the T2 table is */
+- REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
+-
+- wb_write[0] = U64_LO(bp->t2_mapping);
+- wb_write[1] = U64_HI(bp->t2_mapping);
+- REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
+- wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
+- wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
+- REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
+-
+- REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
+- /* Port SRCH comes here */
+-#endif
+- /* Port CDU comes here */
+- /* Port CFC comes here */
+- bnx2x_init_block(bp, func ? HC_PORT1_START : HC_PORT0_START,
+- func ? HC_PORT1_END : HC_PORT0_END);
+- bnx2x_init_block(bp, func ? MISC_AEU_PORT1_START :
+- MISC_AEU_PORT0_START,
+- func ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
+- /* Port PXPCS comes here */
+- /* Port EMAC0 comes here */
+- /* Port EMAC1 comes here */
+- /* Port DBU comes here */
+- /* Port DBG comes here */
+- bnx2x_init_block(bp, func ? NIG_PORT1_START : NIG_PORT0_START,
+- func ? NIG_PORT1_END : NIG_PORT0_END);
+- REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + func*4, 1);
+- /* Port MCP comes here */
+- /* Port DMAE comes here */
+-
+- switch (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
+- case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
+- /* add SPIO 5 to group 0 */
+- val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
+- val |= AEU_INPUTS_ATTN_BITS_SPIO5;
+- REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
+- break;
+-
+- default:
+- break;
+- }
+-
+- bnx2x_link_reset(bp);
+-
+- /* Reset PCIE errors for debug */
+- REG_WR(bp, 0x2114, 0xffffffff);
+- REG_WR(bp, 0x2120, 0xffffffff);
+- REG_WR(bp, 0x2814, 0xffffffff);
+-
+- /* !!! move to init_values.h */
+- REG_WR(bp, XSDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
+- REG_WR(bp, USDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
+- REG_WR(bp, CSDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
+- REG_WR(bp, TSDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
+-
+- REG_WR(bp, DBG_REG_PCI_REQ_CREDIT, 0x1);
+- REG_WR(bp, TM_REG_PCIARB_CRDCNT_VAL, 0x1);
+- REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
+- REG_WR(bp, CDU_REG_CDU_DEBUG, 0x0);
+-
+- bnx2x_gunzip_end(bp);
+-
+- if (!nomcp) {
+- port = bp->port;
+-
+- bp->fw_drv_pulse_wr_seq =
+- (SHMEM_RD(bp, func_mb[port].drv_pulse_mb) &
+- DRV_PULSE_SEQ_MASK);
+- bp->fw_mb = SHMEM_RD(bp, func_mb[port].fw_mb_param);
+- DP(BNX2X_MSG_MCP, "drv_pulse 0x%x fw_mb 0x%x\n",
+- bp->fw_drv_pulse_wr_seq, bp->fw_mb);
+- } else {
+- bp->fw_mb = 0;
+- }
+-
+- return 0;
+-}
+-
+-/* send the MCP a request, block until there is a reply */
+-static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
+-{
+- int port = bp->port;
+- u32 seq = ++bp->fw_seq;
+- u32 rc = 0;
+-
+- SHMEM_WR(bp, func_mb[port].drv_mb_header, (command | seq));
+- DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
+-
+- /* let the FW do it's magic ... */
+- msleep(100); /* TBD */
+-
+- if (CHIP_REV_IS_SLOW(bp))
+- msleep(900);
+-
+- rc = SHMEM_RD(bp, func_mb[port].fw_mb_header);
+- DP(BNX2X_MSG_MCP, "read (%x) seq is (%x) from FW MB\n", rc, seq);
+-
+- /* is this a reply to our command? */
+- if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
+- rc &= FW_MSG_CODE_MASK;
+-
+- } else {
+- /* FW BUG! */
+- BNX2X_ERR("FW failed to respond!\n");
+- bnx2x_fw_dump(bp);
+- rc = 0;
+- }
+-
+- return rc;
+-}
+-
+-static void bnx2x_free_mem(struct bnx2x *bp)
+-{
+-
+-#define BNX2X_PCI_FREE(x, y, size) \
+- do { \
+- if (x) { \
+- pci_free_consistent(bp->pdev, size, x, y); \
+- x = NULL; \
+- y = 0; \
+- } \
+- } while (0)
+-
+-#define BNX2X_FREE(x) \
+- do { \
+- if (x) { \
+- vfree(x); \
+- x = NULL; \
+- } \
+- } while (0)
+-
+- int i;
+-
+- /* fastpath */
+- for_each_queue(bp, i) {
+-
+- /* Status blocks */
+- BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
+- bnx2x_fp(bp, i, status_blk_mapping),
+- sizeof(struct host_status_block) +
+- sizeof(struct eth_tx_db_data));
+-
+- /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
+- BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
+- BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
+- bnx2x_fp(bp, i, tx_desc_mapping),
+- sizeof(struct eth_tx_bd) * NUM_TX_BD);
+-
+- BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
+- BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
+- bnx2x_fp(bp, i, rx_desc_mapping),
+- sizeof(struct eth_rx_bd) * NUM_RX_BD);
+-
+- BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
+- bnx2x_fp(bp, i, rx_comp_mapping),
+- sizeof(struct eth_fast_path_rx_cqe) *
+- NUM_RCQ_BD);
+- }
+-
+- BNX2X_FREE(bp->fp);
+-
+- /* end of fastpath */
+-
+- BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
+- (sizeof(struct host_def_status_block)));
+-
+- BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
+- (sizeof(struct bnx2x_slowpath)));
+-
+-#ifdef BCM_ISCSI
+- BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
+- BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
+- BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
+- BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
+-#endif
+- BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, PAGE_SIZE);
+-
+-#undef BNX2X_PCI_FREE
+-#undef BNX2X_KFREE
+-}
+-
+-static int bnx2x_alloc_mem(struct bnx2x *bp)
+-{
+-
+-#define BNX2X_PCI_ALLOC(x, y, size) \
+- do { \
+- x = pci_alloc_consistent(bp->pdev, size, y); \
+- if (x == NULL) \
+- goto alloc_mem_err; \
+- memset(x, 0, size); \
+- } while (0)
+-
+-#define BNX2X_ALLOC(x, size) \
+- do { \
+- x = vmalloc(size); \
+- if (x == NULL) \
+- goto alloc_mem_err; \
+- memset(x, 0, size); \
+- } while (0)
+-
+- int i;
+-
+- /* fastpath */
+- BNX2X_ALLOC(bp->fp, sizeof(struct bnx2x_fastpath) * bp->num_queues);
+-
+- for_each_queue(bp, i) {
+- bnx2x_fp(bp, i, bp) = bp;
+-
+- /* Status blocks */
+- BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
+- &bnx2x_fp(bp, i, status_blk_mapping),
+- sizeof(struct host_status_block) +
+- sizeof(struct eth_tx_db_data));
+-
+- bnx2x_fp(bp, i, hw_tx_prods) =
+- (void *)(bnx2x_fp(bp, i, status_blk) + 1);
+-
+- bnx2x_fp(bp, i, tx_prods_mapping) =
+- bnx2x_fp(bp, i, status_blk_mapping) +
+- sizeof(struct host_status_block);
+-
+- /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
+- BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
+- sizeof(struct sw_tx_bd) * NUM_TX_BD);
+- BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
+- &bnx2x_fp(bp, i, tx_desc_mapping),
+- sizeof(struct eth_tx_bd) * NUM_TX_BD);
+-
+- BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
+- sizeof(struct sw_rx_bd) * NUM_RX_BD);
+- BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
+- &bnx2x_fp(bp, i, rx_desc_mapping),
+- sizeof(struct eth_rx_bd) * NUM_RX_BD);
+-
+- BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
+- &bnx2x_fp(bp, i, rx_comp_mapping),
+- sizeof(struct eth_fast_path_rx_cqe) *
+- NUM_RCQ_BD);
+-
+- }
+- /* end of fastpath */
+-
+- BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
+- sizeof(struct host_def_status_block));
+-
+- BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
+- sizeof(struct bnx2x_slowpath));
+-
+-#ifdef BCM_ISCSI
+- BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
+-
+- /* Initialize T1 */
+- for (i = 0; i < 64*1024; i += 64) {
+- *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
+- *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
+- }
+-
+- /* allocate searcher T2 table
+- we allocate 1/4 of alloc num for T2
+- (which is not entered into the ILT) */
+- BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
+-
+- /* Initialize T2 */
+- for (i = 0; i < 16*1024; i += 64)
+- * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
+-
+- /* now fixup the last line in the block to point to the next block */
+- *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
+-
+- /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
+- BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
+-
+- /* QM queues (128*MAX_CONN) */
+- BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
+-#endif
+-
+- /* Slow path ring */
+- BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
+-
+- return 0;
+-
+-alloc_mem_err:
+- bnx2x_free_mem(bp);
+- return -ENOMEM;
+-
+-#undef BNX2X_PCI_ALLOC
+-#undef BNX2X_ALLOC
+-}
+-
+-static void bnx2x_free_tx_skbs(struct bnx2x *bp)
+-{
+- int i;
+-
+- for_each_queue(bp, i) {
+- struct bnx2x_fastpath *fp = &bp->fp[i];
+-
+- u16 bd_cons = fp->tx_bd_cons;
+- u16 sw_prod = fp->tx_pkt_prod;
+- u16 sw_cons = fp->tx_pkt_cons;
+-
+- BUG_TRAP(fp->tx_buf_ring != NULL);
+-
+- while (sw_cons != sw_prod) {
+- bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
+- sw_cons++;
+- }
+- }
+-}
+-
+-static void bnx2x_free_rx_skbs(struct bnx2x *bp)
+-{
+- int i, j;
+-
+- for_each_queue(bp, j) {
+- struct bnx2x_fastpath *fp = &bp->fp[j];
+-
+- BUG_TRAP(fp->rx_buf_ring != NULL);
+-
+- for (i = 0; i < NUM_RX_BD; i++) {
+- struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
+- struct sk_buff *skb = rx_buf->skb;
+-
+- if (skb == NULL)
+- continue;
+-
+- pci_unmap_single(bp->pdev,
+- pci_unmap_addr(rx_buf, mapping),
+- bp->rx_buf_use_size,
+- PCI_DMA_FROMDEVICE);
+-
+- rx_buf->skb = NULL;
+- dev_kfree_skb(skb);
+- }
+- }
+-}
+-
+-static void bnx2x_free_skbs(struct bnx2x *bp)
+-{
+- bnx2x_free_tx_skbs(bp);
+- bnx2x_free_rx_skbs(bp);
+-}
+-
+-static void bnx2x_free_msix_irqs(struct bnx2x *bp)
+-{
+- int i;
+-
+- free_irq(bp->msix_table[0].vector, bp->dev);
+- DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
+- bp->msix_table[0].vector);
+-
+- for_each_queue(bp, i) {
+- DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
+- "state(%x)\n", i, bp->msix_table[i + 1].vector,
+- bnx2x_fp(bp, i, state));
+-
+- if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
+- BNX2X_ERR("IRQ of fp #%d being freed while "
+- "state != closed\n", i);
+-
+- free_irq(bp->msix_table[i + 1].vector, &bp->fp[i]);
+- }
+-
+-}
+-
+-static void bnx2x_free_irq(struct bnx2x *bp)
+-{
+-
+- if (bp->flags & USING_MSIX_FLAG) {
+-
+- bnx2x_free_msix_irqs(bp);
+- pci_disable_msix(bp->pdev);
+-
+- bp->flags &= ~USING_MSIX_FLAG;
+-
+- } else
+- free_irq(bp->pdev->irq, bp->dev);
+-}
+-
+-static int bnx2x_enable_msix(struct bnx2x *bp)
+-{
+-
+- int i;
+-
+- bp->msix_table[0].entry = 0;
+- for_each_queue(bp, i)
+- bp->msix_table[i + 1].entry = i + 1;
+-
+- if (pci_enable_msix(bp->pdev, &bp->msix_table[0],
+- bp->num_queues + 1)){
+- BNX2X_LOG("failed to enable MSI-X\n");
+- return -1;
+-
+- }
+-
+- bp->flags |= USING_MSIX_FLAG;
+-
+- return 0;
+-
+-}
+-
+-
+-static int bnx2x_req_msix_irqs(struct bnx2x *bp)
+-{
+-
+- int i, rc;
+-
+- rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
+- bp->dev->name, bp->dev);
+-
+- if (rc) {
+- BNX2X_ERR("request sp irq failed\n");
+- return -EBUSY;
+- }
+-
+- for_each_queue(bp, i) {
+- rc = request_irq(bp->msix_table[i + 1].vector,
+- bnx2x_msix_fp_int, 0,
+- bp->dev->name, &bp->fp[i]);
+-
+- if (rc) {
+- BNX2X_ERR("request fp #%d irq failed "
+- "rc %d\n", i, rc);
+- bnx2x_free_msix_irqs(bp);
+- return -EBUSY;
+- }
+-
+- bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
+-
+- }
+-
+- return 0;
+-
+-}
+-
+-static int bnx2x_req_irq(struct bnx2x *bp)
+-{
+-
+- int rc = request_irq(bp->pdev->irq, bnx2x_interrupt,
+- IRQF_SHARED, bp->dev->name, bp->dev);
+- if (!rc)
+- bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
+-
+- return rc;
+-
+-}
+-
+-/*
+- * Init service functions
+- */
+-
+-static void bnx2x_set_mac_addr(struct bnx2x *bp)
+-{
+- struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
+-
+- /* CAM allocation
+- * unicasts 0-31:port0 32-63:port1
+- * multicast 64-127:port0 128-191:port1
+- */
+- config->hdr.length_6b = 2;
+- config->hdr.offset = bp->port ? 31 : 0;
+- config->hdr.reserved0 = 0;
+- config->hdr.reserved1 = 0;
+-
+- /* primary MAC */
+- config->config_table[0].cam_entry.msb_mac_addr =
+- swab16(*(u16 *)&bp->dev->dev_addr[0]);
+- config->config_table[0].cam_entry.middle_mac_addr =
+- swab16(*(u16 *)&bp->dev->dev_addr[2]);
+- config->config_table[0].cam_entry.lsb_mac_addr =
+- swab16(*(u16 *)&bp->dev->dev_addr[4]);
+- config->config_table[0].cam_entry.flags = cpu_to_le16(bp->port);
+- config->config_table[0].target_table_entry.flags = 0;
+- config->config_table[0].target_table_entry.client_id = 0;
+- config->config_table[0].target_table_entry.vlan_id = 0;
+-
+- DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x)\n",
+- config->config_table[0].cam_entry.msb_mac_addr,
+- config->config_table[0].cam_entry.middle_mac_addr,
+- config->config_table[0].cam_entry.lsb_mac_addr);
+-
+- /* broadcast */
+- config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
+- config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
+- config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
+- config->config_table[1].cam_entry.flags = cpu_to_le16(bp->port);
+- config->config_table[1].target_table_entry.flags =
+- TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
+- config->config_table[1].target_table_entry.client_id = 0;
+- config->config_table[1].target_table_entry.vlan_id = 0;
+-
+- bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
+- U64_HI(bnx2x_sp_mapping(bp, mac_config)),
+- U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
+-}
+-
+-static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
+- int *state_p, int poll)
+-{
+- /* can take a while if any port is running */
+- int timeout = 500;
+-
+- DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
+- poll ? "polling" : "waiting", state, idx);
+-
+- might_sleep();
+-
+- while (timeout) {
+-
+- if (poll) {
+- bnx2x_rx_int(bp->fp, 10);
+- /* If index is different from 0
+- * The reply for some commands will
+- * be on the none default queue
+- */
+- if (idx)
+- bnx2x_rx_int(&bp->fp[idx], 10);
+- }
+-
+- mb(); /* state is changed by bnx2x_sp_event()*/
+-
+- if (*state_p == state)
+- return 0;
+-
+- timeout--;
+- msleep(1);
+-
+- }
+-
+- /* timeout! */
+- BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
+- poll ? "polling" : "waiting", state, idx);
+-
+- return -EBUSY;
+-}
+-
+-static int bnx2x_setup_leading(struct bnx2x *bp)
+-{
+-
+- /* reset IGU state */
+- bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
+-
+- /* SETUP ramrod */
+- bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
+-
+- return bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
+-
+-}
+-
+-static int bnx2x_setup_multi(struct bnx2x *bp, int index)
+-{
+-
+- /* reset IGU state */
+- bnx2x_ack_sb(bp, index, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
+-
+- /* SETUP ramrod */
+- bp->fp[index].state = BNX2X_FP_STATE_OPENING;
+- bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
+-
+- /* Wait for completion */
+- return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
+- &(bp->fp[index].state), 0);
+-
+-}
+-
+-
+-static int bnx2x_poll(struct napi_struct *napi, int budget);
+-static void bnx2x_set_rx_mode(struct net_device *dev);
+-
+-static int bnx2x_nic_load(struct bnx2x *bp, int req_irq)
+-{
+- u32 load_code;
+- int i;
+-
+- bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
+-
+- /* Send LOAD_REQUEST command to MCP.
+- Returns the type of LOAD command: if it is the
+- first port to be initialized common blocks should be
+- initialized, otherwise - not.
+- */
+- if (!nomcp) {
+- load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
+- if (!load_code) {
+- BNX2X_ERR("MCP response failure, unloading\n");
+- return -EBUSY;
+- }
+- if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
+- BNX2X_ERR("MCP refused load request, unloading\n");
+- return -EBUSY; /* other port in diagnostic mode */
+- }
+- } else {
+- load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
+- }
+-
+- /* if we can't use msix we only need one fp,
+- * so try to enable msix with the requested number of fp's
+- * and fallback to inta with one fp
+- */
+- if (req_irq) {
+- if (use_inta) {
+- bp->num_queues = 1;
+- } else {
+- if ((use_multi > 1) && (use_multi <= 16))
+- /* user requested number */
+- bp->num_queues = use_multi;
+- else if (use_multi == 1)
+- bp->num_queues = num_online_cpus();
+- else
+- bp->num_queues = 1;
+-
+- if (bnx2x_enable_msix(bp)) {
+- /* failed to enable msix */
+- bp->num_queues = 1;
+- if (use_multi)
+- BNX2X_ERR("Multi requested but failed"
+- " to enable MSI-X\n");
+- }
+- }
+- }
+-
+- DP(NETIF_MSG_IFUP, "set number of queues to %d\n", bp->num_queues);
+-
+- if (bnx2x_alloc_mem(bp))
+- return -ENOMEM;
+-
+- if (req_irq) {
+- if (bp->flags & USING_MSIX_FLAG) {
+- if (bnx2x_req_msix_irqs(bp)) {
+- pci_disable_msix(bp->pdev);
+- goto load_error;
+- }
+-
+- } else {
+- if (bnx2x_req_irq(bp)) {
+- BNX2X_ERR("IRQ request failed, aborting\n");
+- goto load_error;
+- }
+- }
+- }
+-
+- for_each_queue(bp, i)
+- netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
+- bnx2x_poll, 128);
+-
+-
+- /* Initialize HW */
+- if (bnx2x_function_init(bp,
+- (load_code == FW_MSG_CODE_DRV_LOAD_COMMON))) {
+- BNX2X_ERR("HW init failed, aborting\n");
+- goto load_error;
+- }
+-
+-
+- atomic_set(&bp->intr_sem, 0);
+-
+-
+- /* Setup NIC internals and enable interrupts */
+- bnx2x_nic_init(bp);
+-
+- /* Send LOAD_DONE command to MCP */
+- if (!nomcp) {
+- load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
+- if (!load_code) {
+- BNX2X_ERR("MCP response failure, unloading\n");
+- goto load_int_disable;
+- }
+- }
+-
+- bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
+-
+- /* Enable Rx interrupt handling before sending the ramrod
+- as it's completed on Rx FP queue */
+- for_each_queue(bp, i)
+- napi_enable(&bnx2x_fp(bp, i, napi));
+-
+- if (bnx2x_setup_leading(bp))
+- goto load_stop_netif;
+-
+- for_each_nondefault_queue(bp, i)
+- if (bnx2x_setup_multi(bp, i))
+- goto load_stop_netif;
+-
+- bnx2x_set_mac_addr(bp);
+-
+- bnx2x_phy_init(bp);
+-
+- /* Start fast path */
+- if (req_irq) { /* IRQ is only requested from bnx2x_open */
+- netif_start_queue(bp->dev);
+- if (bp->flags & USING_MSIX_FLAG)
+- printk(KERN_INFO PFX "%s: using MSI-X\n",
+- bp->dev->name);
+-
+- /* Otherwise Tx queue should be only reenabled */
+- } else if (netif_running(bp->dev)) {
+- netif_wake_queue(bp->dev);
+- bnx2x_set_rx_mode(bp->dev);
+- }
+-
+- /* start the timer */
+- mod_timer(&bp->timer, jiffies + bp->current_interval);
+-
+- return 0;
+-
+-load_stop_netif:
+- for_each_queue(bp, i)
+- napi_disable(&bnx2x_fp(bp, i, napi));
+-
+-load_int_disable:
+- bnx2x_int_disable_sync(bp);
+-
+- bnx2x_free_skbs(bp);
+- bnx2x_free_irq(bp);
+-
+-load_error:
+- bnx2x_free_mem(bp);
+-
+- /* TBD we really need to reset the chip
+- if we want to recover from this */
+- return -EBUSY;
+-}
+-
+-
+-static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
+-{
+- int port = bp->port;
+-#ifdef USE_DMAE
+- u32 wb_write[2];
+-#endif
+- int base, i;
+-
+- DP(NETIF_MSG_IFDOWN, "reset called with code %x\n", reset_code);
+-
+- /* Do not rcv packets to BRB */
+- REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
+- /* Do not direct rcv packets that are not for MCP to the BRB */
+- REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
+- NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
+-
+- /* Configure IGU and AEU */
+- REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
+- REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
+-
+- /* TODO: Close Doorbell port? */
+-
+- /* Clear ILT */
+-#ifdef USE_DMAE
+- wb_write[0] = 0;
+- wb_write[1] = 0;
+-#endif
+- base = port * RQ_ONCHIP_AT_PORT_SIZE;
+- for (i = base; i < base + RQ_ONCHIP_AT_PORT_SIZE; i++) {
+-#ifdef USE_DMAE
+- REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
+-#else
+- REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT, 0);
+- REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT + 4, 0);
+-#endif
+- }
+-
+- if (reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) {
+- /* reset_common */
+- REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
+- 0xd3ffff7f);
+- REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+- 0x1403);
+- }
+-}
+-
+-static int bnx2x_stop_multi(struct bnx2x *bp, int index)
+-{
+-
+- int rc;
+-
+- /* halt the connection */
+- bp->fp[index].state = BNX2X_FP_STATE_HALTING;
+- bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0);
+-
+-
+- rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
+- &(bp->fp[index].state), 1);
+- if (rc) /* timeout */
+- return rc;
+-
+- /* delete cfc entry */
+- bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
+-
+- return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
+- &(bp->fp[index].state), 1);
+-
+-}
+-
+-
+-static void bnx2x_stop_leading(struct bnx2x *bp)
+-{
+- u16 dsb_sp_prod_idx;
+- /* if the other port is handling traffic,
+- this can take a lot of time */
+- int timeout = 500;
+-
+- might_sleep();
+-
+- /* Send HALT ramrod */
+- bp->fp[0].state = BNX2X_FP_STATE_HALTING;
+- bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, 0, 0);
+-
+- if (bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
+- &(bp->fp[0].state), 1))
+- return;
+-
+- dsb_sp_prod_idx = *bp->dsb_sp_prod;
+-
+- /* Send PORT_DELETE ramrod */
+- bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
+-
+- /* Wait for completion to arrive on default status block
+- we are going to reset the chip anyway
+- so there is not much to do if this times out
+- */
+- while ((dsb_sp_prod_idx == *bp->dsb_sp_prod) && timeout) {
+- timeout--;
+- msleep(1);
+- }
+- if (!timeout) {
+- DP(NETIF_MSG_IFDOWN, "timeout polling for completion "
+- "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
+- *bp->dsb_sp_prod, dsb_sp_prod_idx);
+- }
+- bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
+- bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
+-}
+-
+-
+-static int bnx2x_nic_unload(struct bnx2x *bp, int free_irq)
+-{
+- u32 reset_code = 0;
+- int i, timeout;
+-
+- bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
+-
+- del_timer_sync(&bp->timer);
+-
+- bp->rx_mode = BNX2X_RX_MODE_NONE;
+- bnx2x_set_storm_rx_mode(bp);
+-
+- if (netif_running(bp->dev)) {
+- netif_tx_disable(bp->dev);
+- bp->dev->trans_start = jiffies; /* prevent tx timeout */
+- }
+-
+- /* Wait until all fast path tasks complete */
+- for_each_queue(bp, i) {
+- struct bnx2x_fastpath *fp = &bp->fp[i];
+-
+- timeout = 1000;
+- while (bnx2x_has_work(fp) && (timeout--))
+- msleep(1);
+- if (!timeout)
+- BNX2X_ERR("timeout waiting for queue[%d]\n", i);
+- }
+-
+- /* Wait until stat ramrod returns and all SP tasks complete */
+- timeout = 1000;
+- while ((bp->stat_pending || (bp->spq_left != MAX_SPQ_PENDING)) &&
+- (timeout--))
+- msleep(1);
+-
+- for_each_queue(bp, i)
+- napi_disable(&bnx2x_fp(bp, i, napi));
+- /* Disable interrupts after Tx and Rx are disabled on stack level */
+- bnx2x_int_disable_sync(bp);
+-
+- if (bp->flags & NO_WOL_FLAG)
+- reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
+-
+- else if (bp->wol) {
+- u32 emac_base = bp->port ? GRCBASE_EMAC0 : GRCBASE_EMAC1;
+- u8 *mac_addr = bp->dev->dev_addr;
+- u32 val = (EMAC_MODE_MPKT | EMAC_MODE_MPKT_RCVD |
+- EMAC_MODE_ACPI_RCVD);
+-
+- EMAC_WR(EMAC_REG_EMAC_MODE, val);
+-
+- val = (mac_addr[0] << 8) | mac_addr[1];
+- EMAC_WR(EMAC_REG_EMAC_MAC_MATCH, val);
+-
+- val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
+- (mac_addr[4] << 8) | mac_addr[5];
+- EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + 4, val);
+-
+- reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
+-
+- } else
+- reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
+-
+- /* Close multi and leading connections */
+- for_each_nondefault_queue(bp, i)
+- if (bnx2x_stop_multi(bp, i))
+- goto unload_error;
+-
+- bnx2x_stop_leading(bp);
+- if ((bp->state != BNX2X_STATE_CLOSING_WAIT4_UNLOAD) ||
+- (bp->fp[0].state != BNX2X_FP_STATE_CLOSED)) {
+- DP(NETIF_MSG_IFDOWN, "failed to close leading properly!"
+- "state 0x%x fp[0].state 0x%x",
+- bp->state, bp->fp[0].state);
+- }
+-
+-unload_error:
+- bnx2x_link_reset(bp);
+-
+- if (!nomcp)
+- reset_code = bnx2x_fw_command(bp, reset_code);
+- else
+- reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
+-
+- /* Release IRQs */
+- if (free_irq)
+- bnx2x_free_irq(bp);
+-
+- /* Reset the chip */
+- bnx2x_reset_chip(bp, reset_code);
+-
+- /* Report UNLOAD_DONE to MCP */
+- if (!nomcp)
+- bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
+-
+- /* Free SKBs and driver internals */
+- bnx2x_free_skbs(bp);
+- bnx2x_free_mem(bp);
+-
+- bp->state = BNX2X_STATE_CLOSED;
+-
+- netif_carrier_off(bp->dev);
+-
+- return 0;
+-}
+-
+-/* end of nic load/unload */
+-
+-/* ethtool_ops */
+-
+-/*
+- * Init service functions
+- */
+-
+-static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg)
+-{
+- int port = bp->port;
+- u32 ext_phy_type;
+-
+- bp->phy_flags = 0;
+-
+- switch (switch_cfg) {
+- case SWITCH_CFG_1G:
+- BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
+-
+- ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
+- switch (ext_phy_type) {
+- case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
+- BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
+- ext_phy_type);
+-
+- bp->supported |= (SUPPORTED_10baseT_Half |
+- SUPPORTED_10baseT_Full |
+- SUPPORTED_100baseT_Half |
+- SUPPORTED_100baseT_Full |
+- SUPPORTED_1000baseT_Full |
+- SUPPORTED_2500baseX_Full |
+- SUPPORTED_TP | SUPPORTED_FIBRE |
+- SUPPORTED_Autoneg |
+- SUPPORTED_Pause |
+- SUPPORTED_Asym_Pause);
+- break;
+-
+- case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
+- BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
+- ext_phy_type);
+-
+- bp->phy_flags |= PHY_SGMII_FLAG;
+-
+- bp->supported |= (SUPPORTED_10baseT_Half |
+- SUPPORTED_10baseT_Full |
+- SUPPORTED_100baseT_Half |
+- SUPPORTED_100baseT_Full |
+- SUPPORTED_1000baseT_Full |
+- SUPPORTED_TP | SUPPORTED_FIBRE |
+- SUPPORTED_Autoneg |
+- SUPPORTED_Pause |
+- SUPPORTED_Asym_Pause);
+- break;
+-
+- default:
+- BNX2X_ERR("NVRAM config error. "
+- "BAD SerDes ext_phy_config 0x%x\n",
+- bp->ext_phy_config);
+- return;
+- }
+-
+- bp->phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
+- port*0x10);
+- BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->phy_addr);
+- break;
+-
+- case SWITCH_CFG_10G:
+- BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
+-
+- bp->phy_flags |= PHY_XGXS_FLAG;
+-
+- ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
+- switch (ext_phy_type) {
+- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
+- BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
+- ext_phy_type);
+-
+- bp->supported |= (SUPPORTED_10baseT_Half |
+- SUPPORTED_10baseT_Full |
+- SUPPORTED_100baseT_Half |
+- SUPPORTED_100baseT_Full |
+- SUPPORTED_1000baseT_Full |
+- SUPPORTED_2500baseX_Full |
+- SUPPORTED_10000baseT_Full |
+- SUPPORTED_TP | SUPPORTED_FIBRE |
+- SUPPORTED_Autoneg |
+- SUPPORTED_Pause |
+- SUPPORTED_Asym_Pause);
+- break;
+-
+- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
+- BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
+- ext_phy_type);
+-
+- bp->supported |= (SUPPORTED_10000baseT_Full |
+- SUPPORTED_FIBRE |
+- SUPPORTED_Pause |
+- SUPPORTED_Asym_Pause);
+- break;
+-
+- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
+- BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
+- ext_phy_type);
+-
+- bp->supported |= (SUPPORTED_10000baseT_Full |
+- SUPPORTED_1000baseT_Full |
+- SUPPORTED_Autoneg |
+- SUPPORTED_FIBRE |
+- SUPPORTED_Pause |
+- SUPPORTED_Asym_Pause);
+- break;
+-
+- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
+- BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
+- ext_phy_type);
+-
+- bp->supported |= (SUPPORTED_10000baseT_Full |
+- SUPPORTED_1000baseT_Full |
+- SUPPORTED_FIBRE |
+- SUPPORTED_Autoneg |
+- SUPPORTED_Pause |
+- SUPPORTED_Asym_Pause);
+- break;
+-
+- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
+- BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
+- ext_phy_type);
+-
+- bp->supported |= (SUPPORTED_10000baseT_Full |
+- SUPPORTED_TP |
+- SUPPORTED_Autoneg |
+- SUPPORTED_Pause |
+- SUPPORTED_Asym_Pause);
+- break;
+-
+- default:
+- BNX2X_ERR("NVRAM config error. "
+- "BAD XGXS ext_phy_config 0x%x\n",
+- bp->ext_phy_config);
+- return;
+- }
+-
+- bp->phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
+- port*0x18);
+- BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->phy_addr);
+-
+- bp->ser_lane = ((bp->lane_config &
+- PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
+- PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
+- bp->rx_lane_swap = ((bp->lane_config &
+- PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >>
+- PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT);
+- bp->tx_lane_swap = ((bp->lane_config &
+- PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK) >>
+- PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT);
+- BNX2X_DEV_INFO("rx_lane_swap 0x%x tx_lane_swap 0x%x\n",
+- bp->rx_lane_swap, bp->tx_lane_swap);
+- break;
+-
+- default:
+- BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
+- bp->link_config);
+- return;
+- }
+-
+- /* mask what we support according to speed_cap_mask */
+- if (!(bp->speed_cap_mask &
+- PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
+- bp->supported &= ~SUPPORTED_10baseT_Half;
+-
+- if (!(bp->speed_cap_mask &
+- PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
+- bp->supported &= ~SUPPORTED_10baseT_Full;
+-
+- if (!(bp->speed_cap_mask &
+- PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
+- bp->supported &= ~SUPPORTED_100baseT_Half;
+-
+- if (!(bp->speed_cap_mask &
+- PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
+- bp->supported &= ~SUPPORTED_100baseT_Full;
+-
+- if (!(bp->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
+- bp->supported &= ~(SUPPORTED_1000baseT_Half |
+- SUPPORTED_1000baseT_Full);
+-
+- if (!(bp->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
+- bp->supported &= ~SUPPORTED_2500baseX_Full;
+-
+- if (!(bp->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
+- bp->supported &= ~SUPPORTED_10000baseT_Full;
+-
+- BNX2X_DEV_INFO("supported 0x%x\n", bp->supported);
+-}
+-
+-static void bnx2x_link_settings_requested(struct bnx2x *bp)
+-{
+- bp->req_autoneg = 0;
+- bp->req_duplex = DUPLEX_FULL;
+-
+- switch (bp->link_config & PORT_FEATURE_LINK_SPEED_MASK) {
+- case PORT_FEATURE_LINK_SPEED_AUTO:
+- if (bp->supported & SUPPORTED_Autoneg) {
+- bp->req_autoneg |= AUTONEG_SPEED;
+- bp->req_line_speed = 0;
+- bp->advertising = bp->supported;
+- } else {
+- if (XGXS_EXT_PHY_TYPE(bp) ==
+- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) {
+- /* force 10G, no AN */
+- bp->req_line_speed = SPEED_10000;
+- bp->advertising =
+- (ADVERTISED_10000baseT_Full |
+- ADVERTISED_FIBRE);
+- break;
+- }
+- BNX2X_ERR("NVRAM config error. "
+- "Invalid link_config 0x%x"
+- " Autoneg not supported\n",
+- bp->link_config);
+- return;
+- }
+- break;
+-
+- case PORT_FEATURE_LINK_SPEED_10M_FULL:
+- if (bp->supported & SUPPORTED_10baseT_Full) {
+- bp->req_line_speed = SPEED_10;
+- bp->advertising = (ADVERTISED_10baseT_Full |
+- ADVERTISED_TP);
+- } else {
+- BNX2X_ERR("NVRAM config error. "
+- "Invalid link_config 0x%x"
+- " speed_cap_mask 0x%x\n",
+- bp->link_config, bp->speed_cap_mask);
+- return;
+- }
+- break;
+-
+- case PORT_FEATURE_LINK_SPEED_10M_HALF:
+- if (bp->supported & SUPPORTED_10baseT_Half) {
+- bp->req_line_speed = SPEED_10;
+- bp->req_duplex = DUPLEX_HALF;
+- bp->advertising = (ADVERTISED_10baseT_Half |
+- ADVERTISED_TP);
+- } else {
+- BNX2X_ERR("NVRAM config error. "
+- "Invalid link_config 0x%x"
+- " speed_cap_mask 0x%x\n",
+- bp->link_config, bp->speed_cap_mask);
+- return;
+- }
+- break;
+-
+- case PORT_FEATURE_LINK_SPEED_100M_FULL:
+- if (bp->supported & SUPPORTED_100baseT_Full) {
+- bp->req_line_speed = SPEED_100;
+- bp->advertising = (ADVERTISED_100baseT_Full |
+- ADVERTISED_TP);
+- } else {
+- BNX2X_ERR("NVRAM config error. "
+- "Invalid link_config 0x%x"
+- " speed_cap_mask 0x%x\n",
+- bp->link_config, bp->speed_cap_mask);
+- return;
+- }
+- break;
+-
+- case PORT_FEATURE_LINK_SPEED_100M_HALF:
+- if (bp->supported & SUPPORTED_100baseT_Half) {
+- bp->req_line_speed = SPEED_100;
+- bp->req_duplex = DUPLEX_HALF;
+- bp->advertising = (ADVERTISED_100baseT_Half |
+- ADVERTISED_TP);
+- } else {
+- BNX2X_ERR("NVRAM config error. "
+- "Invalid link_config 0x%x"
+- " speed_cap_mask 0x%x\n",
+- bp->link_config, bp->speed_cap_mask);
+- return;
+- }
+- break;
+-
+- case PORT_FEATURE_LINK_SPEED_1G:
+- if (bp->supported & SUPPORTED_1000baseT_Full) {
+- bp->req_line_speed = SPEED_1000;
+- bp->advertising = (ADVERTISED_1000baseT_Full |
+- ADVERTISED_TP);
+- } else {
+- BNX2X_ERR("NVRAM config error. "
+- "Invalid link_config 0x%x"
+- " speed_cap_mask 0x%x\n",
+- bp->link_config, bp->speed_cap_mask);
+- return;
+- }
+- break;
+-
+- case PORT_FEATURE_LINK_SPEED_2_5G:
+- if (bp->supported & SUPPORTED_2500baseX_Full) {
+- bp->req_line_speed = SPEED_2500;
+- bp->advertising = (ADVERTISED_2500baseX_Full |
+- ADVERTISED_TP);
+- } else {
+- BNX2X_ERR("NVRAM config error. "
+- "Invalid link_config 0x%x"
+- " speed_cap_mask 0x%x\n",
+- bp->link_config, bp->speed_cap_mask);
+- return;
+- }
+- break;
+-
+- case PORT_FEATURE_LINK_SPEED_10G_CX4:
+- case PORT_FEATURE_LINK_SPEED_10G_KX4:
+- case PORT_FEATURE_LINK_SPEED_10G_KR:
+- if (bp->supported & SUPPORTED_10000baseT_Full) {
+- bp->req_line_speed = SPEED_10000;
+- bp->advertising = (ADVERTISED_10000baseT_Full |
+- ADVERTISED_FIBRE);
+- } else {
+- BNX2X_ERR("NVRAM config error. "
+- "Invalid link_config 0x%x"
+- " speed_cap_mask 0x%x\n",
+- bp->link_config, bp->speed_cap_mask);
+- return;
+- }
+- break;
+-
+- default:
+- BNX2X_ERR("NVRAM config error. "
+- "BAD link speed link_config 0x%x\n",
+- bp->link_config);
+- bp->req_autoneg |= AUTONEG_SPEED;
+- bp->req_line_speed = 0;
+- bp->advertising = bp->supported;
+- break;
+- }
+- BNX2X_DEV_INFO("req_line_speed %d req_duplex %d\n",
+- bp->req_line_speed, bp->req_duplex);
+-
+- bp->req_flow_ctrl = (bp->link_config &
+- PORT_FEATURE_FLOW_CONTROL_MASK);
+- if ((bp->req_flow_ctrl == FLOW_CTRL_AUTO) &&
+- (bp->supported & SUPPORTED_Autoneg))
+- bp->req_autoneg |= AUTONEG_FLOW_CTRL;
+-
+- BNX2X_DEV_INFO("req_autoneg 0x%x req_flow_ctrl 0x%x"
+- " advertising 0x%x\n",
+- bp->req_autoneg, bp->req_flow_ctrl, bp->advertising);
+-}
+-
+-static void bnx2x_get_hwinfo(struct bnx2x *bp)
+-{
+- u32 val, val2, val3, val4, id;
+- int port = bp->port;
+- u32 switch_cfg;
+-
+- bp->shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
+- BNX2X_DEV_INFO("shmem offset is %x\n", bp->shmem_base);
+-
+- /* Get the chip revision id and number. */
+- /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
+- val = REG_RD(bp, MISC_REG_CHIP_NUM);
+- id = ((val & 0xffff) << 16);
+- val = REG_RD(bp, MISC_REG_CHIP_REV);
+- id |= ((val & 0xf) << 12);
+- val = REG_RD(bp, MISC_REG_CHIP_METAL);
+- id |= ((val & 0xff) << 4);
+- REG_RD(bp, MISC_REG_BOND_ID);
+- id |= (val & 0xf);
+- bp->chip_id = id;
+- BNX2X_DEV_INFO("chip ID is %x\n", id);
+-
+- if (!bp->shmem_base || (bp->shmem_base != 0xAF900)) {
+- BNX2X_DEV_INFO("MCP not active\n");
+- nomcp = 1;
+- goto set_mac;
+- }
+-
+- val = SHMEM_RD(bp, validity_map[port]);
+- if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
+- != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
+- BNX2X_ERR("BAD MCP validity signature\n");
+-
+- bp->fw_seq = (SHMEM_RD(bp, func_mb[port].drv_mb_header) &
+- DRV_MSG_SEQ_NUMBER_MASK);
+-
+- bp->hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
+- bp->board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
+- bp->serdes_config =
+- SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
+- bp->lane_config =
+- SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
+- bp->ext_phy_config =
+- SHMEM_RD(bp,
+- dev_info.port_hw_config[port].external_phy_config);
+- bp->speed_cap_mask =
+- SHMEM_RD(bp,
+- dev_info.port_hw_config[port].speed_capability_mask);
+-
+- bp->link_config =
+- SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
+-
+- BNX2X_DEV_INFO("hw_config (%08x) board (%08x) serdes_config (%08x)\n"
+- KERN_INFO " lane_config (%08x) ext_phy_config (%08x)\n"
+- KERN_INFO " speed_cap_mask (%08x) link_config (%08x)"
+- " fw_seq (%08x)\n",
+- bp->hw_config, bp->board, bp->serdes_config,
+- bp->lane_config, bp->ext_phy_config,
+- bp->speed_cap_mask, bp->link_config, bp->fw_seq);
+-
+- switch_cfg = (bp->link_config & PORT_FEATURE_CONNECTED_SWITCH_MASK);
+- bnx2x_link_settings_supported(bp, switch_cfg);
+-
+- bp->autoneg = (bp->hw_config & SHARED_HW_CFG_AN_ENABLE_MASK);
+- /* for now disable cl73 */
+- bp->autoneg &= ~SHARED_HW_CFG_AN_ENABLE_CL73;
+- BNX2X_DEV_INFO("autoneg 0x%x\n", bp->autoneg);
+-
+- bnx2x_link_settings_requested(bp);
+-
+- val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
+- val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
+- bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
+- bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
+- bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
+- bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
+- bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
+- bp->dev->dev_addr[5] = (u8)(val & 0xff);
+-
+- memcpy(bp->dev->perm_addr, bp->dev->dev_addr, 6);
+-
+-
+- val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
+- val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
+- val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
+- val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
+-
+- printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
+- val, val2, val3, val4);
+-
+- /* bc ver */
+- if (!nomcp) {
+- bp->bc_ver = val = ((SHMEM_RD(bp, dev_info.bc_rev)) >> 8);
+- BNX2X_DEV_INFO("bc_ver %X\n", val);
+- if (val < BNX2X_BC_VER) {
+- /* for now only warn
+- * later we might need to enforce this */
+- BNX2X_ERR("This driver needs bc_ver %X but found %X,"
+- " please upgrade BC\n", BNX2X_BC_VER, val);
+- }
+- } else {
+- bp->bc_ver = 0;
+- }
+-
+- val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
+- bp->flash_size = (NVRAM_1MB_SIZE << (val & MCPR_NVM_CFG4_FLASH_SIZE));
+- BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
+- bp->flash_size, bp->flash_size);
+-
+- return;
+-
+-set_mac: /* only supposed to happen on emulation/FPGA */
+- BNX2X_ERR("warning rendom MAC workaround active\n");
+- random_ether_addr(bp->dev->dev_addr);
+- memcpy(bp->dev->perm_addr, bp->dev->dev_addr, 6);
+-
+-}
+-
+-/*
+- * ethtool service functions
+- */
+-
+-/* All ethtool functions called with rtnl_lock */
+-
+-static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+-{
+- struct bnx2x *bp = netdev_priv(dev);
+-
+- cmd->supported = bp->supported;
+- cmd->advertising = bp->advertising;
+-
+- if (netif_carrier_ok(dev)) {
+- cmd->speed = bp->line_speed;
+- cmd->duplex = bp->duplex;
+- } else {
+- cmd->speed = bp->req_line_speed;
+- cmd->duplex = bp->req_duplex;
+- }
+-
+- if (bp->phy_flags & PHY_XGXS_FLAG) {
+- u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
+-
+- switch (ext_phy_type) {
+- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
+- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
+- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
+- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
+- cmd->port = PORT_FIBRE;
+- break;
+-
+- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
+- cmd->port = PORT_TP;
+- break;
+-
+- default:
+- DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
+- bp->ext_phy_config);
+- }
+- } else
+- cmd->port = PORT_TP;
+-
+- cmd->phy_address = bp->phy_addr;
+- cmd->transceiver = XCVR_INTERNAL;
+-
+- if (bp->req_autoneg & AUTONEG_SPEED)
+- cmd->autoneg = AUTONEG_ENABLE;
+- else
+- cmd->autoneg = AUTONEG_DISABLE;
+-
+- cmd->maxtxpkt = 0;
+- cmd->maxrxpkt = 0;
+-
+- DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
+- DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
+- DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
+- DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
+- cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
+- cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
+- cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
+-
+- return 0;
+-}
+-
+-static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+-{
+- struct bnx2x *bp = netdev_priv(dev);
+- u32 advertising;
+-
+- DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
+- DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
+- DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
+- DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
+- cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
+- cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
+- cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
+-
+- if (cmd->autoneg == AUTONEG_ENABLE) {
+- if (!(bp->supported & SUPPORTED_Autoneg)) {
+- DP(NETIF_MSG_LINK, "Aotoneg not supported\n");
+- return -EINVAL;
+- }
+-
+- /* advertise the requested speed and duplex if supported */
+- cmd->advertising &= bp->supported;
+-
+- bp->req_autoneg |= AUTONEG_SPEED;
+- bp->req_line_speed = 0;
+- bp->req_duplex = DUPLEX_FULL;
+- bp->advertising |= (ADVERTISED_Autoneg | cmd->advertising);
+-
+- } else { /* forced speed */
+- /* advertise the requested speed and duplex if supported */
+- switch (cmd->speed) {
+- case SPEED_10:
+- if (cmd->duplex == DUPLEX_FULL) {
+- if (!(bp->supported &
+- SUPPORTED_10baseT_Full)) {
+- DP(NETIF_MSG_LINK,
+- "10M full not supported\n");
+- return -EINVAL;
+- }
+-
+- advertising = (ADVERTISED_10baseT_Full |
+- ADVERTISED_TP);
+- } else {
+- if (!(bp->supported &
+- SUPPORTED_10baseT_Half)) {
+- DP(NETIF_MSG_LINK,
+- "10M half not supported\n");
+- return -EINVAL;
+- }
+-
+- advertising = (ADVERTISED_10baseT_Half |
+- ADVERTISED_TP);
+- }
+- break;
+-
+- case SPEED_100:
+- if (cmd->duplex == DUPLEX_FULL) {
+- if (!(bp->supported &
+- SUPPORTED_100baseT_Full)) {
+- DP(NETIF_MSG_LINK,
+- "100M full not supported\n");
+- return -EINVAL;
+- }
+-
+- advertising = (ADVERTISED_100baseT_Full |
+- ADVERTISED_TP);
+- } else {
+- if (!(bp->supported &
+- SUPPORTED_100baseT_Half)) {
+- DP(NETIF_MSG_LINK,
+- "100M half not supported\n");
+- return -EINVAL;
+- }
+-
+- advertising = (ADVERTISED_100baseT_Half |
+- ADVERTISED_TP);
+- }
+- break;
+-
+- case SPEED_1000:
+- if (cmd->duplex != DUPLEX_FULL) {
+- DP(NETIF_MSG_LINK, "1G half not supported\n");
+- return -EINVAL;
+- }
+-
+- if (!(bp->supported & SUPPORTED_1000baseT_Full)) {
+- DP(NETIF_MSG_LINK, "1G full not supported\n");
+- return -EINVAL;
+- }
+-
+- advertising = (ADVERTISED_1000baseT_Full |
+- ADVERTISED_TP);
+- break;
+-
+- case SPEED_2500:
+- if (cmd->duplex != DUPLEX_FULL) {
+- DP(NETIF_MSG_LINK,
+- "2.5G half not supported\n");
+- return -EINVAL;
+- }
+-
+- if (!(bp->supported & SUPPORTED_2500baseX_Full)) {
+- DP(NETIF_MSG_LINK,
+- "2.5G full not supported\n");
+- return -EINVAL;
+- }
+-
+- advertising = (ADVERTISED_2500baseX_Full |
+- ADVERTISED_TP);
+- break;
+-
+- case SPEED_10000:
+- if (cmd->duplex != DUPLEX_FULL) {
+- DP(NETIF_MSG_LINK, "10G half not supported\n");
+- return -EINVAL;
+- }
+-
+- if (!(bp->supported & SUPPORTED_10000baseT_Full)) {
+- DP(NETIF_MSG_LINK, "10G full not supported\n");
+- return -EINVAL;
+- }
+-
+- advertising = (ADVERTISED_10000baseT_Full |
+- ADVERTISED_FIBRE);
+- break;
+-
+- default:
+- DP(NETIF_MSG_LINK, "Unsupported speed\n");
+- return -EINVAL;
+- }
+-
+- bp->req_autoneg &= ~AUTONEG_SPEED;
+- bp->req_line_speed = cmd->speed;
+- bp->req_duplex = cmd->duplex;
+- bp->advertising = advertising;
+- }
+-
+- DP(NETIF_MSG_LINK, "req_autoneg 0x%x req_line_speed %d\n"
+- DP_LEVEL " req_duplex %d advertising 0x%x\n",
+- bp->req_autoneg, bp->req_line_speed, bp->req_duplex,
+- bp->advertising);
+-
+- bnx2x_stop_stats(bp);
+- bnx2x_link_initialize(bp);
+-
+- return 0;
+-}
+-
+-static void bnx2x_get_drvinfo(struct net_device *dev,
+- struct ethtool_drvinfo *info)
+-{
+- struct bnx2x *bp = netdev_priv(dev);
+-
+- strcpy(info->driver, DRV_MODULE_NAME);
+- strcpy(info->version, DRV_MODULE_VERSION);
+- snprintf(info->fw_version, 32, "%d.%d.%d:%d (BC VER %x)",
+- BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION,
+- BCM_5710_FW_REVISION_VERSION, BCM_5710_FW_COMPILE_FLAGS,
+- bp->bc_ver);
+- strcpy(info->bus_info, pci_name(bp->pdev));
+- info->n_stats = BNX2X_NUM_STATS;
+- info->testinfo_len = BNX2X_NUM_TESTS;
+- info->eedump_len = bp->flash_size;
+- info->regdump_len = 0;
+-}
+-
+-static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+-{
+- struct bnx2x *bp = netdev_priv(dev);
+-
+- if (bp->flags & NO_WOL_FLAG) {
+- wol->supported = 0;
+- wol->wolopts = 0;
+- } else {
+- wol->supported = WAKE_MAGIC;
+- if (bp->wol)
+- wol->wolopts = WAKE_MAGIC;
+- else
+- wol->wolopts = 0;
+- }
+- memset(&wol->sopass, 0, sizeof(wol->sopass));
+-}
+-
+-static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+-{
+- struct bnx2x *bp = netdev_priv(dev);
+-
+- if (wol->wolopts & ~WAKE_MAGIC)
+- return -EINVAL;
+-
+- if (wol->wolopts & WAKE_MAGIC) {
+- if (bp->flags & NO_WOL_FLAG)
+- return -EINVAL;
+-
+- bp->wol = 1;
+- } else {
+- bp->wol = 0;
+- }
+- return 0;
+-}
+-
+-static u32 bnx2x_get_msglevel(struct net_device *dev)
+-{
+- struct bnx2x *bp = netdev_priv(dev);
+-
+- return bp->msglevel;
+-}
+-
+-static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
+-{
+- struct bnx2x *bp = netdev_priv(dev);
+-
+- if (capable(CAP_NET_ADMIN))
+- bp->msglevel = level;
+-}
+-
+-static int bnx2x_nway_reset(struct net_device *dev)
+-{
+- struct bnx2x *bp = netdev_priv(dev);
+-
+- if (bp->state != BNX2X_STATE_OPEN) {
+- DP(NETIF_MSG_PROBE, "state is %x, returning\n", bp->state);
+- return -EAGAIN;
+- }
+-
+- bnx2x_stop_stats(bp);
+- bnx2x_link_initialize(bp);
+-
+- return 0;
+-}
+-
+-static int bnx2x_get_eeprom_len(struct net_device *dev)
+-{
+- struct bnx2x *bp = netdev_priv(dev);
+-
+- return bp->flash_size;
+-}
+-
+-static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
+-{
+- int port = bp->port;
+- int count, i;
+- u32 val = 0;
+-
+- /* adjust timeout for emulation/FPGA */
+- count = NVRAM_TIMEOUT_COUNT;
+- if (CHIP_REV_IS_SLOW(bp))
+- count *= 100;
+-
+- /* request access to nvram interface */
+- REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
+- (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
+-
+- for (i = 0; i < count*10; i++) {
+- val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
+- if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
+- break;
+-
+- udelay(5);
+- }
+-
+- if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
+- DP(NETIF_MSG_NVM, "cannot get access to nvram interface\n");
+- return -EBUSY;
+- }
+-
+- return 0;
+-}
+-
+-static int bnx2x_release_nvram_lock(struct bnx2x *bp)
+-{
+- int port = bp->port;
+- int count, i;
+- u32 val = 0;
+-
+- /* adjust timeout for emulation/FPGA */
+- count = NVRAM_TIMEOUT_COUNT;
+- if (CHIP_REV_IS_SLOW(bp))
+- count *= 100;
+-
+- /* relinquish nvram interface */
+- REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
+- (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
+-
+- for (i = 0; i < count*10; i++) {
+- val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
+- if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
+- break;
+-
+- udelay(5);
+- }
+-
+- if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
+- DP(NETIF_MSG_NVM, "cannot free access to nvram interface\n");
+- return -EBUSY;
+- }
+-
+- return 0;
+-}
+-
+-static void bnx2x_enable_nvram_access(struct bnx2x *bp)
+-{
+- u32 val;
+-
+- val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
+-
+- /* enable both bits, even on read */
+- REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
+- (val | MCPR_NVM_ACCESS_ENABLE_EN |
+- MCPR_NVM_ACCESS_ENABLE_WR_EN));
+-}
+-
+-static void bnx2x_disable_nvram_access(struct bnx2x *bp)
+-{
+- u32 val;
+-
+- val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
+-
+- /* disable both bits, even after read */
+- REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
+- (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
+- MCPR_NVM_ACCESS_ENABLE_WR_EN)));
+-}
+-
+-static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
+- u32 cmd_flags)
+-{
+- int count, i, rc;
+- u32 val;
+-
+- /* build the command word */
+- cmd_flags |= MCPR_NVM_COMMAND_DOIT;
+-
+- /* need to clear DONE bit separately */
+- REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
+-
+- /* address of the NVRAM to read from */
+- REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
+- (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
+-
+- /* issue a read command */
+- REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
+-
+- /* adjust timeout for emulation/FPGA */
+- count = NVRAM_TIMEOUT_COUNT;
+- if (CHIP_REV_IS_SLOW(bp))
+- count *= 100;
+-
+- /* wait for completion */
+- *ret_val = 0;
+- rc = -EBUSY;
+- for (i = 0; i < count; i++) {
+- udelay(5);
+- val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
+-
+- if (val & MCPR_NVM_COMMAND_DONE) {
+- val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
+- DP(NETIF_MSG_NVM, "val 0x%08x\n", val);
+- /* we read nvram data in cpu order
+- * but ethtool sees it as an array of bytes
+- * converting to big-endian will do the work */
+- val = cpu_to_be32(val);
+- *ret_val = val;
+- rc = 0;
+- break;
+- }
+- }
+-
+- return rc;
+-}
+-
+-static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
+- int buf_size)
+-{
+- int rc;
+- u32 cmd_flags;
+- u32 val;
+-
+- if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
+- DP(NETIF_MSG_NVM,
+- "Invalid parameter: offset 0x%x buf_size 0x%x\n",
+- offset, buf_size);
+- return -EINVAL;
+- }
+-
+- if (offset + buf_size > bp->flash_size) {
+- DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +"
+- " buf_size (0x%x) > flash_size (0x%x)\n",
+- offset, buf_size, bp->flash_size);
+- return -EINVAL;
+- }
+-
+- /* request access to nvram interface */
+- rc = bnx2x_acquire_nvram_lock(bp);
+- if (rc)
+- return rc;
+-
+- /* enable access to nvram interface */
+- bnx2x_enable_nvram_access(bp);
+-
+- /* read the first word(s) */
+- cmd_flags = MCPR_NVM_COMMAND_FIRST;
+- while ((buf_size > sizeof(u32)) && (rc == 0)) {
+- rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
+- memcpy(ret_buf, &val, 4);
+-
+- /* advance to the next dword */
+- offset += sizeof(u32);
+- ret_buf += sizeof(u32);
+- buf_size -= sizeof(u32);
+- cmd_flags = 0;
+- }
+-
+- if (rc == 0) {
+- cmd_flags |= MCPR_NVM_COMMAND_LAST;
+- rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
+- memcpy(ret_buf, &val, 4);
+- }
+-
+- /* disable access to nvram interface */
+- bnx2x_disable_nvram_access(bp);
+- bnx2x_release_nvram_lock(bp);
+-
+- return rc;
+-}
+-
+-static int bnx2x_get_eeprom(struct net_device *dev,
+- struct ethtool_eeprom *eeprom, u8 *eebuf)
+-{
+- struct bnx2x *bp = netdev_priv(dev);
+- int rc;
+-
+- DP(NETIF_MSG_NVM, "ethtool_eeprom: cmd %d\n"
+- DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
+- eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
+- eeprom->len, eeprom->len);
+-
+- /* parameters already validated in ethtool_get_eeprom */
+-
+- rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
+-
+- return rc;
+-}
+-
+-static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
+- u32 cmd_flags)
+-{
+- int count, i, rc;
+-
+- /* build the command word */
+- cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
+-
+- /* need to clear DONE bit separately */
+- REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
+-
+- /* write the data */
+- REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
+-
+- /* address of the NVRAM to write to */
+- REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
+- (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
+-
+- /* issue the write command */
+- REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
+-
+- /* adjust timeout for emulation/FPGA */
+- count = NVRAM_TIMEOUT_COUNT;
+- if (CHIP_REV_IS_SLOW(bp))
+- count *= 100;
+-
+- /* wait for completion */
+- rc = -EBUSY;
+- for (i = 0; i < count; i++) {
+- udelay(5);
+- val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
+- if (val & MCPR_NVM_COMMAND_DONE) {
+- rc = 0;
+- break;
+- }
+- }
+-
+- return rc;
+-}
+-
+-#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
+-
+-static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
+- int buf_size)
+-{
+- int rc;
+- u32 cmd_flags;
+- u32 align_offset;
+- u32 val;
+-
+- if (offset + buf_size > bp->flash_size) {
+- DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +"
+- " buf_size (0x%x) > flash_size (0x%x)\n",
+- offset, buf_size, bp->flash_size);
+- return -EINVAL;
+- }
+-
+- /* request access to nvram interface */
+- rc = bnx2x_acquire_nvram_lock(bp);
+- if (rc)
+- return rc;
+-
+- /* enable access to nvram interface */
+- bnx2x_enable_nvram_access(bp);
+-
+- cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
+- align_offset = (offset & ~0x03);
+- rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
+-
+- if (rc == 0) {
+- val &= ~(0xff << BYTE_OFFSET(offset));
+- val |= (*data_buf << BYTE_OFFSET(offset));
+-
+- /* nvram data is returned as an array of bytes
+- * convert it back to cpu order */
+- val = be32_to_cpu(val);
+-
+- DP(NETIF_MSG_NVM, "val 0x%08x\n", val);
+-
+- rc = bnx2x_nvram_write_dword(bp, align_offset, val,
+- cmd_flags);
+- }
+-
+- /* disable access to nvram interface */
+- bnx2x_disable_nvram_access(bp);
+- bnx2x_release_nvram_lock(bp);
+-
+- return rc;
+-}
+-
+-static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
+- int buf_size)
+-{
+- int rc;
+- u32 cmd_flags;
+- u32 val;
+- u32 written_so_far;
+-
+- if (buf_size == 1) { /* ethtool */
+- return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
+- }
+-
+- if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
+- DP(NETIF_MSG_NVM,
+- "Invalid parameter: offset 0x%x buf_size 0x%x\n",
+- offset, buf_size);
+- return -EINVAL;
+- }
+-
+- if (offset + buf_size > bp->flash_size) {
+- DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +"
+- " buf_size (0x%x) > flash_size (0x%x)\n",
+- offset, buf_size, bp->flash_size);
+- return -EINVAL;
+- }
+-
+- /* request access to nvram interface */
+- rc = bnx2x_acquire_nvram_lock(bp);
+- if (rc)
+- return rc;
+-
+- /* enable access to nvram interface */
+- bnx2x_enable_nvram_access(bp);
+-
+- written_so_far = 0;
+- cmd_flags = MCPR_NVM_COMMAND_FIRST;
+- while ((written_so_far < buf_size) && (rc == 0)) {
+- if (written_so_far == (buf_size - sizeof(u32)))
+- cmd_flags |= MCPR_NVM_COMMAND_LAST;
+- else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
+- cmd_flags |= MCPR_NVM_COMMAND_LAST;
+- else if ((offset % NVRAM_PAGE_SIZE) == 0)
+- cmd_flags |= MCPR_NVM_COMMAND_FIRST;
+-
+- memcpy(&val, data_buf, 4);
+- DP(NETIF_MSG_NVM, "val 0x%08x\n", val);
+-
+- rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
+-
+- /* advance to the next dword */
+- offset += sizeof(u32);
+- data_buf += sizeof(u32);
+- written_so_far += sizeof(u32);
+- cmd_flags = 0;
+- }
+-
+- /* disable access to nvram interface */
+- bnx2x_disable_nvram_access(bp);
+- bnx2x_release_nvram_lock(bp);
+-
+- return rc;
+-}
+-
+-static int bnx2x_set_eeprom(struct net_device *dev,
+- struct ethtool_eeprom *eeprom, u8 *eebuf)
+-{
+- struct bnx2x *bp = netdev_priv(dev);
+- int rc;
+-
+- DP(NETIF_MSG_NVM, "ethtool_eeprom: cmd %d\n"
+- DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
+- eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
+- eeprom->len, eeprom->len);
+-
+- /* parameters already validated in ethtool_set_eeprom */
+-
+- rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
+-
+- return rc;
+-}
+-
+-static int bnx2x_get_coalesce(struct net_device *dev,
+- struct ethtool_coalesce *coal)
+-{
+- struct bnx2x *bp = netdev_priv(dev);
+-
+- memset(coal, 0, sizeof(struct ethtool_coalesce));
+-
+- coal->rx_coalesce_usecs = bp->rx_ticks;
+- coal->tx_coalesce_usecs = bp->tx_ticks;
+- coal->stats_block_coalesce_usecs = bp->stats_ticks;
+-
+- return 0;
+-}
+-
+-static int bnx2x_set_coalesce(struct net_device *dev,
+- struct ethtool_coalesce *coal)
+-{
+- struct bnx2x *bp = netdev_priv(dev);
+-
+- bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
+- if (bp->rx_ticks > 3000)
+- bp->rx_ticks = 3000;
+-
+- bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
+- if (bp->tx_ticks > 0x3000)
+- bp->tx_ticks = 0x3000;
+-
+- bp->stats_ticks = coal->stats_block_coalesce_usecs;
+- if (bp->stats_ticks > 0xffff00)
+- bp->stats_ticks = 0xffff00;
+- bp->stats_ticks &= 0xffff00;
+-
+- if (netif_running(bp->dev))
+- bnx2x_update_coalesce(bp);
+-
+- return 0;
+-}
+-
+-static void bnx2x_get_ringparam(struct net_device *dev,
+- struct ethtool_ringparam *ering)
+-{
+- struct bnx2x *bp = netdev_priv(dev);
+-
+- ering->rx_max_pending = MAX_RX_AVAIL;
+- ering->rx_mini_max_pending = 0;
+- ering->rx_jumbo_max_pending = 0;
+-
+- ering->rx_pending = bp->rx_ring_size;
+- ering->rx_mini_pending = 0;
+- ering->rx_jumbo_pending = 0;
+-
+- ering->tx_max_pending = MAX_TX_AVAIL;
+- ering->tx_pending = bp->tx_ring_size;
+-}
+-
+-static int bnx2x_set_ringparam(struct net_device *dev,
+- struct ethtool_ringparam *ering)
+-{
+- struct bnx2x *bp = netdev_priv(dev);
+-
+- if ((ering->rx_pending > MAX_RX_AVAIL) ||
+- (ering->tx_pending > MAX_TX_AVAIL) ||
+- (ering->tx_pending <= MAX_SKB_FRAGS + 4))
+- return -EINVAL;
+-
+- bp->rx_ring_size = ering->rx_pending;
+- bp->tx_ring_size = ering->tx_pending;
+-
+- if (netif_running(bp->dev)) {
+- bnx2x_nic_unload(bp, 0);
+- bnx2x_nic_load(bp, 0);
+- }
+-
+- return 0;
+-}
+-
+-static void bnx2x_get_pauseparam(struct net_device *dev,
+- struct ethtool_pauseparam *epause)
+-{
+- struct bnx2x *bp = netdev_priv(dev);
+-
+- epause->autoneg =
+- ((bp->req_autoneg & AUTONEG_FLOW_CTRL) == AUTONEG_FLOW_CTRL);
+- epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) == FLOW_CTRL_RX);
+- epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) == FLOW_CTRL_TX);
+-
+- DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
+- DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
+- epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
+-}
+-
+-static int bnx2x_set_pauseparam(struct net_device *dev,
+- struct ethtool_pauseparam *epause)
+-{
+- struct bnx2x *bp = netdev_priv(dev);
+-
+- DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
+- DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
+- epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
+-
+- if (epause->autoneg) {
+- if (!(bp->supported & SUPPORTED_Autoneg)) {
+- DP(NETIF_MSG_LINK, "Aotoneg not supported\n");
+- return -EINVAL;
+- }
+-
+- bp->req_autoneg |= AUTONEG_FLOW_CTRL;
+- } else
+- bp->req_autoneg &= ~AUTONEG_FLOW_CTRL;
+-
+- bp->req_flow_ctrl = FLOW_CTRL_AUTO;
+-
+- if (epause->rx_pause)
+- bp->req_flow_ctrl |= FLOW_CTRL_RX;
+- if (epause->tx_pause)
+- bp->req_flow_ctrl |= FLOW_CTRL_TX;
+-
+- if (!(bp->req_autoneg & AUTONEG_FLOW_CTRL) &&
+- (bp->req_flow_ctrl == FLOW_CTRL_AUTO))
+- bp->req_flow_ctrl = FLOW_CTRL_NONE;
+-
+- DP(NETIF_MSG_LINK, "req_autoneg 0x%x req_flow_ctrl 0x%x\n",
+- bp->req_autoneg, bp->req_flow_ctrl);
+-
+- bnx2x_stop_stats(bp);
+- bnx2x_link_initialize(bp);
+-
+- return 0;
+-}
+-
+-static u32 bnx2x_get_rx_csum(struct net_device *dev)
+-{
+- struct bnx2x *bp = netdev_priv(dev);
+-
+- return bp->rx_csum;
+-}
+-
+-static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
+-{
+- struct bnx2x *bp = netdev_priv(dev);
+-
+- bp->rx_csum = data;
+- return 0;
+-}
+-
+-static int bnx2x_set_tso(struct net_device *dev, u32 data)
+-{
+- if (data)
+- dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
+- else
+- dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
+- return 0;
+-}
+-
+-static struct {
+- char string[ETH_GSTRING_LEN];
+-} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
+- { "MC Errors (online)" }
+-};
+-
+-static int bnx2x_self_test_count(struct net_device *dev)
+-{
+- return BNX2X_NUM_TESTS;
+-}
+-
+-static void bnx2x_self_test(struct net_device *dev,
+- struct ethtool_test *etest, u64 *buf)
+-{
+- struct bnx2x *bp = netdev_priv(dev);
+- int stats_state;
+-
+- memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
+-
+- if (bp->state != BNX2X_STATE_OPEN) {
+- DP(NETIF_MSG_PROBE, "state is %x, returning\n", bp->state);
+- return;
+- }
+-
+- stats_state = bp->stats_state;
+- bnx2x_stop_stats(bp);
+-
+- if (bnx2x_mc_assert(bp) != 0) {
+- buf[0] = 1;
+- etest->flags |= ETH_TEST_FL_FAILED;
+- }
+-
+-#ifdef BNX2X_EXTRA_DEBUG
+- bnx2x_panic_dump(bp);
+-#endif
+- bp->stats_state = stats_state;
+-}
+-
+-static struct {
+- char string[ETH_GSTRING_LEN];
+-} bnx2x_stats_str_arr[BNX2X_NUM_STATS] = {
+- { "rx_bytes"},
+- { "rx_error_bytes"},
+- { "tx_bytes"},
+- { "tx_error_bytes"},
+- { "rx_ucast_packets"},
+- { "rx_mcast_packets"},
+- { "rx_bcast_packets"},
+- { "tx_ucast_packets"},
+- { "tx_mcast_packets"},
+- { "tx_bcast_packets"},
+- { "tx_mac_errors"}, /* 10 */
+- { "tx_carrier_errors"},
+- { "rx_crc_errors"},
+- { "rx_align_errors"},
+- { "tx_single_collisions"},
+- { "tx_multi_collisions"},
+- { "tx_deferred"},
+- { "tx_excess_collisions"},
+- { "tx_late_collisions"},
+- { "tx_total_collisions"},
+- { "rx_fragments"}, /* 20 */
+- { "rx_jabbers"},
+- { "rx_undersize_packets"},
+- { "rx_oversize_packets"},
+- { "rx_xon_frames"},
+- { "rx_xoff_frames"},
+- { "tx_xon_frames"},
+- { "tx_xoff_frames"},
+- { "rx_mac_ctrl_frames"},
+- { "rx_filtered_packets"},
+- { "rx_discards"}, /* 30 */
+- { "brb_discard"},
+- { "brb_truncate"},
+- { "xxoverflow"}
+-};
+-
+-#define STATS_OFFSET32(offset_name) \
+- (offsetof(struct bnx2x_eth_stats, offset_name) / 4)
+-
+-static unsigned long bnx2x_stats_offset_arr[BNX2X_NUM_STATS] = {
+- STATS_OFFSET32(total_bytes_received_hi),
+- STATS_OFFSET32(stat_IfHCInBadOctets_hi),
+- STATS_OFFSET32(total_bytes_transmitted_hi),
+- STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
+- STATS_OFFSET32(total_unicast_packets_received_hi),
+- STATS_OFFSET32(total_multicast_packets_received_hi),
+- STATS_OFFSET32(total_broadcast_packets_received_hi),
+- STATS_OFFSET32(total_unicast_packets_transmitted_hi),
+- STATS_OFFSET32(total_multicast_packets_transmitted_hi),
+- STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
+- STATS_OFFSET32(stat_Dot3statsInternalMacTransmitErrors), /* 10 */
+- STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
+- STATS_OFFSET32(crc_receive_errors),
+- STATS_OFFSET32(alignment_errors),
+- STATS_OFFSET32(single_collision_transmit_frames),
+- STATS_OFFSET32(multiple_collision_transmit_frames),
+- STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
+- STATS_OFFSET32(excessive_collision_frames),
+- STATS_OFFSET32(late_collision_frames),
+- STATS_OFFSET32(number_of_bugs_found_in_stats_spec),
+- STATS_OFFSET32(runt_packets_received), /* 20 */
+- STATS_OFFSET32(jabber_packets_received),
+- STATS_OFFSET32(error_runt_packets_received),
+- STATS_OFFSET32(error_jabber_packets_received),
+- STATS_OFFSET32(pause_xon_frames_received),
+- STATS_OFFSET32(pause_xoff_frames_received),
+- STATS_OFFSET32(pause_xon_frames_transmitted),
+- STATS_OFFSET32(pause_xoff_frames_transmitted),
+- STATS_OFFSET32(control_frames_received),
+- STATS_OFFSET32(mac_filter_discard),
+- STATS_OFFSET32(no_buff_discard), /* 30 */
+- STATS_OFFSET32(brb_discard),
+- STATS_OFFSET32(brb_truncate_discard),
+- STATS_OFFSET32(xxoverflow_discard)
+-};
+-
+-static u8 bnx2x_stats_len_arr[BNX2X_NUM_STATS] = {
+- 8, 0, 8, 0, 8, 8, 8, 8, 8, 8,
+- 4, 0, 4, 4, 4, 4, 4, 4, 4, 4,
+- 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+- 4, 4, 4, 4
+-};
+-
+-static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
+-{
+- switch (stringset) {
+- case ETH_SS_STATS:
+- memcpy(buf, bnx2x_stats_str_arr, sizeof(bnx2x_stats_str_arr));
+- break;
+-
+- case ETH_SS_TEST:
+- memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
+- break;
+- }
+-}
+-
+-static int bnx2x_get_stats_count(struct net_device *dev)
+-{
+- return BNX2X_NUM_STATS;
+-}
+-
+-static void bnx2x_get_ethtool_stats(struct net_device *dev,
+- struct ethtool_stats *stats, u64 *buf)
+-{
+- struct bnx2x *bp = netdev_priv(dev);
+- u32 *hw_stats = (u32 *)bnx2x_sp_check(bp, eth_stats);
+- int i;
+-
+- for (i = 0; i < BNX2X_NUM_STATS; i++) {
+- if (bnx2x_stats_len_arr[i] == 0) {
+- /* skip this counter */
+- buf[i] = 0;
+- continue;
+- }
+- if (!hw_stats) {
+- buf[i] = 0;
+- continue;
+- }
+- if (bnx2x_stats_len_arr[i] == 4) {
+- /* 4-byte counter */
+- buf[i] = (u64) *(hw_stats + bnx2x_stats_offset_arr[i]);
+- continue;
+- }
+- /* 8-byte counter */
+- buf[i] = HILO_U64(*(hw_stats + bnx2x_stats_offset_arr[i]),
+- *(hw_stats + bnx2x_stats_offset_arr[i] + 1));
+- }
+-}
+-
+-static int bnx2x_phys_id(struct net_device *dev, u32 data)
+-{
+- struct bnx2x *bp = netdev_priv(dev);
+- int i;
+-
+- if (data == 0)
+- data = 2;
+-
+- for (i = 0; i < (data * 2); i++) {
+- if ((i % 2) == 0) {
+- bnx2x_leds_set(bp, SPEED_1000);
+- } else {
+- bnx2x_leds_unset(bp);
+- }
+- msleep_interruptible(500);
+- if (signal_pending(current))
+- break;
+- }
+-
+- if (bp->link_up)
+- bnx2x_leds_set(bp, bp->line_speed);
+-
+- return 0;
+-}
+-
+-static struct ethtool_ops bnx2x_ethtool_ops = {
+- .get_settings = bnx2x_get_settings,
+- .set_settings = bnx2x_set_settings,
+- .get_drvinfo = bnx2x_get_drvinfo,
+- .get_wol = bnx2x_get_wol,
+- .set_wol = bnx2x_set_wol,
+- .get_msglevel = bnx2x_get_msglevel,
+- .set_msglevel = bnx2x_set_msglevel,
+- .nway_reset = bnx2x_nway_reset,
+- .get_link = ethtool_op_get_link,
+- .get_eeprom_len = bnx2x_get_eeprom_len,
+- .get_eeprom = bnx2x_get_eeprom,
+- .set_eeprom = bnx2x_set_eeprom,
+- .get_coalesce = bnx2x_get_coalesce,
+- .set_coalesce = bnx2x_set_coalesce,
+- .get_ringparam = bnx2x_get_ringparam,
+- .set_ringparam = bnx2x_set_ringparam,
+- .get_pauseparam = bnx2x_get_pauseparam,
+- .set_pauseparam = bnx2x_set_pauseparam,
+- .get_rx_csum = bnx2x_get_rx_csum,
+- .set_rx_csum = bnx2x_set_rx_csum,
+- .get_tx_csum = ethtool_op_get_tx_csum,
+- .set_tx_csum = ethtool_op_set_tx_csum,
+- .get_sg = ethtool_op_get_sg,
+- .set_sg = ethtool_op_set_sg,
+- .get_tso = ethtool_op_get_tso,
+- .set_tso = bnx2x_set_tso,
+- .self_test_count = bnx2x_self_test_count,
+- .self_test = bnx2x_self_test,
+- .get_strings = bnx2x_get_strings,
+- .phys_id = bnx2x_phys_id,
+- .get_stats_count = bnx2x_get_stats_count,
+- .get_ethtool_stats = bnx2x_get_ethtool_stats
+-};
+-
+-/* end of ethtool_ops */
+-
+-/****************************************************************************
+-* General service functions
+-****************************************************************************/
+-
+-static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
+-{
+- u16 pmcsr;
+-
+- pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
+-
+- switch (state) {
+- case PCI_D0:
+- pci_write_config_word(bp->pdev,
+- bp->pm_cap + PCI_PM_CTRL,
+- ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
+- PCI_PM_CTRL_PME_STATUS));
+-
+- if (pmcsr & PCI_PM_CTRL_STATE_MASK)
+- /* delay required during transition out of D3hot */
+- msleep(20);
+- break;
+-
+- case PCI_D3hot:
+- pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
+- pmcsr |= 3;
+-
+- if (bp->wol)
+- pmcsr |= PCI_PM_CTRL_PME_ENABLE;
+-
+- pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
+- pmcsr);
+-
+- /* No more memory access after this point until
+- * device is brought back to D0.
+- */
+- break;
+-
+- default:
+- return -EINVAL;
+- }
+- return 0;
+-}
+-
+-/*
+- * net_device service functions
+- */
+-
+-/* called with netif_tx_lock from set_multicast */
+-static void bnx2x_set_rx_mode(struct net_device *dev)
+-{
+- struct bnx2x *bp = netdev_priv(dev);
+- u32 rx_mode = BNX2X_RX_MODE_NORMAL;
+-
+- DP(NETIF_MSG_IFUP, "called dev->flags = %x\n", dev->flags);
+-
+- if (dev->flags & IFF_PROMISC)
+- rx_mode = BNX2X_RX_MODE_PROMISC;
+-
+- else if ((dev->flags & IFF_ALLMULTI) ||
+- (dev->mc_count > BNX2X_MAX_MULTICAST))
+- rx_mode = BNX2X_RX_MODE_ALLMULTI;
+-
+- else { /* some multicasts */
+- int i, old, offset;
+- struct dev_mc_list *mclist;
+- struct mac_configuration_cmd *config =
+- bnx2x_sp(bp, mcast_config);
+-
+- for (i = 0, mclist = dev->mc_list;
+- mclist && (i < dev->mc_count);
+- i++, mclist = mclist->next) {
+-
+- config->config_table[i].cam_entry.msb_mac_addr =
+- swab16(*(u16 *)&mclist->dmi_addr[0]);
+- config->config_table[i].cam_entry.middle_mac_addr =
+- swab16(*(u16 *)&mclist->dmi_addr[2]);
+- config->config_table[i].cam_entry.lsb_mac_addr =
+- swab16(*(u16 *)&mclist->dmi_addr[4]);
+- config->config_table[i].cam_entry.flags =
+- cpu_to_le16(bp->port);
+- config->config_table[i].target_table_entry.flags = 0;
+- config->config_table[i].target_table_entry.
+- client_id = 0;
+- config->config_table[i].target_table_entry.
+- vlan_id = 0;
+-
+- DP(NETIF_MSG_IFUP,
+- "setting MCAST[%d] (%04x:%04x:%04x)\n",
+- i, config->config_table[i].cam_entry.msb_mac_addr,
+- config->config_table[i].cam_entry.middle_mac_addr,
+- config->config_table[i].cam_entry.lsb_mac_addr);
+- }
+- old = config->hdr.length_6b;
+- if (old > i) {
+- for (; i < old; i++) {
+- if (CAM_IS_INVALID(config->config_table[i])) {
+- i--; /* already invalidated */
+- break;
+- }
+- /* invalidate */
+- CAM_INVALIDATE(config->config_table[i]);
+- }
+- }
+-
+- if (CHIP_REV_IS_SLOW(bp))
+- offset = BNX2X_MAX_EMUL_MULTI*(1 + bp->port);
+- else
+- offset = BNX2X_MAX_MULTICAST*(1 + bp->port);
+-
+- config->hdr.length_6b = i;
+- config->hdr.offset = offset;
+- config->hdr.reserved0 = 0;
+- config->hdr.reserved1 = 0;
+-
+- bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
+- U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
+- U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
+- }
+-
+- bp->rx_mode = rx_mode;
+- bnx2x_set_storm_rx_mode(bp);
+-}
+-
+-static int bnx2x_poll(struct napi_struct *napi, int budget)
+-{
+- struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
+- napi);
+- struct bnx2x *bp = fp->bp;
+- int work_done = 0;
+-
+-#ifdef BNX2X_STOP_ON_ERROR
+- if (unlikely(bp->panic))
+- goto out_panic;
+-#endif
+-
+- prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
+- prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
+- prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
+-
+- bnx2x_update_fpsb_idx(fp);
+-
+- if (le16_to_cpu(*fp->tx_cons_sb) != fp->tx_pkt_cons)
+- bnx2x_tx_int(fp, budget);
+-
+-
+- if (le16_to_cpu(*fp->rx_cons_sb) != fp->rx_comp_cons)
+- work_done = bnx2x_rx_int(fp, budget);
+-
+-
+- rmb(); /* bnx2x_has_work() reads the status block */
+-
+- /* must not complete if we consumed full budget */
+- if ((work_done < budget) && !bnx2x_has_work(fp)) {
+-
+-#ifdef BNX2X_STOP_ON_ERROR
+-out_panic:
+-#endif
+- netif_rx_complete(bp->dev, napi);
+-
+- bnx2x_ack_sb(bp, fp->index, USTORM_ID,
+- le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
+- bnx2x_ack_sb(bp, fp->index, CSTORM_ID,
+- le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
+- }
+-
+- return work_done;
+-}
+-
+-/* Called with netif_tx_lock.
+- * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
+- * netif_wake_queue().
+- */
+-static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
+-{
+- struct bnx2x *bp = netdev_priv(dev);
+- struct bnx2x_fastpath *fp;
+- struct sw_tx_bd *tx_buf;
+- struct eth_tx_bd *tx_bd;
+- struct eth_tx_parse_bd *pbd = NULL;
+- u16 pkt_prod, bd_prod;
+- int nbd, fp_index = 0;
+- dma_addr_t mapping;
+-
+-#ifdef BNX2X_STOP_ON_ERROR
+- if (unlikely(bp->panic))
+- return NETDEV_TX_BUSY;
+-#endif
+-
+- fp_index = smp_processor_id() % (bp->num_queues);
+-
+- fp = &bp->fp[fp_index];
+- if (unlikely(bnx2x_tx_avail(bp->fp) <
+- (skb_shinfo(skb)->nr_frags + 3))) {
+- bp->slowpath->eth_stats.driver_xoff++,
+- netif_stop_queue(dev);
+- BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
+- return NETDEV_TX_BUSY;
+- }
+-
+- /*
+- This is a bit ugly. First we use one BD which we mark as start,
+- then for TSO or xsum we have a parsing info BD,
+- and only then we have the rest of the TSO bds.
+- (don't forget to mark the last one as last,
+- and to unmap only AFTER you write to the BD ...)
+- I would like to thank DovH for this mess.
+- */
+-
+- pkt_prod = fp->tx_pkt_prod++;
+- bd_prod = fp->tx_bd_prod;
+- bd_prod = TX_BD(bd_prod);
+-
+- /* get a tx_buff and first bd */
+- tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
+- tx_bd = &fp->tx_desc_ring[bd_prod];
+-
+- tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
+- tx_bd->general_data = (UNICAST_ADDRESS <<
+- ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
+- tx_bd->general_data |= 1; /* header nbd */
+-
+- /* remember the first bd of the packet */
+- tx_buf->first_bd = bd_prod;
+-
+- DP(NETIF_MSG_TX_QUEUED,
+- "sending pkt %u @%p next_idx %u bd %u @%p\n",
+- pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
+-
+- if (skb->ip_summed == CHECKSUM_PARTIAL) {
+- struct iphdr *iph = ip_hdr(skb);
+- u8 len;
+-
+- tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
+-
+- /* turn on parsing and get a bd */
+- bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
+- pbd = (void *)&fp->tx_desc_ring[bd_prod];
+- len = ((u8 *)iph - (u8 *)skb->data) / 2;
+-
+- /* for now NS flag is not used in Linux */
+- pbd->global_data = (len |
+- ((skb->protocol == ntohs(ETH_P_8021Q)) <<
+- ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
+- pbd->ip_hlen = ip_hdrlen(skb) / 2;
+- pbd->total_hlen = cpu_to_le16(len + pbd->ip_hlen);
+- if (iph->protocol == IPPROTO_TCP) {
+- struct tcphdr *th = tcp_hdr(skb);
+-
+- tx_bd->bd_flags.as_bitfield |=
+- ETH_TX_BD_FLAGS_TCP_CSUM;
+- pbd->tcp_flags = pbd_tcp_flags(skb);
+- pbd->total_hlen += cpu_to_le16(tcp_hdrlen(skb) / 2);
+- pbd->tcp_pseudo_csum = swab16(th->check);
+-
+- } else if (iph->protocol == IPPROTO_UDP) {
+- struct udphdr *uh = udp_hdr(skb);
+-
+- tx_bd->bd_flags.as_bitfield |=
+- ETH_TX_BD_FLAGS_TCP_CSUM;
+- pbd->total_hlen += cpu_to_le16(4);
+- pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
+- pbd->cs_offset = 5; /* 10 >> 1 */
+- pbd->tcp_pseudo_csum = 0;
+- /* HW bug: we need to subtract 10 bytes before the
+- * UDP header from the csum
+- */
+- uh->check = (u16) ~csum_fold(csum_sub(uh->check,
+- csum_partial(((u8 *)(uh)-10), 10, 0)));
+- }
+- }
+-
+- if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) {
+- tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
+- tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
+- } else {
+- tx_bd->vlan = cpu_to_le16(pkt_prod);
+- }
+-
+- mapping = pci_map_single(bp->pdev, skb->data,
+- skb->len, PCI_DMA_TODEVICE);
+-
+- tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
+- tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
+- nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL)? 1 : 2);
+- tx_bd->nbd = cpu_to_le16(nbd);
+- tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
+-
+- DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
+- " nbytes %d flags %x vlan %u\n",
+- tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, tx_bd->nbd,
+- tx_bd->nbytes, tx_bd->bd_flags.as_bitfield, tx_bd->vlan);
+-
+- if (skb_shinfo(skb)->gso_size &&
+- (skb->len > (bp->dev->mtu + ETH_HLEN))) {
+- int hlen = 2 * le16_to_cpu(pbd->total_hlen);
+-
+- DP(NETIF_MSG_TX_QUEUED,
+- "TSO packet len %d hlen %d total len %d tso size %d\n",
+- skb->len, hlen, skb_headlen(skb),
+- skb_shinfo(skb)->gso_size);
+-
+- tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
+-
+- if (tx_bd->nbytes > cpu_to_le16(hlen)) {
+- /* we split the first bd into headers and data bds
+- * to ease the pain of our fellow micocode engineers
+- * we use one mapping for both bds
+- * So far this has only been observed to happen
+- * in Other Operating Systems(TM)
+- */
+-
+- /* first fix first bd */
+- nbd++;
+- tx_bd->nbd = cpu_to_le16(nbd);
+- tx_bd->nbytes = cpu_to_le16(hlen);
+-
+- /* we only print this as an error
+- * because we don't think this will ever happen.
+- */
+- BNX2X_ERR("TSO split header size is %d (%x:%x)"
+- " nbd %d\n", tx_bd->nbytes, tx_bd->addr_hi,
+- tx_bd->addr_lo, tx_bd->nbd);
+-
+- /* now get a new data bd
+- * (after the pbd) and fill it */
+- bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
+- tx_bd = &fp->tx_desc_ring[bd_prod];
+-
+- tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
+- tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping) + hlen);
+- tx_bd->nbytes = cpu_to_le16(skb_headlen(skb) - hlen);
+- tx_bd->vlan = cpu_to_le16(pkt_prod);
+- /* this marks the bd
+- * as one that has no individual mapping
+- * the FW ignores this flag in a bd not marked start
+- */
+- tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
+- DP(NETIF_MSG_TX_QUEUED,
+- "TSO split data size is %d (%x:%x)\n",
+- tx_bd->nbytes, tx_bd->addr_hi, tx_bd->addr_lo);
+- }
+-
+- if (!pbd) {
+- /* supposed to be unreached
+- * (and therefore not handled properly...)
+- */
+- BNX2X_ERR("LSO with no PBD\n");
+- BUG();
+- }
+-
+- pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
+- pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
+- pbd->ip_id = swab16(ip_hdr(skb)->id);
+- pbd->tcp_pseudo_csum =
+- swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
+- ip_hdr(skb)->daddr,
+- 0, IPPROTO_TCP, 0));
+- pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
+- }
+-
+- {
+- int i;
+-
+- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+-
+- bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
+- tx_bd = &fp->tx_desc_ring[bd_prod];
+-
+- mapping = pci_map_page(bp->pdev, frag->page,
+- frag->page_offset,
+- frag->size, PCI_DMA_TODEVICE);
+-
+- tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
+- tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
+- tx_bd->nbytes = cpu_to_le16(frag->size);
+- tx_bd->vlan = cpu_to_le16(pkt_prod);
+- tx_bd->bd_flags.as_bitfield = 0;
+- DP(NETIF_MSG_TX_QUEUED, "frag %d bd @%p"
+- " addr (%x:%x) nbytes %d flags %x\n",
+- i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
+- tx_bd->nbytes, tx_bd->bd_flags.as_bitfield);
+- } /* for */
+- }
+-
+- /* now at last mark the bd as the last bd */
+- tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
+-
+- DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
+- tx_bd, tx_bd->bd_flags.as_bitfield);
+-
+- tx_buf->skb = skb;
+-
+- bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
+-
+- /* now send a tx doorbell, counting the next bd
+- * if the packet contains or ends with it
+- */
+- if (TX_BD_POFF(bd_prod) < nbd)
+- nbd++;
+-
+- if (pbd)
+- DP(NETIF_MSG_TX_QUEUED,
+- "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
+- " tcp_flags %x xsum %x seq %u hlen %u\n",
+- pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
+- pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
+- pbd->tcp_send_seq, pbd->total_hlen);
+-
+- DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %u bd %d\n", nbd, bd_prod);
+-
+- fp->hw_tx_prods->bds_prod =
+- cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
+- mb(); /* FW restriction: must not reorder writing nbd and packets */
+- fp->hw_tx_prods->packets_prod =
+- cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
+- DOORBELL(bp, fp_index, 0);
+-
+- mmiowb();
+-
+- fp->tx_bd_prod = bd_prod;
+- dev->trans_start = jiffies;
+-
+- if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
+- netif_stop_queue(dev);
+- bp->slowpath->eth_stats.driver_xoff++;
+- if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
+- netif_wake_queue(dev);
+- }
+- fp->tx_pkt++;
+-
+- return NETDEV_TX_OK;
+-}
+-
+-/* Called with rtnl_lock */
+-static int bnx2x_open(struct net_device *dev)
+-{
+- struct bnx2x *bp = netdev_priv(dev);
+-
+- bnx2x_set_power_state(bp, PCI_D0);
+-
+- return bnx2x_nic_load(bp, 1);
+-}
+-
+-/* Called with rtnl_lock */
+-static int bnx2x_close(struct net_device *dev)
+-{
+- struct bnx2x *bp = netdev_priv(dev);
+-
+- /* Unload the driver, release IRQs */
+- bnx2x_nic_unload(bp, 1);
+-
+- if (!CHIP_REV_IS_SLOW(bp))
+- bnx2x_set_power_state(bp, PCI_D3hot);
+-
+- return 0;
+-}
+-
+-/* Called with rtnl_lock */
+-static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
+-{
+- struct sockaddr *addr = p;
+- struct bnx2x *bp = netdev_priv(dev);
+-
+- if (!is_valid_ether_addr(addr->sa_data))
+- return -EINVAL;
+-
+- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+- if (netif_running(dev))
+- bnx2x_set_mac_addr(bp);
+-
+- return 0;
+-}
+-
+-/* Called with rtnl_lock */
+-static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+-{
+- struct mii_ioctl_data *data = if_mii(ifr);
+- struct bnx2x *bp = netdev_priv(dev);
+- int err;
+-
+- switch (cmd) {
+- case SIOCGMIIPHY:
+- data->phy_id = bp->phy_addr;
+-
+- /* fallthrough */
+- case SIOCGMIIREG: {
+- u32 mii_regval;
+-
+- spin_lock_bh(&bp->phy_lock);
+- if (bp->state == BNX2X_STATE_OPEN) {
+- err = bnx2x_mdio22_read(bp, data->reg_num & 0x1f,
+- &mii_regval);
+-
+- data->val_out = mii_regval;
+- } else {
+- err = -EAGAIN;
+- }
+- spin_unlock_bh(&bp->phy_lock);
+- return err;
+- }
+-
+- case SIOCSMIIREG:
+- if (!capable(CAP_NET_ADMIN))
+- return -EPERM;
+-
+- spin_lock_bh(&bp->phy_lock);
+- if (bp->state == BNX2X_STATE_OPEN) {
+- err = bnx2x_mdio22_write(bp, data->reg_num & 0x1f,
+- data->val_in);
+- } else {
+- err = -EAGAIN;
+- }
+- spin_unlock_bh(&bp->phy_lock);
+- return err;
+-
+- default:
+- /* do nothing */
+- break;
+- }
+-
+- return -EOPNOTSUPP;
+-}
+-
+-/* Called with rtnl_lock */
+-static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
+-{
+- struct bnx2x *bp = netdev_priv(dev);
+-
+- if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
+- ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
+- return -EINVAL;
+-
+- /* This does not race with packet allocation
+- * because the actual alloc size is
+- * only updated as part of load
+- */
+- dev->mtu = new_mtu;
+-
+- if (netif_running(dev)) {
+- bnx2x_nic_unload(bp, 0);
+- bnx2x_nic_load(bp, 0);
+- }
+- return 0;
+-}
+-
+-static void bnx2x_tx_timeout(struct net_device *dev)
+-{
+- struct bnx2x *bp = netdev_priv(dev);
+-
+-#ifdef BNX2X_STOP_ON_ERROR
+- if (!bp->panic)
+- bnx2x_panic();
+-#endif
+- /* This allows the netif to be shutdown gracefully before resetting */
+- schedule_work(&bp->reset_task);
+-}
+-
+-#ifdef BCM_VLAN
+-/* Called with rtnl_lock */
+-static void bnx2x_vlan_rx_register(struct net_device *dev,
+- struct vlan_group *vlgrp)
+-{
+- struct bnx2x *bp = netdev_priv(dev);
+-
+- bp->vlgrp = vlgrp;
+- if (netif_running(dev))
+- bnx2x_set_client_config(bp);
+-}
+-#endif
+-
+-#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
+-static void poll_bnx2x(struct net_device *dev)
+-{
+- struct bnx2x *bp = netdev_priv(dev);
+-
+- disable_irq(bp->pdev->irq);
+- bnx2x_interrupt(bp->pdev->irq, dev);
+- enable_irq(bp->pdev->irq);
+-}
+-#endif
+-
+-static void bnx2x_reset_task(struct work_struct *work)
+-{
+- struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
+-
+-#ifdef BNX2X_STOP_ON_ERROR
+- BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
+- " so reset not done to allow debug dump,\n"
+- KERN_ERR " you will need to reboot when done\n");
+- return;
+-#endif
+-
+- if (!netif_running(bp->dev))
+- return;
+-
+- rtnl_lock();
+-
+- if (bp->state != BNX2X_STATE_OPEN) {
+- DP(NETIF_MSG_TX_ERR, "state is %x, returning\n", bp->state);
+- goto reset_task_exit;
+- }
+-
+- bnx2x_nic_unload(bp, 0);
+- bnx2x_nic_load(bp, 0);
+-
+-reset_task_exit:
+- rtnl_unlock();
+-}
+-
+-static int __devinit bnx2x_init_board(struct pci_dev *pdev,
+- struct net_device *dev)
+-{
+- struct bnx2x *bp;
+- int rc;
+-
+- SET_NETDEV_DEV(dev, &pdev->dev);
+- bp = netdev_priv(dev);
+-
+- bp->flags = 0;
+- bp->port = PCI_FUNC(pdev->devfn);
+-
+- rc = pci_enable_device(pdev);
+- if (rc) {
+- printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
+- goto err_out;
+- }
+-
+- if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+- printk(KERN_ERR PFX "Cannot find PCI device base address,"
+- " aborting\n");
+- rc = -ENODEV;
+- goto err_out_disable;
+- }
+-
+- if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
+- printk(KERN_ERR PFX "Cannot find second PCI device"
+- " base address, aborting\n");
+- rc = -ENODEV;
+- goto err_out_disable;
+- }
+-
+- rc = pci_request_regions(pdev, DRV_MODULE_NAME);
+- if (rc) {
+- printk(KERN_ERR PFX "Cannot obtain PCI resources,"
+- " aborting\n");
+- goto err_out_disable;
+- }
+-
+- pci_set_master(pdev);
+-
+- bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
+- if (bp->pm_cap == 0) {
+- printk(KERN_ERR PFX "Cannot find power management"
+- " capability, aborting\n");
+- rc = -EIO;
+- goto err_out_release;
+- }
+-
+- bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+- if (bp->pcie_cap == 0) {
+- printk(KERN_ERR PFX "Cannot find PCI Express capability,"
+- " aborting\n");
+- rc = -EIO;
+- goto err_out_release;
+- }
+-
+- if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
+- bp->flags |= USING_DAC_FLAG;
+- if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
+- printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
+- " failed, aborting\n");
+- rc = -EIO;
+- goto err_out_release;
+- }
+-
+- } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
+- printk(KERN_ERR PFX "System does not support DMA,"
+- " aborting\n");
+- rc = -EIO;
+- goto err_out_release;
+- }
+-
+- bp->dev = dev;
+- bp->pdev = pdev;
+-
+- spin_lock_init(&bp->phy_lock);
+-
+- INIT_WORK(&bp->reset_task, bnx2x_reset_task);
+- INIT_WORK(&bp->sp_task, bnx2x_sp_task);
+-
+- dev->base_addr = pci_resource_start(pdev, 0);
+-
+- dev->irq = pdev->irq;
+-
+- bp->regview = ioremap_nocache(dev->base_addr,
+- pci_resource_len(pdev, 0));
+- if (!bp->regview) {
+- printk(KERN_ERR PFX "Cannot map register space, aborting\n");
+- rc = -ENOMEM;
+- goto err_out_release;
+- }
+-
+- bp->doorbells = ioremap_nocache(pci_resource_start(pdev , 2),
+- pci_resource_len(pdev, 2));
+- if (!bp->doorbells) {
+- printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
+- rc = -ENOMEM;
+- goto err_out_unmap;
+- }
+-
+- bnx2x_set_power_state(bp, PCI_D0);
+-
+- bnx2x_get_hwinfo(bp);
+-
+- if (CHIP_REV(bp) == CHIP_REV_FPGA) {
+- printk(KERN_ERR PFX "FPGA detected. MCP disabled,"
+- " will only init first device\n");
+- onefunc = 1;
+- nomcp = 1;
+- }
+-
+- if (nomcp) {
+- printk(KERN_ERR PFX "MCP disabled, will only"
+- " init first device\n");
+- onefunc = 1;
+- }
+-
+- if (onefunc && bp->port) {
+- printk(KERN_ERR PFX "Second device disabled, exiting\n");
+- rc = -ENODEV;
+- goto err_out_unmap;
+- }
+-
+- bp->tx_ring_size = MAX_TX_AVAIL;
+- bp->rx_ring_size = MAX_RX_AVAIL;
+-
+- bp->rx_csum = 1;
+-
+- bp->rx_offset = 0;
+-
+- bp->tx_quick_cons_trip_int = 0xff;
+- bp->tx_quick_cons_trip = 0xff;
+- bp->tx_ticks_int = 50;
+- bp->tx_ticks = 50;
+-
+- bp->rx_quick_cons_trip_int = 0xff;
+- bp->rx_quick_cons_trip = 0xff;
+- bp->rx_ticks_int = 25;
+- bp->rx_ticks = 25;
+-
+- bp->stats_ticks = 1000000 & 0xffff00;
+-
+- bp->timer_interval = HZ;
+- bp->current_interval = (poll ? poll : HZ);
+-
+- init_timer(&bp->timer);
+- bp->timer.expires = jiffies + bp->current_interval;
+- bp->timer.data = (unsigned long) bp;
+- bp->timer.function = bnx2x_timer;
+-
+- return 0;
+-
+-err_out_unmap:
+- if (bp->regview) {
+- iounmap(bp->regview);
+- bp->regview = NULL;
+- }
+-
+- if (bp->doorbells) {
+- iounmap(bp->doorbells);
+- bp->doorbells = NULL;
+- }
+-
+-err_out_release:
+- pci_release_regions(pdev);
+-
+-err_out_disable:
+- pci_disable_device(pdev);
+- pci_set_drvdata(pdev, NULL);
+-
+-err_out:
+- return rc;
+-}
+-
+-static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
+-{
+- u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
+-
+- val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
+- return val;
+-}
+-
+-/* return value of 1=2.5GHz 2=5GHz */
+-static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
+-{
+- u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
+-
+- val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
+- return val;
+-}
+-
+-static int __devinit bnx2x_init_one(struct pci_dev *pdev,
+- const struct pci_device_id *ent)
+-{
+- static int version_printed;
+- struct net_device *dev = NULL;
+- struct bnx2x *bp;
+- int rc;
+- int port = PCI_FUNC(pdev->devfn);
+- DECLARE_MAC_BUF(mac);
+-
+- if (version_printed++ == 0)
+- printk(KERN_INFO "%s", version);
+-
+- /* dev zeroed in init_etherdev */
+- dev = alloc_etherdev(sizeof(*bp));
+- if (!dev)
+- return -ENOMEM;
+-
+- netif_carrier_off(dev);
+-
+- bp = netdev_priv(dev);
+- bp->msglevel = debug;
+-
+- if (port && onefunc) {
+- printk(KERN_ERR PFX "second function disabled. exiting\n");
+- free_netdev(dev);
+- return 0;
+- }
+-
+- rc = bnx2x_init_board(pdev, dev);
+- if (rc < 0) {
+- free_netdev(dev);
+- return rc;
+- }
+-
+- dev->hard_start_xmit = bnx2x_start_xmit;
+- dev->watchdog_timeo = TX_TIMEOUT;
+-
+- dev->ethtool_ops = &bnx2x_ethtool_ops;
+- dev->open = bnx2x_open;
+- dev->stop = bnx2x_close;
+- dev->set_multicast_list = bnx2x_set_rx_mode;
+- dev->set_mac_address = bnx2x_change_mac_addr;
+- dev->do_ioctl = bnx2x_ioctl;
+- dev->change_mtu = bnx2x_change_mtu;
+- dev->tx_timeout = bnx2x_tx_timeout;
+-#ifdef BCM_VLAN
+- dev->vlan_rx_register = bnx2x_vlan_rx_register;
+-#endif
+-#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
+- dev->poll_controller = poll_bnx2x;
+-#endif
+- dev->features |= NETIF_F_SG;
+- if (bp->flags & USING_DAC_FLAG)
+- dev->features |= NETIF_F_HIGHDMA;
+- dev->features |= NETIF_F_IP_CSUM;
+-#ifdef BCM_VLAN
+- dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+-#endif
+- dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
+-
+- rc = register_netdev(dev);
+- if (rc) {
+- dev_err(&pdev->dev, "Cannot register net device\n");
+- if (bp->regview)
+- iounmap(bp->regview);
+- if (bp->doorbells)
+- iounmap(bp->doorbells);
+- pci_release_regions(pdev);
+- pci_disable_device(pdev);
+- pci_set_drvdata(pdev, NULL);
+- free_netdev(dev);
+- return rc;
+- }
+-
+- pci_set_drvdata(pdev, dev);
+-
+- bp->name = board_info[ent->driver_data].name;
+- printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
+- " IRQ %d, ", dev->name, bp->name,
+- ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
+- ((CHIP_ID(bp) & 0x0ff0) >> 4),
+- bnx2x_get_pcie_width(bp),
+- (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
+- dev->base_addr, bp->pdev->irq);
+- printk(KERN_CONT "node addr %s\n", print_mac(mac, dev->dev_addr));
+- return 0;
+-}
+-
+-static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
+-{
+- struct net_device *dev = pci_get_drvdata(pdev);
+- struct bnx2x *bp;
+-
+- if (!dev) {
+- /* we get here if init_one() fails */
+- printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
+- return;
+- }
+-
+- bp = netdev_priv(dev);
+-
+- unregister_netdev(dev);
+-
+- if (bp->regview)
+- iounmap(bp->regview);
+-
+- if (bp->doorbells)
+- iounmap(bp->doorbells);
+-
+- free_netdev(dev);
+- pci_release_regions(pdev);
+- pci_disable_device(pdev);
+- pci_set_drvdata(pdev, NULL);
+-}
+-
+-static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
+-{
+- struct net_device *dev = pci_get_drvdata(pdev);
+- struct bnx2x *bp;
+-
+- if (!dev)
+- return 0;
+-
+- if (!netif_running(dev))
+- return 0;
+-
+- bp = netdev_priv(dev);
+-
+- bnx2x_nic_unload(bp, 0);
+-
+- netif_device_detach(dev);
+-
+- pci_save_state(pdev);
+- bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
+-
+- return 0;
+-}
+-
+-static int bnx2x_resume(struct pci_dev *pdev)
+-{
+- struct net_device *dev = pci_get_drvdata(pdev);
+- struct bnx2x *bp;
+- int rc;
+-
+- if (!dev) {
+- printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
+- return -ENODEV;
+- }
+-
+- if (!netif_running(dev))
+- return 0;
+-
+- bp = netdev_priv(dev);
+-
+- pci_restore_state(pdev);
+- bnx2x_set_power_state(bp, PCI_D0);
+- netif_device_attach(dev);
+-
+- rc = bnx2x_nic_load(bp, 0);
+- if (rc)
+- return rc;
+-
+- return 0;
+-}
+-
+-static struct pci_driver bnx2x_pci_driver = {
+- .name = DRV_MODULE_NAME,
+- .id_table = bnx2x_pci_tbl,
+- .probe = bnx2x_init_one,
+- .remove = __devexit_p(bnx2x_remove_one),
+- .suspend = bnx2x_suspend,
+- .resume = bnx2x_resume,
+-};
+-
+-static int __init bnx2x_init(void)
+-{
+- return pci_register_driver(&bnx2x_pci_driver);
+-}
+-
+-static void __exit bnx2x_cleanup(void)
+-{
+- pci_unregister_driver(&bnx2x_pci_driver);
+-}
+-
+-module_init(bnx2x_init);
+-module_exit(bnx2x_cleanup);
+-
+diff --git a/drivers/net/bnx2x.h b/drivers/net/bnx2x.h
+index 8e68d06..84314b8 100644
+--- a/drivers/net/bnx2x.h
++++ b/drivers/net/bnx2x.h
+@@ -1,6 +1,6 @@
+ /* bnx2x.h: Broadcom Everest network driver.
+ *
+- * Copyright (c) 2007-2008 Broadcom Corporation
++ * Copyright (c) 2007-2009 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+@@ -14,39 +14,51 @@
+ #ifndef BNX2X_H
+ #define BNX2X_H
+
++/* compilation time flags */
++
++/* define this to make the driver freeze on error to allow getting debug info
++ * (you will need to reboot afterwards) */
++/* #define BNX2X_STOP_ON_ERROR */
++
++#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
++#define BCM_VLAN 1
++#endif
++
++
+ /* error/debug prints */
+
+-#define DRV_MODULE_NAME "bnx2x"
+-#define PFX DRV_MODULE_NAME ": "
++#define DRV_MODULE_NAME "bnx2x"
++#define PFX DRV_MODULE_NAME ": "
+
+ /* for messages that are currently off */
+-#define BNX2X_MSG_OFF 0
+-#define BNX2X_MSG_MCP 0x10000 /* was: NETIF_MSG_HW */
+-#define BNX2X_MSG_STATS 0x20000 /* was: NETIF_MSG_TIMER */
+-#define NETIF_MSG_NVM 0x40000 /* was: NETIF_MSG_HW */
+-#define NETIF_MSG_DMAE 0x80000 /* was: NETIF_MSG_HW */
++#define BNX2X_MSG_OFF 0
++#define BNX2X_MSG_MCP 0x010000 /* was: NETIF_MSG_HW */
++#define BNX2X_MSG_STATS 0x020000 /* was: NETIF_MSG_TIMER */
++#define BNX2X_MSG_NVM 0x040000 /* was: NETIF_MSG_HW */
++#define BNX2X_MSG_DMAE 0x080000 /* was: NETIF_MSG_HW */
+ #define BNX2X_MSG_SP 0x100000 /* was: NETIF_MSG_INTR */
+ #define BNX2X_MSG_FP 0x200000 /* was: NETIF_MSG_INTR */
+
+-#define DP_LEVEL KERN_NOTICE /* was: KERN_DEBUG */
++#define DP_LEVEL KERN_NOTICE /* was: KERN_DEBUG */
+
+ /* regular debug print */
+ #define DP(__mask, __fmt, __args...) do { \
+ if (bp->msglevel & (__mask)) \
+- printk(DP_LEVEL "[%s:%d(%s)]" __fmt, __FUNCTION__, \
+- __LINE__, bp->dev?(bp->dev->name):"?", ##__args); \
++ printk(DP_LEVEL "[%s:%d(%s)]" __fmt, __func__, __LINE__, \
++ bp->dev ? (bp->dev->name) : "?", ##__args); \
+ } while (0)
+
+-/* for errors (never masked) */
+-#define BNX2X_ERR(__fmt, __args...) do { \
+- printk(KERN_ERR "[%s:%d(%s)]" __fmt, __FUNCTION__, \
+- __LINE__, bp->dev?(bp->dev->name):"?", ##__args); \
++/* errors debug print */
++#define BNX2X_DBG_ERR(__fmt, __args...) do { \
++ if (bp->msglevel & NETIF_MSG_PROBE) \
++ printk(KERN_ERR "[%s:%d(%s)]" __fmt, __func__, __LINE__, \
++ bp->dev ? (bp->dev->name) : "?", ##__args); \
+ } while (0)
+
+-/* for logging (never masked) */
+-#define BNX2X_LOG(__fmt, __args...) do { \
+- printk(KERN_NOTICE "[%s:%d(%s)]" __fmt, __FUNCTION__, \
+- __LINE__, bp->dev?(bp->dev->name):"?", ##__args); \
++/* for errors (never masked) */
++#define BNX2X_ERR(__fmt, __args...) do { \
++ printk(KERN_ERR "[%s:%d(%s)]" __fmt, __func__, __LINE__, \
++ bp->dev ? (bp->dev->name) : "?", ##__args); \
+ } while (0)
+
+ /* before we have a dev->name use dev_info() */
+@@ -60,7 +72,7 @@
+ #define bnx2x_panic() do { \
+ bp->panic = 1; \
+ BNX2X_ERR("driver assert\n"); \
+- bnx2x_disable_int(bp); \
++ bnx2x_int_disable(bp); \
+ bnx2x_panic_dump(bp); \
+ } while (0)
+ #else
+@@ -71,164 +83,413 @@
+ #endif
+
+
+-#define U64_LO(x) (((u64)x) & 0xffffffff)
+-#define U64_HI(x) (((u64)x) >> 32)
+-#define HILO_U64(hi, lo) (((u64)hi << 32) + lo)
++#define U64_LO(x) (u32)(((u64)(x)) & 0xffffffff)
++#define U64_HI(x) (u32)(((u64)(x)) >> 32)
++#define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo))
+
+
+-#define REG_ADDR(bp, offset) (bp->regview + offset)
++#define REG_ADDR(bp, offset) (bp->regview + offset)
+
+-#define REG_RD(bp, offset) readl(REG_ADDR(bp, offset))
+-#define REG_RD8(bp, offset) readb(REG_ADDR(bp, offset))
+-#define REG_RD64(bp, offset) readq(REG_ADDR(bp, offset))
++#define REG_RD(bp, offset) readl(REG_ADDR(bp, offset))
++#define REG_RD8(bp, offset) readb(REG_ADDR(bp, offset))
++#define REG_RD64(bp, offset) readq(REG_ADDR(bp, offset))
+
+-#define REG_WR(bp, offset, val) writel((u32)val, REG_ADDR(bp, offset))
++#define REG_WR(bp, offset, val) writel((u32)val, REG_ADDR(bp, offset))
+ #define REG_WR8(bp, offset, val) writeb((u8)val, REG_ADDR(bp, offset))
+-#define REG_WR16(bp, offset, val) writew((u16)val, REG_ADDR(bp, offset))
+-#define REG_WR32(bp, offset, val) REG_WR(bp, offset, val)
++#define REG_WR16(bp, offset, val) writew((u16)val, REG_ADDR(bp, offset))
++#define REG_WR32(bp, offset, val) REG_WR(bp, offset, val)
++
++#define REG_RD_IND(bp, offset) bnx2x_reg_rd_ind(bp, offset)
++#define REG_WR_IND(bp, offset, val) bnx2x_reg_wr_ind(bp, offset, val)
+
+-#define REG_RD_IND(bp, offset) bnx2x_reg_rd_ind(bp, offset)
+-#define REG_WR_IND(bp, offset, val) bnx2x_reg_wr_ind(bp, offset, val)
++#define REG_RD_DMAE(bp, offset, valp, len32) \
++ do { \
++ bnx2x_read_dmae(bp, offset, len32);\
++ memcpy(valp, bnx2x_sp(bp, wb_data[0]), len32 * 4); \
++ } while (0)
+
+-#define REG_WR_DMAE(bp, offset, val, len32) \
++#define REG_WR_DMAE(bp, offset, valp, len32) \
+ do { \
+- memcpy(bnx2x_sp(bp, wb_data[0]), val, len32 * 4); \
++ memcpy(bnx2x_sp(bp, wb_data[0]), valp, len32 * 4); \
+ bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data), \
+ offset, len32); \
+ } while (0)
+
+-#define SHMEM_RD(bp, type) \
+- REG_RD(bp, bp->shmem_base + offsetof(struct shmem_region, type))
+-#define SHMEM_WR(bp, type, val) \
+- REG_WR(bp, bp->shmem_base + offsetof(struct shmem_region, type), val)
++#define SHMEM_ADDR(bp, field) (bp->common.shmem_base + \
++ offsetof(struct shmem_region, field))
++#define SHMEM_RD(bp, field) REG_RD(bp, SHMEM_ADDR(bp, field))
++#define SHMEM_WR(bp, field, val) REG_WR(bp, SHMEM_ADDR(bp, field), val)
+
+-#define NIG_WR(reg, val) REG_WR(bp, reg, val)
+-#define EMAC_WR(reg, val) REG_WR(bp, emac_base + reg, val)
+-#define BMAC_WR(reg, val) REG_WR(bp, GRCBASE_NIG + bmac_addr + reg, val)
++#define EMAC_RD(bp, reg) REG_RD(bp, emac_base + reg)
++#define EMAC_WR(bp, reg, val) REG_WR(bp, emac_base + reg, val)
+
+
+-#define for_each_queue(bp, var) for (var = 0; var < bp->num_queues; var++)
+-
+-#define for_each_nondefault_queue(bp, var) \
+- for (var = 1; var < bp->num_queues; var++)
+-#define is_multi(bp) (bp->num_queues > 1)
++/* fast path */
+
++struct sw_rx_bd {
++ struct sk_buff *skb;
++ DECLARE_PCI_UNMAP_ADDR(mapping)
++};
+
+-struct regp {
+- u32 lo;
+- u32 hi;
++struct sw_tx_bd {
++ struct sk_buff *skb;
++ u16 first_bd;
+ };
+
+-struct bmac_stats {
+- struct regp tx_gtpkt;
+- struct regp tx_gtxpf;
+- struct regp tx_gtfcs;
+- struct regp tx_gtmca;
+- struct regp tx_gtgca;
+- struct regp tx_gtfrg;
+- struct regp tx_gtovr;
+- struct regp tx_gt64;
+- struct regp tx_gt127;
+- struct regp tx_gt255; /* 10 */
+- struct regp tx_gt511;
+- struct regp tx_gt1023;
+- struct regp tx_gt1518;
+- struct regp tx_gt2047;
+- struct regp tx_gt4095;
+- struct regp tx_gt9216;
+- struct regp tx_gt16383;
+- struct regp tx_gtmax;
+- struct regp tx_gtufl;
+- struct regp tx_gterr; /* 20 */
+- struct regp tx_gtbyt;
+-
+- struct regp rx_gr64;
+- struct regp rx_gr127;
+- struct regp rx_gr255;
+- struct regp rx_gr511;
+- struct regp rx_gr1023;
+- struct regp rx_gr1518;
+- struct regp rx_gr2047;
+- struct regp rx_gr4095;
+- struct regp rx_gr9216; /* 30 */
+- struct regp rx_gr16383;
+- struct regp rx_grmax;
+- struct regp rx_grpkt;
+- struct regp rx_grfcs;
+- struct regp rx_grmca;
+- struct regp rx_grbca;
+- struct regp rx_grxcf;
+- struct regp rx_grxpf;
+- struct regp rx_grxuo;
+- struct regp rx_grjbr; /* 40 */
+- struct regp rx_grovr;
+- struct regp rx_grflr;
+- struct regp rx_grmeg;
+- struct regp rx_grmeb;
+- struct regp rx_grbyt;
+- struct regp rx_grund;
+- struct regp rx_grfrg;
+- struct regp rx_grerb;
+- struct regp rx_grfre;
+- struct regp rx_gripj; /* 50 */
++struct sw_rx_page {
++ struct page *page;
++ DECLARE_PCI_UNMAP_ADDR(mapping)
+ };
+
+-struct emac_stats {
+- u32 rx_ifhcinoctets ;
+- u32 rx_ifhcinbadoctets ;
+- u32 rx_etherstatsfragments ;
+- u32 rx_ifhcinucastpkts ;
+- u32 rx_ifhcinmulticastpkts ;
+- u32 rx_ifhcinbroadcastpkts ;
+- u32 rx_dot3statsfcserrors ;
+- u32 rx_dot3statsalignmenterrors ;
+- u32 rx_dot3statscarriersenseerrors ;
+- u32 rx_xonpauseframesreceived ; /* 10 */
+- u32 rx_xoffpauseframesreceived ;
+- u32 rx_maccontrolframesreceived ;
+- u32 rx_xoffstateentered ;
+- u32 rx_dot3statsframestoolong ;
+- u32 rx_etherstatsjabbers ;
+- u32 rx_etherstatsundersizepkts ;
+- u32 rx_etherstatspkts64octets ;
+- u32 rx_etherstatspkts65octetsto127octets ;
+- u32 rx_etherstatspkts128octetsto255octets ;
+- u32 rx_etherstatspkts256octetsto511octets ; /* 20 */
+- u32 rx_etherstatspkts512octetsto1023octets ;
+- u32 rx_etherstatspkts1024octetsto1522octets;
+- u32 rx_etherstatspktsover1522octets ;
+-
+- u32 rx_falsecarriererrors ;
+-
+- u32 tx_ifhcoutoctets ;
+- u32 tx_ifhcoutbadoctets ;
+- u32 tx_etherstatscollisions ;
+- u32 tx_outxonsent ;
+- u32 tx_outxoffsent ;
+- u32 tx_flowcontroldone ; /* 30 */
+- u32 tx_dot3statssinglecollisionframes ;
+- u32 tx_dot3statsmultiplecollisionframes ;
+- u32 tx_dot3statsdeferredtransmissions ;
+- u32 tx_dot3statsexcessivecollisions ;
+- u32 tx_dot3statslatecollisions ;
+- u32 tx_ifhcoutucastpkts ;
+- u32 tx_ifhcoutmulticastpkts ;
+- u32 tx_ifhcoutbroadcastpkts ;
+- u32 tx_etherstatspkts64octets ;
+- u32 tx_etherstatspkts65octetsto127octets ; /* 40 */
+- u32 tx_etherstatspkts128octetsto255octets ;
+- u32 tx_etherstatspkts256octetsto511octets ;
+- u32 tx_etherstatspkts512octetsto1023octets ;
+- u32 tx_etherstatspkts1024octetsto1522octet ;
+- u32 tx_etherstatspktsover1522octets ;
+- u32 tx_dot3statsinternalmactransmiterrors ; /* 46 */
++
++/* MC hsi */
++#define BCM_PAGE_SHIFT 12
++#define BCM_PAGE_SIZE (1 << BCM_PAGE_SHIFT)
++#define BCM_PAGE_MASK (~(BCM_PAGE_SIZE - 1))
++#define BCM_PAGE_ALIGN(addr) (((addr) + BCM_PAGE_SIZE - 1) & BCM_PAGE_MASK)
++
++#define PAGES_PER_SGE_SHIFT 0
++#define PAGES_PER_SGE (1 << PAGES_PER_SGE_SHIFT)
++#define SGE_PAGE_SIZE PAGE_SIZE
++#define SGE_PAGE_SHIFT PAGE_SHIFT
++#define SGE_PAGE_ALIGN(addr) PAGE_ALIGN(addr)
++
++#define BCM_RX_ETH_PAYLOAD_ALIGN 64
++
++/* SGE ring related macros */
++#define NUM_RX_SGE_PAGES 2
++#define RX_SGE_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_sge))
++#define MAX_RX_SGE_CNT (RX_SGE_CNT - 2)
++/* RX_SGE_CNT is promised to be a power of 2 */
++#define RX_SGE_MASK (RX_SGE_CNT - 1)
++#define NUM_RX_SGE (RX_SGE_CNT * NUM_RX_SGE_PAGES)
++#define MAX_RX_SGE (NUM_RX_SGE - 1)
++#define NEXT_SGE_IDX(x) ((((x) & RX_SGE_MASK) == \
++ (MAX_RX_SGE_CNT - 1)) ? (x) + 3 : (x) + 1)
++#define RX_SGE(x) ((x) & MAX_RX_SGE)
++
++/* SGE producer mask related macros */
++/* Number of bits in one sge_mask array element */
++#define RX_SGE_MASK_ELEM_SZ 64
++#define RX_SGE_MASK_ELEM_SHIFT 6
++#define RX_SGE_MASK_ELEM_MASK ((u64)RX_SGE_MASK_ELEM_SZ - 1)
++
++/* Creates a bitmask of all ones in less significant bits.
++ idx - index of the most significant bit in the created mask */
++#define RX_SGE_ONES_MASK(idx) \
++ (((u64)0x1 << (((idx) & RX_SGE_MASK_ELEM_MASK) + 1)) - 1)
++#define RX_SGE_MASK_ELEM_ONE_MASK ((u64)(~0))
++
++/* Number of u64 elements in SGE mask array */
++#define RX_SGE_MASK_LEN ((NUM_RX_SGE_PAGES * RX_SGE_CNT) / \
++ RX_SGE_MASK_ELEM_SZ)
++#define RX_SGE_MASK_LEN_MASK (RX_SGE_MASK_LEN - 1)
++#define NEXT_SGE_MASK_ELEM(el) (((el) + 1) & RX_SGE_MASK_LEN_MASK)
++
++
++struct bnx2x_fastpath {
++
++ struct napi_struct napi;
++
++ struct host_status_block *status_blk;
++ dma_addr_t status_blk_mapping;
++
++ struct eth_tx_db_data *hw_tx_prods;
++ dma_addr_t tx_prods_mapping;
++
++ struct sw_tx_bd *tx_buf_ring;
++
++ struct eth_tx_bd *tx_desc_ring;
++ dma_addr_t tx_desc_mapping;
++
++ struct sw_rx_bd *rx_buf_ring; /* BDs mappings ring */
++ struct sw_rx_page *rx_page_ring; /* SGE pages mappings ring */
++
++ struct eth_rx_bd *rx_desc_ring;
++ dma_addr_t rx_desc_mapping;
++
++ union eth_rx_cqe *rx_comp_ring;
++ dma_addr_t rx_comp_mapping;
++
++ /* SGE ring */
++ struct eth_rx_sge *rx_sge_ring;
++ dma_addr_t rx_sge_mapping;
++
++ u64 sge_mask[RX_SGE_MASK_LEN];
++
++ int state;
++#define BNX2X_FP_STATE_CLOSED 0
++#define BNX2X_FP_STATE_IRQ 0x80000
++#define BNX2X_FP_STATE_OPENING 0x90000
++#define BNX2X_FP_STATE_OPEN 0xa0000
++#define BNX2X_FP_STATE_HALTING 0xb0000
++#define BNX2X_FP_STATE_HALTED 0xc0000
++
++ u8 index; /* number in fp array */
++ u8 cl_id; /* eth client id */
++ u8 sb_id; /* status block number in HW */
++#define FP_IDX(fp) (fp->index)
++#define FP_CL_ID(fp) (fp->cl_id)
++#define BP_CL_ID(bp) (bp->fp[0].cl_id)
++#define FP_SB_ID(fp) (fp->sb_id)
++#define CNIC_SB_ID 0
++
++ u16 tx_pkt_prod;
++ u16 tx_pkt_cons;
++ u16 tx_bd_prod;
++ u16 tx_bd_cons;
++ u16 *tx_cons_sb;
++
++ u16 fp_c_idx;
++ u16 fp_u_idx;
++
++ u16 rx_bd_prod;
++ u16 rx_bd_cons;
++ u16 rx_comp_prod;
++ u16 rx_comp_cons;
++ u16 rx_sge_prod;
++ /* The last maximal completed SGE */
++ u16 last_max_sge;
++ u16 *rx_cons_sb;
++ u16 *rx_bd_cons_sb;
++
++ unsigned long tx_pkt,
++ rx_pkt,
++ rx_calls;
++ /* TPA related */
++ struct sw_rx_bd tpa_pool[ETH_MAX_AGGREGATION_QUEUES_E1H];
++ u8 tpa_state[ETH_MAX_AGGREGATION_QUEUES_E1H];
++#define BNX2X_TPA_START 1
++#define BNX2X_TPA_STOP 2
++ u8 disable_tpa;
++#ifdef BNX2X_STOP_ON_ERROR
++ u64 tpa_queue_used;
++#endif
++
++ struct bnx2x *bp; /* parent */
+ };
+
+-union mac_stats {
+- struct emac_stats emac;
+- struct bmac_stats bmac;
++#define bnx2x_fp(bp, nr, var) (bp->fp[nr].var)
++
++#define BNX2X_HAS_WORK(fp) (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))
++
++
++/* MC hsi */
++#define MAX_FETCH_BD 13 /* HW max BDs per packet */
++#define RX_COPY_THRESH 92
++
++#define NUM_TX_RINGS 16
++#define TX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_tx_bd))
++#define MAX_TX_DESC_CNT (TX_DESC_CNT - 1)
++#define NUM_TX_BD (TX_DESC_CNT * NUM_TX_RINGS)
++#define MAX_TX_BD (NUM_TX_BD - 1)
++#define MAX_TX_AVAIL (MAX_TX_DESC_CNT * NUM_TX_RINGS - 2)
++#define NEXT_TX_IDX(x) ((((x) & MAX_TX_DESC_CNT) == \
++ (MAX_TX_DESC_CNT - 1)) ? (x) + 2 : (x) + 1)
++#define TX_BD(x) ((x) & MAX_TX_BD)
++#define TX_BD_POFF(x) ((x) & MAX_TX_DESC_CNT)
++
++/* The RX BD ring is special, each bd is 8 bytes but the last one is 16 */
++#define NUM_RX_RINGS 8
++#define RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd))
++#define MAX_RX_DESC_CNT (RX_DESC_CNT - 2)
++#define RX_DESC_MASK (RX_DESC_CNT - 1)
++#define NUM_RX_BD (RX_DESC_CNT * NUM_RX_RINGS)
++#define MAX_RX_BD (NUM_RX_BD - 1)
++#define MAX_RX_AVAIL (MAX_RX_DESC_CNT * NUM_RX_RINGS - 2)
++#define NEXT_RX_IDX(x) ((((x) & RX_DESC_MASK) == \
++ (MAX_RX_DESC_CNT - 1)) ? (x) + 3 : (x) + 1)
++#define RX_BD(x) ((x) & MAX_RX_BD)
++
++/* As long as CQE is 4 times bigger than BD entry we have to allocate
++ 4 times more pages for CQ ring in order to keep it balanced with
++ BD ring */
++#define NUM_RCQ_RINGS (NUM_RX_RINGS * 4)
++#define RCQ_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_rx_cqe))
++#define MAX_RCQ_DESC_CNT (RCQ_DESC_CNT - 1)
++#define NUM_RCQ_BD (RCQ_DESC_CNT * NUM_RCQ_RINGS)
++#define MAX_RCQ_BD (NUM_RCQ_BD - 1)
++#define MAX_RCQ_AVAIL (MAX_RCQ_DESC_CNT * NUM_RCQ_RINGS - 2)
++#define NEXT_RCQ_IDX(x) ((((x) & MAX_RCQ_DESC_CNT) == \
++ (MAX_RCQ_DESC_CNT - 1)) ? (x) + 2 : (x) + 1)
++#define RCQ_BD(x) ((x) & MAX_RCQ_BD)
++
++
++/* This is needed for determining of last_max */
++#define SUB_S16(a, b) (s16)((s16)(a) - (s16)(b))
++
++#define __SGE_MASK_SET_BIT(el, bit) \
++ do { \
++ el = ((el) | ((u64)0x1 << (bit))); \
++ } while (0)
++
++#define __SGE_MASK_CLEAR_BIT(el, bit) \
++ do { \
++ el = ((el) & (~((u64)0x1 << (bit)))); \
++ } while (0)
++
++#define SGE_MASK_SET_BIT(fp, idx) \
++ __SGE_MASK_SET_BIT(fp->sge_mask[(idx) >> RX_SGE_MASK_ELEM_SHIFT], \
++ ((idx) & RX_SGE_MASK_ELEM_MASK))
++
++#define SGE_MASK_CLEAR_BIT(fp, idx) \
++ __SGE_MASK_CLEAR_BIT(fp->sge_mask[(idx) >> RX_SGE_MASK_ELEM_SHIFT], \
++ ((idx) & RX_SGE_MASK_ELEM_MASK))
++
++
++/* used on a CID received from the HW */
++#define SW_CID(x) (le32_to_cpu(x) & \
++ (COMMON_RAMROD_ETH_RX_CQE_CID >> 7))
++#define CQE_CMD(x) (le32_to_cpu(x) >> \
++ COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT)
++
++#define BD_UNMAP_ADDR(bd) HILO_U64(le32_to_cpu((bd)->addr_hi), \
++ le32_to_cpu((bd)->addr_lo))
++#define BD_UNMAP_LEN(bd) (le16_to_cpu((bd)->nbytes))
++
++
++#define DPM_TRIGER_TYPE 0x40
++#define DOORBELL(bp, cid, val) \
++ do { \
++ writel((u32)val, (bp)->doorbells + (BCM_PAGE_SIZE * cid) + \
++ DPM_TRIGER_TYPE); \
++ } while (0)
++
++
++/* TX CSUM helpers */
++#define SKB_CS_OFF(skb) (offsetof(struct tcphdr, check) - \
++ skb->csum_offset)
++#define SKB_CS(skb) (*(u16 *)(skb_transport_header(skb) + \
++ skb->csum_offset))
++
++#define pbd_tcp_flags(skb) (ntohl(tcp_flag_word(tcp_hdr(skb)))>>16 & 0xff)
++
++#define XMIT_PLAIN 0
++#define XMIT_CSUM_V4 0x1
++#define XMIT_CSUM_V6 0x2
++#define XMIT_CSUM_TCP 0x4
++#define XMIT_GSO_V4 0x8
++#define XMIT_GSO_V6 0x10
++
++#define XMIT_CSUM (XMIT_CSUM_V4 | XMIT_CSUM_V6)
++#define XMIT_GSO (XMIT_GSO_V4 | XMIT_GSO_V6)
++
++
++/* stuff added to make the code fit 80Col */
++
++#define CQE_TYPE(cqe_fp_flags) ((cqe_fp_flags) & ETH_FAST_PATH_RX_CQE_TYPE)
++
++#define TPA_TYPE_START ETH_FAST_PATH_RX_CQE_START_FLG
++#define TPA_TYPE_END ETH_FAST_PATH_RX_CQE_END_FLG
++#define TPA_TYPE(cqe_fp_flags) ((cqe_fp_flags) & \
++ (TPA_TYPE_START | TPA_TYPE_END))
++
++#define ETH_RX_ERROR_FALGS ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG
++
++#define BNX2X_IP_CSUM_ERR(cqe) \
++ (!((cqe)->fast_path_cqe.status_flags & \
++ ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG) && \
++ ((cqe)->fast_path_cqe.type_error_flags & \
++ ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG))
++
++#define BNX2X_L4_CSUM_ERR(cqe) \
++ (!((cqe)->fast_path_cqe.status_flags & \
++ ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG) && \
++ ((cqe)->fast_path_cqe.type_error_flags & \
++ ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
++
++#define BNX2X_RX_CSUM_OK(cqe) \
++ (!(BNX2X_L4_CSUM_ERR(cqe) || BNX2X_IP_CSUM_ERR(cqe)))
++
++#define BNX2X_RX_SUM_FIX(cqe) \
++ ((le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) & \
++ PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) == \
++ (1 << PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT))
++
++
++#define FP_USB_FUNC_OFF (2 + 2*HC_USTORM_SB_NUM_INDICES)
++#define FP_CSB_FUNC_OFF (2 + 2*HC_CSTORM_SB_NUM_INDICES)
++
++#define U_SB_ETH_RX_CQ_INDEX HC_INDEX_U_ETH_RX_CQ_CONS
++#define U_SB_ETH_RX_BD_INDEX HC_INDEX_U_ETH_RX_BD_CONS
++#define C_SB_ETH_TX_CQ_INDEX HC_INDEX_C_ETH_TX_CQ_CONS
++
++#define BNX2X_RX_SB_INDEX \
++ (&fp->status_blk->u_status_block.index_values[U_SB_ETH_RX_CQ_INDEX])
++
++#define BNX2X_RX_SB_BD_INDEX \
++ (&fp->status_blk->u_status_block.index_values[U_SB_ETH_RX_BD_INDEX])
++
++#define BNX2X_RX_SB_INDEX_NUM \
++ (((U_SB_ETH_RX_CQ_INDEX << \
++ USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER_SHIFT) & \
++ USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER) | \
++ ((U_SB_ETH_RX_BD_INDEX << \
++ USTORM_ETH_ST_CONTEXT_CONFIG_BD_SB_INDEX_NUMBER_SHIFT) & \
++ USTORM_ETH_ST_CONTEXT_CONFIG_BD_SB_INDEX_NUMBER))
++
++#define BNX2X_TX_SB_INDEX \
++ (&fp->status_blk->c_status_block.index_values[C_SB_ETH_TX_CQ_INDEX])
++
++
++/* end of fast path */
++
++/* common */
++
++struct bnx2x_common {
++
++ u32 chip_id;
++/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
++#define CHIP_ID(bp) (bp->common.chip_id & 0xfffffff0)
++
++#define CHIP_NUM(bp) (bp->common.chip_id >> 16)
++#define CHIP_NUM_57710 0x164e
++#define CHIP_NUM_57711 0x164f
++#define CHIP_NUM_57711E 0x1650
++#define CHIP_IS_E1(bp) (CHIP_NUM(bp) == CHIP_NUM_57710)
++#define CHIP_IS_57711(bp) (CHIP_NUM(bp) == CHIP_NUM_57711)
++#define CHIP_IS_57711E(bp) (CHIP_NUM(bp) == CHIP_NUM_57711E)
++#define CHIP_IS_E1H(bp) (CHIP_IS_57711(bp) || \
++ CHIP_IS_57711E(bp))
++#define IS_E1H_OFFSET CHIP_IS_E1H(bp)
++
++#define CHIP_REV(bp) (bp->common.chip_id & 0x0000f000)
++#define CHIP_REV_Ax 0x00000000
++/* assume maximum 5 revisions */
++#define CHIP_REV_IS_SLOW(bp) (CHIP_REV(bp) > 0x00005000)
++/* Emul versions are A=>0xe, B=>0xc, C=>0xa, D=>8, E=>6 */
++#define CHIP_REV_IS_EMUL(bp) ((CHIP_REV_IS_SLOW(bp)) && \
++ !(CHIP_REV(bp) & 0x00001000))
++/* FPGA versions are A=>0xf, B=>0xd, C=>0xb, D=>9, E=>7 */
++#define CHIP_REV_IS_FPGA(bp) ((CHIP_REV_IS_SLOW(bp)) && \
++ (CHIP_REV(bp) & 0x00001000))
++
++#define CHIP_TIME(bp) ((CHIP_REV_IS_EMUL(bp)) ? 2000 : \
++ ((CHIP_REV_IS_FPGA(bp)) ? 200 : 1))
++
++#define CHIP_METAL(bp) (bp->common.chip_id & 0x00000ff0)
++#define CHIP_BOND_ID(bp) (bp->common.chip_id & 0x0000000f)
++
++ int flash_size;
++#define NVRAM_1MB_SIZE 0x20000 /* 1M bit in bytes */
++#define NVRAM_TIMEOUT_COUNT 30000
++#define NVRAM_PAGE_SIZE 256
++
++ u32 shmem_base;
++
++ u32 hw_config;
++ u32 board;
++
++ u32 bc_ver;
++
++ char *name;
+ };
+
++
++/* end of common */
++
++/* port */
++
+ struct nig_stats {
+ u32 brb_discard;
+ u32 brb_packet;
+@@ -244,13 +505,53 @@ struct nig_stats {
+ u32 pbf_octets;
+ u32 pbf_packet;
+ u32 safc_inp;
+- u32 done;
+- u32 pad;
++ u32 egress_mac_pkt0_lo;
++ u32 egress_mac_pkt0_hi;
++ u32 egress_mac_pkt1_lo;
++ u32 egress_mac_pkt1_hi;
++};
++
++struct bnx2x_port {
++ u32 pmf;
++
++ u32 link_config;
++
++ u32 supported;
++/* link settings - missing defines */
++#define SUPPORTED_2500baseX_Full (1 << 15)
++
++ u32 advertising;
++/* link settings - missing defines */
++#define ADVERTISED_2500baseX_Full (1 << 15)
++
++ u32 phy_addr;
++
++ /* used to synchronize phy accesses */
++ struct mutex phy_mutex;
++
++ u32 port_stx;
++
++ struct nig_stats old_nig_stats;
++};
++
++/* end of port */
++
++
++enum bnx2x_stats_event {
++ STATS_EVENT_PMF = 0,
++ STATS_EVENT_LINK_UP,
++ STATS_EVENT_UPDATE,
++ STATS_EVENT_STOP,
++ STATS_EVENT_MAX
++};
++
++enum bnx2x_stats_state {
++ STATS_STATE_DISABLED = 0,
++ STATS_STATE_ENABLED,
++ STATS_STATE_MAX
+ };
+
+ struct bnx2x_eth_stats {
+- u32 pad; /* to make long counters u64 aligned */
+- u32 mac_stx_start;
+ u32 total_bytes_received_hi;
+ u32 total_bytes_received_lo;
+ u32 total_bytes_transmitted_hi;
+@@ -267,97 +568,122 @@ struct bnx2x_eth_stats {
+ u32 total_multicast_packets_transmitted_lo;
+ u32 total_broadcast_packets_transmitted_hi;
+ u32 total_broadcast_packets_transmitted_lo;
+- u32 crc_receive_errors;
+- u32 alignment_errors;
+- u32 false_carrier_detections;
+- u32 runt_packets_received;
+- u32 jabber_packets_received;
+- u32 pause_xon_frames_received;
+- u32 pause_xoff_frames_received;
+- u32 pause_xon_frames_transmitted;
+- u32 pause_xoff_frames_transmitted;
+- u32 single_collision_transmit_frames;
+- u32 multiple_collision_transmit_frames;
+- u32 late_collision_frames;
+- u32 excessive_collision_frames;
+- u32 control_frames_received;
+- u32 frames_received_64_bytes;
+- u32 frames_received_65_127_bytes;
+- u32 frames_received_128_255_bytes;
+- u32 frames_received_256_511_bytes;
+- u32 frames_received_512_1023_bytes;
+- u32 frames_received_1024_1522_bytes;
+- u32 frames_received_1523_9022_bytes;
+- u32 frames_transmitted_64_bytes;
+- u32 frames_transmitted_65_127_bytes;
+- u32 frames_transmitted_128_255_bytes;
+- u32 frames_transmitted_256_511_bytes;
+- u32 frames_transmitted_512_1023_bytes;
+- u32 frames_transmitted_1024_1522_bytes;
+- u32 frames_transmitted_1523_9022_bytes;
+ u32 valid_bytes_received_hi;
+ u32 valid_bytes_received_lo;
+- u32 error_runt_packets_received;
+- u32 error_jabber_packets_received;
+- u32 mac_stx_end;
+-
+- u32 pad2;
+- u32 stat_IfHCInBadOctets_hi;
+- u32 stat_IfHCInBadOctets_lo;
+- u32 stat_IfHCOutBadOctets_hi;
+- u32 stat_IfHCOutBadOctets_lo;
+- u32 stat_Dot3statsFramesTooLong;
+- u32 stat_Dot3statsInternalMacTransmitErrors;
+- u32 stat_Dot3StatsCarrierSenseErrors;
+- u32 stat_Dot3StatsDeferredTransmissions;
+- u32 stat_FlowControlDone;
+- u32 stat_XoffStateEntered;
+-
+- u32 x_total_sent_bytes_hi;
+- u32 x_total_sent_bytes_lo;
+- u32 x_total_sent_pkts;
+-
+- u32 t_rcv_unicast_bytes_hi;
+- u32 t_rcv_unicast_bytes_lo;
+- u32 t_rcv_broadcast_bytes_hi;
+- u32 t_rcv_broadcast_bytes_lo;
+- u32 t_rcv_multicast_bytes_hi;
+- u32 t_rcv_multicast_bytes_lo;
+- u32 t_total_rcv_pkt;
+-
+- u32 checksum_discard;
+- u32 packets_too_big_discard;
++
++ u32 error_bytes_received_hi;
++ u32 error_bytes_received_lo;
++
++ u32 rx_stat_ifhcinbadoctets_hi;
++ u32 rx_stat_ifhcinbadoctets_lo;
++ u32 tx_stat_ifhcoutbadoctets_hi;
++ u32 tx_stat_ifhcoutbadoctets_lo;
++ u32 rx_stat_dot3statsfcserrors_hi;
++ u32 rx_stat_dot3statsfcserrors_lo;
++ u32 rx_stat_dot3statsalignmenterrors_hi;
++ u32 rx_stat_dot3statsalignmenterrors_lo;
++ u32 rx_stat_dot3statscarriersenseerrors_hi;
++ u32 rx_stat_dot3statscarriersenseerrors_lo;
++ u32 rx_stat_falsecarriererrors_hi;
++ u32 rx_stat_falsecarriererrors_lo;
++ u32 rx_stat_etherstatsundersizepkts_hi;
++ u32 rx_stat_etherstatsundersizepkts_lo;
++ u32 rx_stat_dot3statsframestoolong_hi;
++ u32 rx_stat_dot3statsframestoolong_lo;
++ u32 rx_stat_etherstatsfragments_hi;
++ u32 rx_stat_etherstatsfragments_lo;
++ u32 rx_stat_etherstatsjabbers_hi;
++ u32 rx_stat_etherstatsjabbers_lo;
++ u32 rx_stat_maccontrolframesreceived_hi;
++ u32 rx_stat_maccontrolframesreceived_lo;
++ u32 rx_stat_bmac_xpf_hi;
++ u32 rx_stat_bmac_xpf_lo;
++ u32 rx_stat_bmac_xcf_hi;
++ u32 rx_stat_bmac_xcf_lo;
++ u32 rx_stat_xoffstateentered_hi;
++ u32 rx_stat_xoffstateentered_lo;
++ u32 rx_stat_xonpauseframesreceived_hi;
++ u32 rx_stat_xonpauseframesreceived_lo;
++ u32 rx_stat_xoffpauseframesreceived_hi;
++ u32 rx_stat_xoffpauseframesreceived_lo;
++ u32 tx_stat_outxonsent_hi;
++ u32 tx_stat_outxonsent_lo;
++ u32 tx_stat_outxoffsent_hi;
++ u32 tx_stat_outxoffsent_lo;
++ u32 tx_stat_flowcontroldone_hi;
++ u32 tx_stat_flowcontroldone_lo;
++ u32 tx_stat_etherstatscollisions_hi;
++ u32 tx_stat_etherstatscollisions_lo;
++ u32 tx_stat_dot3statssinglecollisionframes_hi;
++ u32 tx_stat_dot3statssinglecollisionframes_lo;
++ u32 tx_stat_dot3statsmultiplecollisionframes_hi;
++ u32 tx_stat_dot3statsmultiplecollisionframes_lo;
++ u32 tx_stat_dot3statsdeferredtransmissions_hi;
++ u32 tx_stat_dot3statsdeferredtransmissions_lo;
++ u32 tx_stat_dot3statsexcessivecollisions_hi;
++ u32 tx_stat_dot3statsexcessivecollisions_lo;
++ u32 tx_stat_dot3statslatecollisions_hi;
++ u32 tx_stat_dot3statslatecollisions_lo;
++ u32 tx_stat_etherstatspkts64octets_hi;
++ u32 tx_stat_etherstatspkts64octets_lo;
++ u32 tx_stat_etherstatspkts65octetsto127octets_hi;
++ u32 tx_stat_etherstatspkts65octetsto127octets_lo;
++ u32 tx_stat_etherstatspkts128octetsto255octets_hi;
++ u32 tx_stat_etherstatspkts128octetsto255octets_lo;
++ u32 tx_stat_etherstatspkts256octetsto511octets_hi;
++ u32 tx_stat_etherstatspkts256octetsto511octets_lo;
++ u32 tx_stat_etherstatspkts512octetsto1023octets_hi;
++ u32 tx_stat_etherstatspkts512octetsto1023octets_lo;
++ u32 tx_stat_etherstatspkts1024octetsto1522octets_hi;
++ u32 tx_stat_etherstatspkts1024octetsto1522octets_lo;
++ u32 tx_stat_etherstatspktsover1522octets_hi;
++ u32 tx_stat_etherstatspktsover1522octets_lo;
++ u32 tx_stat_bmac_2047_hi;
++ u32 tx_stat_bmac_2047_lo;
++ u32 tx_stat_bmac_4095_hi;
++ u32 tx_stat_bmac_4095_lo;
++ u32 tx_stat_bmac_9216_hi;
++ u32 tx_stat_bmac_9216_lo;
++ u32 tx_stat_bmac_16383_hi;
++ u32 tx_stat_bmac_16383_lo;
++ u32 tx_stat_dot3statsinternalmactransmiterrors_hi;
++ u32 tx_stat_dot3statsinternalmactransmiterrors_lo;
++ u32 tx_stat_bmac_ufl_hi;
++ u32 tx_stat_bmac_ufl_lo;
++
++ u32 brb_drop_hi;
++ u32 brb_drop_lo;
++ u32 brb_truncate_hi;
++ u32 brb_truncate_lo;
++
++ u32 jabber_packets_received;
++
++ u32 etherstatspkts1024octetsto1522octets_hi;
++ u32 etherstatspkts1024octetsto1522octets_lo;
++ u32 etherstatspktsover1522octets_hi;
++ u32 etherstatspktsover1522octets_lo;
++
+ u32 no_buff_discard;
+- u32 ttl0_discard;
+- u32 mac_discard;
++
+ u32 mac_filter_discard;
+ u32 xxoverflow_discard;
+ u32 brb_truncate_discard;
++ u32 mac_discard;
+
+- u32 brb_discard;
+- u32 brb_packet;
+- u32 brb_truncate;
+- u32 flow_ctrl_discard;
+- u32 flow_ctrl_octets;
+- u32 flow_ctrl_packet;
+- u32 mng_discard;
+- u32 mng_octet_inp;
+- u32 mng_octet_out;
+- u32 mng_packet_inp;
+- u32 mng_packet_out;
+- u32 pbf_octets;
+- u32 pbf_packet;
+- u32 safc_inp;
+ u32 driver_xoff;
+- u32 number_of_bugs_found_in_stats_spec; /* just kidding */
++ u32 rx_err_discard_pkt;
++ u32 rx_skb_alloc_failed;
++ u32 hw_csum_err;
+ };
+
+-#define MAC_STX_NA 0xffffffff
++#define STATS_OFFSET32(stat_name) \
++ (offsetof(struct bnx2x_eth_stats, stat_name) / 4)
++
+
+ #ifdef BNX2X_MULTI
+-#define MAX_CONTEXT 16
++#define MAX_CONTEXT 16
+ #else
+-#define MAX_CONTEXT 1
++#define MAX_CONTEXT 1
+ #endif
+
+ union cdu_context {
+@@ -365,345 +691,190 @@ union cdu_context {
+ char pad[1024];
+ };
+
+-#define MAX_DMAE_C 5
++#define MAX_DMAE_C 8
+
+ /* DMA memory not used in fastpath */
+ struct bnx2x_slowpath {
+- union cdu_context context[MAX_CONTEXT];
+- struct eth_stats_query fw_stats;
+- struct mac_configuration_cmd mac_config;
+- struct mac_configuration_cmd mcast_config;
++ union cdu_context context[MAX_CONTEXT];
++ struct eth_stats_query fw_stats;
++ struct mac_configuration_cmd mac_config;
++ struct mac_configuration_cmd mcast_config;
+
+ /* used by dmae command executer */
+- struct dmae_command dmae[MAX_DMAE_C];
++ struct dmae_command dmae[MAX_DMAE_C];
+
+- union mac_stats mac_stats;
+- struct nig_stats nig;
+- struct bnx2x_eth_stats eth_stats;
++ u32 stats_comp;
++ union mac_stats mac_stats;
++ struct nig_stats nig_stats;
++ struct host_port_stats port_stats;
++ struct host_func_stats func_stats;
+
+- u32 wb_comp;
+-#define BNX2X_WB_COMP_VAL 0xe0d0d0ae
+- u32 wb_data[4];
++ u32 wb_comp;
++ u32 wb_data[4];
+ };
+
+-#define bnx2x_sp(bp, var) (&bp->slowpath->var)
+-#define bnx2x_sp_check(bp, var) ((bp->slowpath) ? (&bp->slowpath->var) : NULL)
++#define bnx2x_sp(bp, var) (&bp->slowpath->var)
+ #define bnx2x_sp_mapping(bp, var) \
+ (bp->slowpath_mapping + offsetof(struct bnx2x_slowpath, var))
+
+
+-struct sw_rx_bd {
+- struct sk_buff *skb;
+- DECLARE_PCI_UNMAP_ADDR(mapping)
+-};
+-
+-struct sw_tx_bd {
+- struct sk_buff *skb;
+- u16 first_bd;
+-};
+-
+-struct bnx2x_fastpath {
+-
+- struct napi_struct napi;
+-
+- struct host_status_block *status_blk;
+- dma_addr_t status_blk_mapping;
+-
+- struct eth_tx_db_data *hw_tx_prods;
+- dma_addr_t tx_prods_mapping;
+-
+- struct sw_tx_bd *tx_buf_ring;
+-
+- struct eth_tx_bd *tx_desc_ring;
+- dma_addr_t tx_desc_mapping;
+-
+- struct sw_rx_bd *rx_buf_ring;
+-
+- struct eth_rx_bd *rx_desc_ring;
+- dma_addr_t rx_desc_mapping;
+-
+- union eth_rx_cqe *rx_comp_ring;
+- dma_addr_t rx_comp_mapping;
+-
+- int state;
+-#define BNX2X_FP_STATE_CLOSED 0
+-#define BNX2X_FP_STATE_IRQ 0x80000
+-#define BNX2X_FP_STATE_OPENING 0x90000
+-#define BNX2X_FP_STATE_OPEN 0xa0000
+-#define BNX2X_FP_STATE_HALTING 0xb0000
+-#define BNX2X_FP_STATE_HALTED 0xc0000
+-
+- int index;
+-
+- u16 tx_pkt_prod;
+- u16 tx_pkt_cons;
+- u16 tx_bd_prod;
+- u16 tx_bd_cons;
+- u16 *tx_cons_sb;
+-
+- u16 fp_c_idx;
+- u16 fp_u_idx;
+-
+- u16 rx_bd_prod;
+- u16 rx_bd_cons;
+- u16 rx_comp_prod;
+- u16 rx_comp_cons;
+- u16 *rx_cons_sb;
+-
+- unsigned long tx_pkt,
+- rx_pkt,
+- rx_calls;
+-
+- struct bnx2x *bp; /* parent */
+-};
+-
+-#define bnx2x_fp(bp, nr, var) (bp->fp[nr].var)
+-
+-
+ /* attn group wiring */
+-#define MAX_DYNAMIC_ATTN_GRPS 8
++#define MAX_DYNAMIC_ATTN_GRPS 8
+
+ struct attn_route {
+- u32 sig[4];
++ u32 sig[4];
+ };
+
+ struct bnx2x {
+ /* Fields used in the tx and intr/napi performance paths
+ * are grouped together in the beginning of the structure
+ */
+- struct bnx2x_fastpath *fp;
+- void __iomem *regview;
+- void __iomem *doorbells;
++ struct bnx2x_fastpath fp[MAX_CONTEXT];
++ void __iomem *regview;
++ void __iomem *doorbells;
++#define BNX2X_DB_SIZE (16*BCM_PAGE_SIZE)
+
+- struct net_device *dev;
+- struct pci_dev *pdev;
++ struct net_device *dev;
++ struct pci_dev *pdev;
+
+ atomic_t intr_sem;
+- struct msix_entry msix_table[MAX_CONTEXT+1];
++ struct msix_entry msix_table[MAX_CONTEXT+1];
+
+- int tx_ring_size;
++ int tx_ring_size;
+
+ #ifdef BCM_VLAN
+- struct vlan_group *vlgrp;
++ struct vlan_group *vlgrp;
+ #endif
+
+- u32 rx_csum;
+- u32 rx_offset;
+- u32 rx_buf_use_size; /* useable size */
+- u32 rx_buf_size; /* with alignment */
+-#define ETH_OVREHEAD (ETH_HLEN + 8) /* 8 for CRC + VLAN */
+-#define ETH_MIN_PACKET_SIZE 60
+-#define ETH_MAX_PACKET_SIZE 1500
+-#define ETH_MAX_JUMBO_PACKET_SIZE 9600
++ u32 rx_csum;
++ u32 rx_offset;
++ u32 rx_buf_size;
++#define ETH_OVREHEAD (ETH_HLEN + 8) /* 8 for CRC + VLAN */
++#define ETH_MIN_PACKET_SIZE 60
++#define ETH_MAX_PACKET_SIZE 1500
++#define ETH_MAX_JUMBO_PACKET_SIZE 9600
+
+ struct host_def_status_block *def_status_blk;
+-#define DEF_SB_ID 16
+- u16 def_c_idx;
+- u16 def_u_idx;
+- u16 def_t_idx;
+- u16 def_x_idx;
+- u16 def_att_idx;
+- u32 attn_state;
+- struct attn_route attn_group[MAX_DYNAMIC_ATTN_GRPS];
+- u32 aeu_mask;
+- u32 nig_mask;
++#define DEF_SB_ID 16
++ u16 def_c_idx;
++ u16 def_u_idx;
++ u16 def_x_idx;
++ u16 def_t_idx;
++ u16 def_att_idx;
++ u32 attn_state;
++ struct attn_route attn_group[MAX_DYNAMIC_ATTN_GRPS];
++ u32 nig_mask;
+
+ /* slow path ring */
+- struct eth_spe *spq;
+- dma_addr_t spq_mapping;
+- u16 spq_prod_idx;
+- struct eth_spe *spq_prod_bd;
+- struct eth_spe *spq_last_bd;
+- u16 *dsb_sp_prod;
+- u16 spq_left; /* serialize spq */
+- spinlock_t spq_lock;
+-
+- /* Flag for marking that there is either
+- * STAT_QUERY or CFC DELETE ramrod pending
+- */
+- u8 stat_pending;
++ struct eth_spe *spq;
++ dma_addr_t spq_mapping;
++ u16 spq_prod_idx;
++ struct eth_spe *spq_prod_bd;
++ struct eth_spe *spq_last_bd;
++ u16 *dsb_sp_prod;
++ u16 spq_left; /* serialize spq */
++ /* used to synchronize spq accesses */
++ spinlock_t spq_lock;
++
++ /* Flags for marking that there is a STAT_QUERY or
++ SET_MAC ramrod pending */
++ u8 stats_pending;
++ u8 set_mac_pending;
+
+ /* End of fields used in the performance code paths */
+
+- int panic;
+- int msglevel;
+-
+- u32 flags;
+-#define PCIX_FLAG 1
+-#define PCI_32BIT_FLAG 2
+-#define ONE_TDMA_FLAG 4 /* no longer used */
+-#define NO_WOL_FLAG 8
+-#define USING_DAC_FLAG 0x10
+-#define USING_MSIX_FLAG 0x20
+-#define ASF_ENABLE_FLAG 0x40
+-
+- int port;
+-
+- int pm_cap;
+- int pcie_cap;
+-
+- /* Used to synchronize phy accesses */
+- spinlock_t phy_lock;
+-
+- struct work_struct reset_task;
+- struct work_struct sp_task;
+-
+- struct timer_list timer;
+- int timer_interval;
+- int current_interval;
+-
+- u32 shmem_base;
+-
+- u32 chip_id;
+-/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
+-#define CHIP_ID(bp) (((bp)->chip_id) & 0xfffffff0)
+-
+-#define CHIP_NUM(bp) (((bp)->chip_id) & 0xffff0000)
+-
+-#define CHIP_REV(bp) (((bp)->chip_id) & 0x0000f000)
+-#define CHIP_REV_Ax 0x00000000
+-#define CHIP_REV_Bx 0x00001000
+-#define CHIP_REV_Cx 0x00002000
+-#define CHIP_REV_EMUL 0x0000e000
+-#define CHIP_REV_FPGA 0x0000f000
+-#define CHIP_REV_IS_SLOW(bp) ((CHIP_REV(bp) == CHIP_REV_EMUL) || \
+- (CHIP_REV(bp) == CHIP_REV_FPGA))
+-
+-#define CHIP_METAL(bp) (((bp)->chip_id) & 0x00000ff0)
+-#define CHIP_BOND_ID(bp) (((bp)->chip_id) & 0x0000000f)
+-
+- u16 fw_seq;
+- u16 fw_drv_pulse_wr_seq;
+- u32 fw_mb;
+-
+- u32 hw_config;
+- u32 board;
+- u32 serdes_config;
+- u32 lane_config;
+- u32 ext_phy_config;
+-#define XGXS_EXT_PHY_TYPE(bp) (bp->ext_phy_config & \
+- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK)
+-#define SERDES_EXT_PHY_TYPE(bp) (bp->ext_phy_config & \
+- PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK)
+-
+- u32 speed_cap_mask;
+- u32 link_config;
+-#define SWITCH_CFG_1G PORT_FEATURE_CON_SWITCH_1G_SWITCH
+-#define SWITCH_CFG_10G PORT_FEATURE_CON_SWITCH_10G_SWITCH
+-#define SWITCH_CFG_AUTO_DETECT PORT_FEATURE_CON_SWITCH_AUTO_DETECT
+-#define SWITCH_CFG_ONE_TIME_DETECT \
+- PORT_FEATURE_CON_SWITCH_ONE_TIME_DETECT
+-
+- u8 ser_lane;
+- u8 rx_lane_swap;
+- u8 tx_lane_swap;
+-
+- u8 link_up;
+- u8 phy_link_up;
+-
+- u32 supported;
+-/* link settings - missing defines */
+-#define SUPPORTED_2500baseT_Full (1 << 15)
+-
+- u32 phy_flags;
+-/*#define PHY_SERDES_FLAG 0x1*/
+-#define PHY_BMAC_FLAG 0x2
+-#define PHY_EMAC_FLAG 0x4
+-#define PHY_XGXS_FLAG 0x8
+-#define PHY_SGMII_FLAG 0x10
+-#define PHY_INT_MODE_MASK_FLAG 0x300
+-#define PHY_INT_MODE_AUTO_POLLING_FLAG 0x100
+-#define PHY_INT_MODE_LINK_READY_FLAG 0x200
+-
+- u32 phy_addr;
+- u32 phy_id;
+-
+- u32 autoneg;
+-#define AUTONEG_CL37 SHARED_HW_CFG_AN_ENABLE_CL37
+-#define AUTONEG_CL73 SHARED_HW_CFG_AN_ENABLE_CL73
+-#define AUTONEG_BAM SHARED_HW_CFG_AN_ENABLE_BAM
+-#define AUTONEG_PARALLEL \
+- SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION
+-#define AUTONEG_SGMII_FIBER_AUTODET \
+- SHARED_HW_CFG_AN_EN_SGMII_FIBER_AUTO_DETECT
+-#define AUTONEG_REMOTE_PHY SHARED_HW_CFG_AN_ENABLE_REMOTE_PHY
+-
+- u32 req_autoneg;
+-#define AUTONEG_SPEED 0x1
+-#define AUTONEG_FLOW_CTRL 0x2
+-
+- u32 req_line_speed;
+-/* link settings - missing defines */
+-#define SPEED_12000 12000
+-#define SPEED_12500 12500
+-#define SPEED_13000 13000
+-#define SPEED_15000 15000
+-#define SPEED_16000 16000
+-
+- u32 req_duplex;
+- u32 req_flow_ctrl;
+-#define FLOW_CTRL_AUTO PORT_FEATURE_FLOW_CONTROL_AUTO
+-#define FLOW_CTRL_TX PORT_FEATURE_FLOW_CONTROL_TX
+-#define FLOW_CTRL_RX PORT_FEATURE_FLOW_CONTROL_RX
+-#define FLOW_CTRL_BOTH PORT_FEATURE_FLOW_CONTROL_BOTH
+-#define FLOW_CTRL_NONE PORT_FEATURE_FLOW_CONTROL_NONE
+-
+- u32 advertising;
+-/* link settings - missing defines */
+-#define ADVERTISED_2500baseT_Full (1 << 15)
+-
+- u32 link_status;
+- u32 line_speed;
+- u32 duplex;
+- u32 flow_ctrl;
+-
+- u32 bc_ver;
+-
+- int flash_size;
+-#define NVRAM_1MB_SIZE 0x20000 /* 1M bit in bytes */
+-#define NVRAM_TIMEOUT_COUNT 30000
+-#define NVRAM_PAGE_SIZE 256
++ int panic;
++ int msglevel;
++
++ u32 flags;
++#define PCIX_FLAG 1
++#define PCI_32BIT_FLAG 2
++#define ONE_TDMA_FLAG 4 /* no longer used */
++#define NO_WOL_FLAG 8
++#define USING_DAC_FLAG 0x10
++#define USING_MSIX_FLAG 0x20
++#define ASF_ENABLE_FLAG 0x40
++#define TPA_ENABLE_FLAG 0x80
++#define NO_MCP_FLAG 0x100
++#define BP_NOMCP(bp) (bp->flags & NO_MCP_FLAG)
++#define HW_VLAN_TX_FLAG 0x400
++#define HW_VLAN_RX_FLAG 0x800
++
++ int func;
++#define BP_PORT(bp) (bp->func % PORT_MAX)
++#define BP_FUNC(bp) (bp->func)
++#define BP_E1HVN(bp) (bp->func >> 1)
++#define BP_L_ID(bp) (BP_E1HVN(bp) << 2)
++
++ int pm_cap;
++ int pcie_cap;
++ int mrrs;
++
++ struct delayed_work sp_task;
++ struct work_struct reset_task;
++
++ struct timer_list timer;
++ int timer_interval;
++ int current_interval;
++
++ u16 fw_seq;
++ u16 fw_drv_pulse_wr_seq;
++ u32 func_stx;
++
++ struct link_params link_params;
++ struct link_vars link_vars;
++
++ struct bnx2x_common common;
++ struct bnx2x_port port;
++
++ u32 mf_config;
++ u16 e1hov;
++ u8 e1hmf;
++#define IS_E1HMF(bp) (bp->e1hmf != 0)
+
+ u8 wol;
+
+- int rx_ring_size;
++ int rx_ring_size;
+
+- u16 tx_quick_cons_trip_int;
+- u16 tx_quick_cons_trip;
+- u16 tx_ticks_int;
+- u16 tx_ticks;
++ u16 tx_quick_cons_trip_int;
++ u16 tx_quick_cons_trip;
++ u16 tx_ticks_int;
++ u16 tx_ticks;
+
+- u16 rx_quick_cons_trip_int;
+- u16 rx_quick_cons_trip;
+- u16 rx_ticks_int;
+- u16 rx_ticks;
++ u16 rx_quick_cons_trip_int;
++ u16 rx_quick_cons_trip;
++ u16 rx_ticks_int;
++ u16 rx_ticks;
+
+- u32 stats_ticks;
++ u32 lin_cnt;
+
+- int state;
+-#define BNX2X_STATE_CLOSED 0x0
+-#define BNX2X_STATE_OPENING_WAIT4_LOAD 0x1000
+-#define BNX2X_STATE_OPENING_WAIT4_PORT 0x2000
++ int state;
++#define BNX2X_STATE_CLOSED 0x0
++#define BNX2X_STATE_OPENING_WAIT4_LOAD 0x1000
++#define BNX2X_STATE_OPENING_WAIT4_PORT 0x2000
+ #define BNX2X_STATE_OPEN 0x3000
+-#define BNX2X_STATE_CLOSING_WAIT4_HALT 0x4000
++#define BNX2X_STATE_CLOSING_WAIT4_HALT 0x4000
+ #define BNX2X_STATE_CLOSING_WAIT4_DELETE 0x5000
+ #define BNX2X_STATE_CLOSING_WAIT4_UNLOAD 0x6000
+-#define BNX2X_STATE_ERROR 0xF000
++#define BNX2X_STATE_DISABLED 0xd000
++#define BNX2X_STATE_DIAG 0xe000
++#define BNX2X_STATE_ERROR 0xf000
+
+- int num_queues;
++ int num_queues;
++#define BP_MAX_QUEUES(bp) (IS_E1HMF(bp) ? 4 : 16)
+
+- u32 rx_mode;
+-#define BNX2X_RX_MODE_NONE 0
+-#define BNX2X_RX_MODE_NORMAL 1
+-#define BNX2X_RX_MODE_ALLMULTI 2
+-#define BNX2X_RX_MODE_PROMISC 3
+-#define BNX2X_MAX_MULTICAST 64
+-#define BNX2X_MAX_EMUL_MULTI 16
++ u32 rx_mode;
++#define BNX2X_RX_MODE_NONE 0
++#define BNX2X_RX_MODE_NORMAL 1
++#define BNX2X_RX_MODE_ALLMULTI 2
++#define BNX2X_RX_MODE_PROMISC 3
++#define BNX2X_MAX_MULTICAST 64
++#define BNX2X_MAX_EMUL_MULTI 16
+
+- dma_addr_t def_status_blk_mapping;
++ dma_addr_t def_status_blk_mapping;
+
+- struct bnx2x_slowpath *slowpath;
+- dma_addr_t slowpath_mapping;
++ struct bnx2x_slowpath *slowpath;
++ dma_addr_t slowpath_mapping;
+
+ #ifdef BCM_ISCSI
+ void *t1;
+@@ -716,264 +887,171 @@ struct bnx2x {
+ dma_addr_t qm_mapping;
+ #endif
+
+- char *name;
++ int dmae_ready;
++ /* used to synchronize dmae accesses */
++ struct mutex dmae_mutex;
++ struct dmae_command init_dmae;
+
+ /* used to synchronize stats collecting */
+- int stats_state;
+-#define STATS_STATE_DISABLE 0
+-#define STATS_STATE_ENABLE 1
+-#define STATS_STATE_STOP 2 /* stop stats on next iteration */
+-
++ int stats_state;
+ /* used by dmae command loader */
+- struct dmae_command dmae;
+- int executer_idx;
++ struct dmae_command stats_dmae;
++ int executer_idx;
+
+- u32 old_brb_discard;
+- struct bmac_stats old_bmac;
++ u16 stats_counter;
+ struct tstorm_per_client_stats old_tclient;
+- struct z_stream_s *strm;
+- void *gunzip_buf;
+- dma_addr_t gunzip_mapping;
+- int gunzip_outlen;
+-#define FW_BUF_SIZE 0x8000
++ struct xstorm_per_client_stats old_xclient;
++ struct bnx2x_eth_stats eth_stats;
++
++ struct z_stream_s *strm;
++ void *gunzip_buf;
++ dma_addr_t gunzip_mapping;
++ int gunzip_outlen;
++#define FW_BUF_SIZE 0x8000
+
+ };
+
+
+-/* DMAE command defines */
+-#define DMAE_CMD_SRC_PCI 0
+-#define DMAE_CMD_SRC_GRC DMAE_COMMAND_SRC
++#define for_each_queue(bp, var) for (var = 0; var < bp->num_queues; var++)
+
+-#define DMAE_CMD_DST_PCI (1 << DMAE_COMMAND_DST_SHIFT)
+-#define DMAE_CMD_DST_GRC (2 << DMAE_COMMAND_DST_SHIFT)
++#define for_each_nondefault_queue(bp, var) \
++ for (var = 1; var < bp->num_queues; var++)
++#define is_multi(bp) (bp->num_queues > 1)
+
+-#define DMAE_CMD_C_DST_PCI 0
+-#define DMAE_CMD_C_DST_GRC (1 << DMAE_COMMAND_C_DST_SHIFT)
+
+-#define DMAE_CMD_C_ENABLE DMAE_COMMAND_C_TYPE_ENABLE
++void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32);
++void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
++ u32 len32);
++int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
+
+-#define DMAE_CMD_ENDIANITY_NO_SWAP (0 << DMAE_COMMAND_ENDIANITY_SHIFT)
+-#define DMAE_CMD_ENDIANITY_B_SWAP (1 << DMAE_COMMAND_ENDIANITY_SHIFT)
+-#define DMAE_CMD_ENDIANITY_DW_SWAP (2 << DMAE_COMMAND_ENDIANITY_SHIFT)
+-#define DMAE_CMD_ENDIANITY_B_DW_SWAP (3 << DMAE_COMMAND_ENDIANITY_SHIFT)
++static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
++ int wait)
++{
++ u32 val;
+
+-#define DMAE_CMD_PORT_0 0
+-#define DMAE_CMD_PORT_1 DMAE_COMMAND_PORT
++ do {
++ val = REG_RD(bp, reg);
++ if (val == expected)
++ break;
++ ms -= wait;
++ msleep(wait);
+
+-#define DMAE_CMD_SRC_RESET DMAE_COMMAND_SRC_RESET
+-#define DMAE_CMD_DST_RESET DMAE_COMMAND_DST_RESET
++ } while (ms > 0);
+
+-#define DMAE_LEN32_MAX 0x400
++ return val;
++}
+
+
+-/* MC hsi */
+-#define RX_COPY_THRESH 92
+-#define BCM_PAGE_BITS 12
+-#define BCM_PAGE_SIZE (1 << BCM_PAGE_BITS)
+-
+-#define NUM_TX_RINGS 16
+-#define TX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_tx_bd))
+-#define MAX_TX_DESC_CNT (TX_DESC_CNT - 1)
+-#define NUM_TX_BD (TX_DESC_CNT * NUM_TX_RINGS)
+-#define MAX_TX_BD (NUM_TX_BD - 1)
+-#define MAX_TX_AVAIL (MAX_TX_DESC_CNT * NUM_TX_RINGS - 2)
+-#define NEXT_TX_IDX(x) ((((x) & MAX_TX_DESC_CNT) == \
+- (MAX_TX_DESC_CNT - 1)) ? (x) + 2 : (x) + 1)
+-#define TX_BD(x) ((x) & MAX_TX_BD)
+-#define TX_BD_POFF(x) ((x) & MAX_TX_DESC_CNT)
++/* load/unload mode */
++#define LOAD_NORMAL 0
++#define LOAD_OPEN 1
++#define LOAD_DIAG 2
++#define UNLOAD_NORMAL 0
++#define UNLOAD_CLOSE 1
+
+-/* The RX BD ring is special, each bd is 8 bytes but the last one is 16 */
+-#define NUM_RX_RINGS 8
+-#define RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd))
+-#define MAX_RX_DESC_CNT (RX_DESC_CNT - 2)
+-#define RX_DESC_MASK (RX_DESC_CNT - 1)
+-#define NUM_RX_BD (RX_DESC_CNT * NUM_RX_RINGS)
+-#define MAX_RX_BD (NUM_RX_BD - 1)
+-#define MAX_RX_AVAIL (MAX_RX_DESC_CNT * NUM_RX_RINGS - 2)
+-#define NEXT_RX_IDX(x) ((((x) & RX_DESC_MASK) == \
+- (MAX_RX_DESC_CNT - 1)) ? (x) + 3 : (x) + 1)
+-#define RX_BD(x) ((x) & MAX_RX_BD)
+
+-#define NUM_RCQ_RINGS (NUM_RX_RINGS * 2)
+-#define RCQ_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_rx_cqe))
+-#define MAX_RCQ_DESC_CNT (RCQ_DESC_CNT - 1)
+-#define NUM_RCQ_BD (RCQ_DESC_CNT * NUM_RCQ_RINGS)
+-#define MAX_RCQ_BD (NUM_RCQ_BD - 1)
+-#define MAX_RCQ_AVAIL (MAX_RCQ_DESC_CNT * NUM_RCQ_RINGS - 2)
+-#define NEXT_RCQ_IDX(x) ((((x) & MAX_RCQ_DESC_CNT) == \
+- (MAX_RCQ_DESC_CNT - 1)) ? (x) + 2 : (x) + 1)
+-#define RCQ_BD(x) ((x) & MAX_RCQ_BD)
++/* DMAE command defines */
++#define DMAE_CMD_SRC_PCI 0
++#define DMAE_CMD_SRC_GRC DMAE_COMMAND_SRC
+
++#define DMAE_CMD_DST_PCI (1 << DMAE_COMMAND_DST_SHIFT)
++#define DMAE_CMD_DST_GRC (2 << DMAE_COMMAND_DST_SHIFT)
+
+-/* used on a CID received from the HW */
+-#define SW_CID(x) (le32_to_cpu(x) & \
+- (COMMON_RAMROD_ETH_RX_CQE_CID >> 1))
+-#define CQE_CMD(x) (le32_to_cpu(x) >> \
+- COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT)
++#define DMAE_CMD_C_DST_PCI 0
++#define DMAE_CMD_C_DST_GRC (1 << DMAE_COMMAND_C_DST_SHIFT)
+
+-#define BD_UNMAP_ADDR(bd) HILO_U64(le32_to_cpu((bd)->addr_hi), \
+- le32_to_cpu((bd)->addr_lo))
+-#define BD_UNMAP_LEN(bd) (le16_to_cpu((bd)->nbytes))
++#define DMAE_CMD_C_ENABLE DMAE_COMMAND_C_TYPE_ENABLE
+
++#define DMAE_CMD_ENDIANITY_NO_SWAP (0 << DMAE_COMMAND_ENDIANITY_SHIFT)
++#define DMAE_CMD_ENDIANITY_B_SWAP (1 << DMAE_COMMAND_ENDIANITY_SHIFT)
++#define DMAE_CMD_ENDIANITY_DW_SWAP (2 << DMAE_COMMAND_ENDIANITY_SHIFT)
++#define DMAE_CMD_ENDIANITY_B_DW_SWAP (3 << DMAE_COMMAND_ENDIANITY_SHIFT)
+
+-#define STROM_ASSERT_ARRAY_SIZE 50
++#define DMAE_CMD_PORT_0 0
++#define DMAE_CMD_PORT_1 DMAE_COMMAND_PORT
+
++#define DMAE_CMD_SRC_RESET DMAE_COMMAND_SRC_RESET
++#define DMAE_CMD_DST_RESET DMAE_COMMAND_DST_RESET
++#define DMAE_CMD_E1HVN_SHIFT DMAE_COMMAND_E1HVN_SHIFT
+
+-#define MDIO_INDIRECT_REG_ADDR 0x1f
+-#define MDIO_SET_REG_BANK(bp, reg_bank) \
+- bnx2x_mdio22_write(bp, MDIO_INDIRECT_REG_ADDR, reg_bank)
++#define DMAE_LEN32_RD_MAX 0x80
++#define DMAE_LEN32_WR_MAX 0x400
+
+-#define MDIO_ACCESS_TIMEOUT 1000
++#define DMAE_COMP_VAL 0xe0d0d0ae
+
++#define MAX_DMAE_C_PER_PORT 8
++#define INIT_DMAE_C(bp) (BP_PORT(bp)*MAX_DMAE_C_PER_PORT + \
++ BP_E1HVN(bp))
++#define PMF_DMAE_C(bp) (BP_PORT(bp)*MAX_DMAE_C_PER_PORT + \
++ E1HVN_MAX)
+
+-/* must be used on a CID before placing it on a HW ring */
+-#define HW_CID(bp, x) (x | (bp->port << 23))
+
+-#define SP_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_spe))
+-#define MAX_SP_DESC_CNT (SP_DESC_CNT - 1)
++/* PCIE link and speed */
++#define PCICFG_LINK_WIDTH 0x1f00000
++#define PCICFG_LINK_WIDTH_SHIFT 20
++#define PCICFG_LINK_SPEED 0xf0000
++#define PCICFG_LINK_SPEED_SHIFT 16
+
+-#define ATTN_NIG_FOR_FUNC (1L << 8)
+-#define ATTN_SW_TIMER_4_FUNC (1L << 9)
+-#define GPIO_2_FUNC (1L << 10)
+-#define GPIO_3_FUNC (1L << 11)
+-#define GPIO_4_FUNC (1L << 12)
+-#define ATTN_GENERAL_ATTN_1 (1L << 13)
+-#define ATTN_GENERAL_ATTN_2 (1L << 14)
+-#define ATTN_GENERAL_ATTN_3 (1L << 15)
+-#define ATTN_GENERAL_ATTN_4 (1L << 13)
+-#define ATTN_GENERAL_ATTN_5 (1L << 14)
+-#define ATTN_GENERAL_ATTN_6 (1L << 15)
+
+-#define ATTN_HARD_WIRED_MASK 0xff00
+-#define ATTENTION_ID 4
++#define BNX2X_NUM_STATS 42
++#define BNX2X_NUM_TESTS 8
+
++#define BNX2X_MAC_LOOPBACK 0
++#define BNX2X_PHY_LOOPBACK 1
++#define BNX2X_MAC_LOOPBACK_FAILED 1
++#define BNX2X_PHY_LOOPBACK_FAILED 2
++#define BNX2X_LOOPBACK_FAILED (BNX2X_MAC_LOOPBACK_FAILED | \
++ BNX2X_PHY_LOOPBACK_FAILED)
+
+-#define BNX2X_BTR 3
+-#define MAX_SPQ_PENDING 8
+
++#define STROM_ASSERT_ARRAY_SIZE 50
+
+-#define BNX2X_NUM_STATS 34
+-#define BNX2X_NUM_TESTS 1
+
++/* must be used on a CID before placing it on a HW ring */
++#define HW_CID(bp, x) ((BP_PORT(bp) << 23) | (BP_E1HVN(bp) << 17) | x)
+
+-#define DPM_TRIGER_TYPE 0x40
+-#define DOORBELL(bp, cid, val) \
+- do { \
+- writel((u32)val, (bp)->doorbells + (BCM_PAGE_SIZE * cid) + \
+- DPM_TRIGER_TYPE); \
+- } while (0)
++#define SP_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_spe))
++#define MAX_SP_DESC_CNT (SP_DESC_CNT - 1)
+
+-/* PCIE link and speed */
+-#define PCICFG_LINK_WIDTH 0x1f00000
+-#define PCICFG_LINK_WIDTH_SHIFT 20
+-#define PCICFG_LINK_SPEED 0xf0000
+-#define PCICFG_LINK_SPEED_SHIFT 16
+
+-#define BMAC_CONTROL_RX_ENABLE 2
++#define BNX2X_BTR 3
++#define MAX_SPQ_PENDING 8
+
+-#define pbd_tcp_flags(skb) (ntohl(tcp_flag_word(tcp_hdr(skb)))>>16 & 0xff)
+
+-/* stuff added to make the code fit 80Col */
++/* CMNG constants
++ derived from lab experiments, and not from system spec calculations !!! */
++#define DEF_MIN_RATE 100
++/* resolution of the rate shaping timer - 100 usec */
++#define RS_PERIODIC_TIMEOUT_USEC 100
++/* resolution of fairness algorithm in usecs -
++ coefficient for calculating the actual t fair */
++#define T_FAIR_COEF 10000000
++/* number of bytes in single QM arbitration cycle -
++ coefficient for calculating the fairness timer */
++#define QM_ARB_BYTES 40000
++#define FAIR_MEM 2
+
+-#define TPA_TYPE_START ETH_FAST_PATH_RX_CQE_START_FLG
+-#define TPA_TYPE_END ETH_FAST_PATH_RX_CQE_END_FLG
+-#define TPA_TYPE(cqe) (cqe->fast_path_cqe.error_type_flags & \
+- (TPA_TYPE_START | TPA_TYPE_END))
+-#define BNX2X_RX_SUM_OK(cqe) \
+- (!(cqe->fast_path_cqe.status_flags & \
+- (ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG | \
+- ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)))
+
+-#define BNX2X_RX_SUM_FIX(cqe) \
+- ((le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) & \
+- PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) == \
+- (1 << PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT))
++#define ATTN_NIG_FOR_FUNC (1L << 8)
++#define ATTN_SW_TIMER_4_FUNC (1L << 9)
++#define GPIO_2_FUNC (1L << 10)
++#define GPIO_3_FUNC (1L << 11)
++#define GPIO_4_FUNC (1L << 12)
++#define ATTN_GENERAL_ATTN_1 (1L << 13)
++#define ATTN_GENERAL_ATTN_2 (1L << 14)
++#define ATTN_GENERAL_ATTN_3 (1L << 15)
++#define ATTN_GENERAL_ATTN_4 (1L << 13)
++#define ATTN_GENERAL_ATTN_5 (1L << 14)
++#define ATTN_GENERAL_ATTN_6 (1L << 15)
+
++#define ATTN_HARD_WIRED_MASK 0xff00
++#define ATTENTION_ID 4
+
+-#define MDIO_AN_CL73_OR_37_COMPLETE \
+- (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE | \
+- MDIO_GP_STATUS_TOP_AN_STATUS1_CL37_AUTONEG_COMPLETE)
+-
+-#define GP_STATUS_PAUSE_RSOLUTION_TXSIDE \
+- MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_TXSIDE
+-#define GP_STATUS_PAUSE_RSOLUTION_RXSIDE \
+- MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_RXSIDE
+-#define GP_STATUS_SPEED_MASK \
+- MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_MASK
+-#define GP_STATUS_10M MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10M
+-#define GP_STATUS_100M MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_100M
+-#define GP_STATUS_1G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G
+-#define GP_STATUS_2_5G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_2_5G
+-#define GP_STATUS_5G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_5G
+-#define GP_STATUS_6G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_6G
+-#define GP_STATUS_10G_HIG \
+- MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_HIG
+-#define GP_STATUS_10G_CX4 \
+- MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_CX4
+-#define GP_STATUS_12G_HIG \
+- MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_12G_HIG
+-#define GP_STATUS_12_5G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_12_5G
+-#define GP_STATUS_13G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_13G
+-#define GP_STATUS_15G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_15G
+-#define GP_STATUS_16G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_16G
+-#define GP_STATUS_1G_KX MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G_KX
+-#define GP_STATUS_10G_KX4 \
+- MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KX4
+-
+-#define LINK_10THD LINK_STATUS_SPEED_AND_DUPLEX_10THD
+-#define LINK_10TFD LINK_STATUS_SPEED_AND_DUPLEX_10TFD
+-#define LINK_100TXHD LINK_STATUS_SPEED_AND_DUPLEX_100TXHD
+-#define LINK_100T4 LINK_STATUS_SPEED_AND_DUPLEX_100T4
+-#define LINK_100TXFD LINK_STATUS_SPEED_AND_DUPLEX_100TXFD
+-#define LINK_1000THD LINK_STATUS_SPEED_AND_DUPLEX_1000THD
+-#define LINK_1000TFD LINK_STATUS_SPEED_AND_DUPLEX_1000TFD
+-#define LINK_1000XFD LINK_STATUS_SPEED_AND_DUPLEX_1000XFD
+-#define LINK_2500THD LINK_STATUS_SPEED_AND_DUPLEX_2500THD
+-#define LINK_2500TFD LINK_STATUS_SPEED_AND_DUPLEX_2500TFD
+-#define LINK_2500XFD LINK_STATUS_SPEED_AND_DUPLEX_2500XFD
+-#define LINK_10GTFD LINK_STATUS_SPEED_AND_DUPLEX_10GTFD
+-#define LINK_10GXFD LINK_STATUS_SPEED_AND_DUPLEX_10GXFD
+-#define LINK_12GTFD LINK_STATUS_SPEED_AND_DUPLEX_12GTFD
+-#define LINK_12GXFD LINK_STATUS_SPEED_AND_DUPLEX_12GXFD
+-#define LINK_12_5GTFD LINK_STATUS_SPEED_AND_DUPLEX_12_5GTFD
+-#define LINK_12_5GXFD LINK_STATUS_SPEED_AND_DUPLEX_12_5GXFD
+-#define LINK_13GTFD LINK_STATUS_SPEED_AND_DUPLEX_13GTFD
+-#define LINK_13GXFD LINK_STATUS_SPEED_AND_DUPLEX_13GXFD
+-#define LINK_15GTFD LINK_STATUS_SPEED_AND_DUPLEX_15GTFD
+-#define LINK_15GXFD LINK_STATUS_SPEED_AND_DUPLEX_15GXFD
+-#define LINK_16GTFD LINK_STATUS_SPEED_AND_DUPLEX_16GTFD
+-#define LINK_16GXFD LINK_STATUS_SPEED_AND_DUPLEX_16GXFD
+-
+-#define NIG_STATUS_XGXS0_LINK10G \
+- NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK10G
+-#define NIG_STATUS_XGXS0_LINK_STATUS \
+- NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS
+-#define NIG_STATUS_XGXS0_LINK_STATUS_SIZE \
+- NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS_SIZE
+-#define NIG_STATUS_SERDES0_LINK_STATUS \
+- NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_SERDES0_LINK_STATUS
+-#define NIG_MASK_MI_INT \
+- NIG_MASK_INTERRUPT_PORT0_REG_MASK_EMAC0_MISC_MI_INT
+-#define NIG_MASK_XGXS0_LINK10G \
+- NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK10G
+-#define NIG_MASK_XGXS0_LINK_STATUS \
+- NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK_STATUS
+-#define NIG_MASK_SERDES0_LINK_STATUS \
+- NIG_MASK_INTERRUPT_PORT0_REG_MASK_SERDES0_LINK_STATUS
+-
+-#define XGXS_RESET_BITS \
+- (MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_RSTB_HW | \
+- MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_IDDQ | \
+- MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN | \
+- MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN_SD | \
+- MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_TXD_FIFO_RSTB)
+-
+-#define SERDES_RESET_BITS \
+- (MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_RSTB_HW | \
+- MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_IDDQ | \
+- MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN | \
+- MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN_SD)
+
++/* stuff added to make the code fit 80Col */
++
++#define BNX2X_PMF_LINK_ASSERT \
++ GENERAL_ATTEN_OFFSET(LINK_SYNC_ATTENTION_BIT_FUNC_0 + BP_FUNC(bp))
+
+ #define BNX2X_MC_ASSERT_BITS \
+ (GENERAL_ATTEN_OFFSET(TSTORM_FATAL_ASSERT_ATTENTION_BIT) | \
+@@ -987,12 +1065,20 @@ struct bnx2x {
+ #define BNX2X_DOORQ_ASSERT \
+ AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT
+
++#define BNX2X_GRC_TIMEOUT GENERAL_ATTEN_OFFSET(LATCHED_ATTN_TIMEOUT_GRC)
++#define BNX2X_GRC_RSV (GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCR) | \
++ GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCT) | \
++ GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCN) | \
++ GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCU) | \
++ GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCP) | \
++ GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RSVD_GRC))
++
+ #define HW_INTERRUT_ASSERT_SET_0 \
+ (AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT | \
+ AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT | \
+ AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT | \
+ AEU_INPUTS_ATTN_BITS_PBF_HW_INTERRUPT)
+-#define HW_PRTY_ASSERT_SET_0 (AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR | \
++#define HW_PRTY_ASSERT_SET_0 (AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR | \
+ AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR | \
+ AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR | \
+ AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR |\
+@@ -1009,7 +1095,7 @@ struct bnx2x {
+ AEU_INPUTS_ATTN_BITS_UPB_HW_INTERRUPT | \
+ AEU_INPUTS_ATTN_BITS_CSDM_HW_INTERRUPT | \
+ AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT)
+-#define HW_PRTY_ASSERT_SET_1 (AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR |\
++#define HW_PRTY_ASSERT_SET_1 (AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR | \
+ AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR | \
+ AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR | \
+@@ -1026,7 +1112,7 @@ struct bnx2x {
+ AEU_INPUTS_ATTN_BITS_DMAE_HW_INTERRUPT | \
+ AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT |\
+ AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT)
+-#define HW_PRTY_ASSERT_SET_2 (AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR | \
++#define HW_PRTY_ASSERT_SET_2 (AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR | \
+ AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR | \
+ AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR | \
+@@ -1035,42 +1121,44 @@ struct bnx2x {
+ AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR)
+
+
+-#define ETH_RX_ERROR_FALGS (ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG | \
+- ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG | \
+- ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)
+-
+-
+ #define MULTI_FLAGS \
+- (TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY | \
+- TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY | \
+- TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY | \
+- TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY | \
+- TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_ENABLE)
+-
+-#define MULTI_MASK 0x7f
++ (TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY | \
++ TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY | \
++ TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY | \
++ TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY | \
++ TSTORM_ETH_FUNCTION_COMMON_CONFIG_DEFAULT_ENABLE)
+
++#define MULTI_MASK 0x7f
+
+-#define U_SB_ETH_RX_CQ_INDEX HC_INDEX_U_ETH_RX_CQ_CONS
+-#define C_SB_ETH_TX_CQ_INDEX HC_INDEX_C_ETH_TX_CQ_CONS
+-#define C_DEF_SB_SP_INDEX HC_INDEX_DEF_C_ETH_SLOW_PATH
+
+-#define BNX2X_RX_SB_INDEX \
+- &fp->status_blk->u_status_block.index_values[U_SB_ETH_RX_CQ_INDEX]
++#define DEF_USB_FUNC_OFF (2 + 2*HC_USTORM_DEF_SB_NUM_INDICES)
++#define DEF_CSB_FUNC_OFF (2 + 2*HC_CSTORM_DEF_SB_NUM_INDICES)
++#define DEF_XSB_FUNC_OFF (2 + 2*HC_XSTORM_DEF_SB_NUM_INDICES)
++#define DEF_TSB_FUNC_OFF (2 + 2*HC_TSTORM_DEF_SB_NUM_INDICES)
+
+-#define BNX2X_TX_SB_INDEX \
+- &fp->status_blk->c_status_block.index_values[C_SB_ETH_TX_CQ_INDEX]
++#define C_DEF_SB_SP_INDEX HC_INDEX_DEF_C_ETH_SLOW_PATH
+
+ #define BNX2X_SP_DSB_INDEX \
+-&bp->def_status_blk->c_def_status_block.index_values[C_DEF_SB_SP_INDEX]
++(&bp->def_status_blk->c_def_status_block.index_values[C_DEF_SB_SP_INDEX])
+
+
+ #define CAM_IS_INVALID(x) \
+ (x.target_table_entry.flags == TSTORM_CAM_TARGET_TABLE_ENTRY_ACTION_TYPE)
+
+ #define CAM_INVALIDATE(x) \
+-x.target_table_entry.flags = TSTORM_CAM_TARGET_TABLE_ENTRY_ACTION_TYPE
++ (x.target_table_entry.flags = TSTORM_CAM_TARGET_TABLE_ENTRY_ACTION_TYPE)
+
+
++/* Number of u32 elements in MC hash array */
++#define MC_HASH_SIZE 8
++#define MC_HASH_OFFSET(bp, i) (BAR_TSTRORM_INTMEM + \
++ TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(BP_FUNC(bp)) + i*4)
++
++
++#ifndef PXP2_REG_PXP2_INT_STS
++#define PXP2_REG_PXP2_INT_STS PXP2_REG_PXP2_INT_STS_0
++#endif
++
+ /* MISC_REG_RESET_REG - this is here for the hsi to work don't touch */
+
+ #endif /* bnx2x.h */
+diff --git a/drivers/net/bnx2x_fw_defs.h b/drivers/net/bnx2x_fw_defs.h
+index 3b96890..2fe14a2 100644
+--- a/drivers/net/bnx2x_fw_defs.h
++++ b/drivers/net/bnx2x_fw_defs.h
+@@ -8,191 +8,398 @@
+ */
+
+
+-#define CSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index)\
+- (0x1922 + (port * 0x40) + (index * 0x4))
+-#define CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)\
+- (0x1900 + (port * 0x40))
+-#define CSTORM_HC_BTR_OFFSET(port)\
+- (0x1984 + (port * 0xc0))
+-#define CSTORM_SB_HC_DISABLE_OFFSET(port, cpu_id, index)\
+- (0x141a + (port * 0x280) + (cpu_id * 0x28) + (index * 0x4))
+-#define CSTORM_SB_HC_TIMEOUT_OFFSET(port, cpu_id, index)\
+- (0x1418 + (port * 0x280) + (cpu_id * 0x28) + (index * 0x4))
+-#define CSTORM_SB_HOST_SB_ADDR_OFFSET(port, cpu_id)\
+- (0x1400 + (port * 0x280) + (cpu_id * 0x28))
+-#define CSTORM_STATS_FLAGS_OFFSET(port) (0x5108 + (port * 0x8))
+-#define TSTORM_CLIENT_CONFIG_OFFSET(port, client_id)\
+- (0x1510 + (port * 0x240) + (client_id * 0x20))
+-#define TSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index)\
+- (0x138a + (port * 0x28) + (index * 0x4))
+-#define TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)\
+- (0x1370 + (port * 0x28))
+-#define TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port)\
+- (0x4b70 + (port * 0x8))
+-#define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(function)\
+- (0x1418 + (function * 0x30))
+-#define TSTORM_HC_BTR_OFFSET(port)\
+- (0x13c4 + (port * 0x18))
+-#define TSTORM_INDIRECTION_TABLE_OFFSET(port)\
+- (0x22c8 + (port * 0x80))
+-#define TSTORM_INDIRECTION_TABLE_SIZE 0x80
+-#define TSTORM_MAC_FILTER_CONFIG_OFFSET(port)\
+- (0x1420 + (port * 0x30))
+-#define TSTORM_RCQ_PROD_OFFSET(port, client_id)\
+- (0x1508 + (port * 0x240) + (client_id * 0x20))
+-#define TSTORM_STATS_FLAGS_OFFSET(port) (0x4b90 + (port * 0x8))
+-#define USTORM_DEF_SB_HC_DISABLE_OFFSET(port, index)\
+- (0x191a + (port * 0x28) + (index * 0x4))
+-#define USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)\
+- (0x1900 + (port * 0x28))
+-#define USTORM_HC_BTR_OFFSET(port)\
+- (0x1954 + (port * 0xb8))
+-#define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(port)\
+- (0x5408 + (port * 0x8))
+-#define USTORM_SB_HC_DISABLE_OFFSET(port, cpu_id, index)\
+- (0x141a + (port * 0x280) + (cpu_id * 0x28) + (index * 0x4))
+-#define USTORM_SB_HC_TIMEOUT_OFFSET(port, cpu_id, index)\
+- (0x1418 + (port * 0x280) + (cpu_id * 0x28) + (index * 0x4))
+-#define USTORM_SB_HOST_SB_ADDR_OFFSET(port, cpu_id)\
+- (0x1400 + (port * 0x280) + (cpu_id * 0x28))
+-#define XSTORM_ASSERT_LIST_INDEX_OFFSET 0x1000
+-#define XSTORM_ASSERT_LIST_OFFSET(idx) (0x1020 + (idx * 0x10))
+-#define XSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index)\
+- (0x141a + (port * 0x28) + (index * 0x4))
+-#define XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)\
+- (0x1400 + (port * 0x28))
+-#define XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port)\
+- (0x5408 + (port * 0x8))
+-#define XSTORM_HC_BTR_OFFSET(port)\
+- (0x1454 + (port * 0x18))
+-#define XSTORM_SPQ_PAGE_BASE_OFFSET(port)\
+- (0x5328 + (port * 0x18))
+-#define XSTORM_SPQ_PROD_OFFSET(port)\
+- (0x5330 + (port * 0x18))
+-#define XSTORM_STATS_FLAGS_OFFSET(port) (0x53f8 + (port * 0x8))
++#define CSTORM_ASSERT_LIST_INDEX_OFFSET \
++ (IS_E1H_OFFSET ? 0x7000 : 0x1000)
++#define CSTORM_ASSERT_LIST_OFFSET(idx) \
++ (IS_E1H_OFFSET ? (0x7020 + (idx * 0x10)) : (0x1020 + (idx * 0x10)))
++#define CSTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \
++ (IS_E1H_OFFSET ? (0x8522 + ((function>>1) * 0x40) + \
++ ((function&1) * 0x100) + (index * 0x4)) : (0x1922 + (function * \
++ 0x40) + (index * 0x4)))
++#define CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \
++ (IS_E1H_OFFSET ? (0x8500 + ((function>>1) * 0x40) + \
++ ((function&1) * 0x100)) : (0x1900 + (function * 0x40)))
++#define CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \
++ (IS_E1H_OFFSET ? (0x8508 + ((function>>1) * 0x40) + \
++ ((function&1) * 0x100)) : (0x1908 + (function * 0x40)))
++#define CSTORM_FUNCTION_MODE_OFFSET \
++ (IS_E1H_OFFSET ? 0x11e8 : 0xffffffff)
++#define CSTORM_HC_BTR_OFFSET(port) \
++ (IS_E1H_OFFSET ? (0x8704 + (port * 0xf0)) : (0x1984 + (port * 0xc0)))
++#define CSTORM_SB_HC_DISABLE_OFFSET(port, cpu_id, index) \
++ (IS_E1H_OFFSET ? (0x801a + (port * 0x280) + (cpu_id * 0x28) + \
++ (index * 0x4)) : (0x141a + (port * 0x280) + (cpu_id * 0x28) + \
++ (index * 0x4)))
++#define CSTORM_SB_HC_TIMEOUT_OFFSET(port, cpu_id, index) \
++ (IS_E1H_OFFSET ? (0x8018 + (port * 0x280) + (cpu_id * 0x28) + \
++ (index * 0x4)) : (0x1418 + (port * 0x280) + (cpu_id * 0x28) + \
++ (index * 0x4)))
++#define CSTORM_SB_HOST_SB_ADDR_OFFSET(port, cpu_id) \
++ (IS_E1H_OFFSET ? (0x8000 + (port * 0x280) + (cpu_id * 0x28)) : \
++ (0x1400 + (port * 0x280) + (cpu_id * 0x28)))
++#define CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, cpu_id) \
++ (IS_E1H_OFFSET ? (0x8008 + (port * 0x280) + (cpu_id * 0x28)) : \
++ (0x1408 + (port * 0x280) + (cpu_id * 0x28)))
++#define CSTORM_STATS_FLAGS_OFFSET(function) \
++ (IS_E1H_OFFSET ? (0x1108 + (function * 0x8)) : (0x5108 + \
++ (function * 0x8)))
++#define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(function) \
++ (IS_E1H_OFFSET ? (0x31c0 + (function * 0x20)) : 0xffffffff)
++#define TSTORM_ASSERT_LIST_INDEX_OFFSET \
++ (IS_E1H_OFFSET ? 0xa000 : 0x1000)
++#define TSTORM_ASSERT_LIST_OFFSET(idx) \
++ (IS_E1H_OFFSET ? (0xa020 + (idx * 0x10)) : (0x1020 + (idx * 0x10)))
++#define TSTORM_CLIENT_CONFIG_OFFSET(port, client_id) \
++ (IS_E1H_OFFSET ? (0x3350 + (port * 0x190) + (client_id * 0x10)) \
++ : (0x9c0 + (port * 0x130) + (client_id * 0x10)))
++#define TSTORM_COMMON_SAFC_WORKAROUND_ENABLE_OFFSET \
++ (IS_E1H_OFFSET ? 0x1ad8 : 0xffffffff)
++#define TSTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \
++ (IS_E1H_OFFSET ? (0xb01a + ((function>>1) * 0x28) + \
++ ((function&1) * 0xa0) + (index * 0x4)) : (0x141a + (function * \
++ 0x28) + (index * 0x4)))
++#define TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \
++ (IS_E1H_OFFSET ? (0xb000 + ((function>>1) * 0x28) + \
++ ((function&1) * 0xa0)) : (0x1400 + (function * 0x28)))
++#define TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \
++ (IS_E1H_OFFSET ? (0xb008 + ((function>>1) * 0x28) + \
++ ((function&1) * 0xa0)) : (0x1408 + (function * 0x28)))
++#define TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(function) \
++ (IS_E1H_OFFSET ? (0x2b80 + (function * 0x8)) : (0x4b68 + \
++ (function * 0x8)))
++#define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(function) \
++ (IS_E1H_OFFSET ? (0x3000 + (function * 0x38)) : (0x1500 + \
++ (function * 0x38)))
++#define TSTORM_FUNCTION_MODE_OFFSET \
++ (IS_E1H_OFFSET ? 0x1ad0 : 0xffffffff)
++#define TSTORM_HC_BTR_OFFSET(port) \
++ (IS_E1H_OFFSET ? (0xb144 + (port * 0x30)) : (0x1454 + (port * 0x18)))
++#define TSTORM_INDIRECTION_TABLE_OFFSET(function) \
++ (IS_E1H_OFFSET ? (0x12c8 + (function * 0x80)) : (0x22c8 + \
++ (function * 0x80)))
++#define TSTORM_INDIRECTION_TABLE_SIZE 0x80
++#define TSTORM_MAC_FILTER_CONFIG_OFFSET(function) \
++ (IS_E1H_OFFSET ? (0x3008 + (function * 0x38)) : (0x1508 + \
++ (function * 0x38)))
++#define TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stats_counter_id) \
++ (IS_E1H_OFFSET ? (0x2010 + (port * 0x5b0) + (stats_counter_id * \
++ 0x50)) : (0x4080 + (port * 0x5b0) + (stats_counter_id * 0x50)))
++#define TSTORM_STATS_FLAGS_OFFSET(function) \
++ (IS_E1H_OFFSET ? (0x2c00 + (function * 0x8)) : (0x4b88 + \
++ (function * 0x8)))
++#define TSTORM_TPA_EXIST_OFFSET (IS_E1H_OFFSET ? 0x3680 : 0x1c20)
++#define USTORM_AGG_DATA_OFFSET (IS_E1H_OFFSET ? 0xa040 : 0x2c10)
++#define USTORM_AGG_DATA_SIZE (IS_E1H_OFFSET ? 0x2440 : 0x1200)
++#define USTORM_ASSERT_LIST_INDEX_OFFSET \
++ (IS_E1H_OFFSET ? 0x8960 : 0x1000)
++#define USTORM_ASSERT_LIST_OFFSET(idx) \
++ (IS_E1H_OFFSET ? (0x8980 + (idx * 0x10)) : (0x1020 + (idx * 0x10)))
++#define USTORM_CQE_PAGE_BASE_OFFSET(port, clientId) \
++ (IS_E1H_OFFSET ? (0x8018 + (port * 0x4b0) + (clientId * 0x30)) : \
++ (0x5330 + (port * 0x260) + (clientId * 0x20)))
++#define USTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \
++ (IS_E1H_OFFSET ? (0x9522 + ((function>>1) * 0x40) + \
++ ((function&1) * 0x100) + (index * 0x4)) : (0x1922 + (function * \
++ 0x40) + (index * 0x4)))
++#define USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \
++ (IS_E1H_OFFSET ? (0x9500 + ((function>>1) * 0x40) + \
++ ((function&1) * 0x100)) : (0x1900 + (function * 0x40)))
++#define USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \
++ (IS_E1H_OFFSET ? (0x9508 + ((function>>1) * 0x40) + \
++ ((function&1) * 0x100)) : (0x1908 + (function * 0x40)))
++#define USTORM_FUNCTION_MODE_OFFSET \
++ (IS_E1H_OFFSET ? 0x2448 : 0xffffffff)
++#define USTORM_HC_BTR_OFFSET(port) \
++ (IS_E1H_OFFSET ? (0x9704 + (port * 0xf0)) : (0x1984 + (port * 0xc0)))
++#define USTORM_MAX_AGG_SIZE_OFFSET(port, clientId) \
++ (IS_E1H_OFFSET ? (0x8010 + (port * 0x4b0) + (clientId * 0x30)) : \
++ (0x5328 + (port * 0x260) + (clientId * 0x20)))
++#define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(function) \
++ (IS_E1H_OFFSET ? (0x2408 + (function * 0x8)) : (0x5308 + \
++ (function * 0x8)))
++#define USTORM_RX_PRODS_OFFSET(port, client_id) \
++ (IS_E1H_OFFSET ? (0x8000 + (port * 0x4b0) + (client_id * 0x30)) \
++ : (0x5318 + (port * 0x260) + (client_id * 0x20)))
++#define USTORM_SB_HC_DISABLE_OFFSET(port, cpu_id, index) \
++ (IS_E1H_OFFSET ? (0x901a + (port * 0x280) + (cpu_id * 0x28) + \
++ (index * 0x4)) : (0x141a + (port * 0x280) + (cpu_id * 0x28) + \
++ (index * 0x4)))
++#define USTORM_SB_HC_TIMEOUT_OFFSET(port, cpu_id, index) \
++ (IS_E1H_OFFSET ? (0x9018 + (port * 0x280) + (cpu_id * 0x28) + \
++ (index * 0x4)) : (0x1418 + (port * 0x280) + (cpu_id * 0x28) + \
++ (index * 0x4)))
++#define USTORM_SB_HOST_SB_ADDR_OFFSET(port, cpu_id) \
++ (IS_E1H_OFFSET ? (0x9000 + (port * 0x280) + (cpu_id * 0x28)) : \
++ (0x1400 + (port * 0x280) + (cpu_id * 0x28)))
++#define USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, cpu_id) \
++ (IS_E1H_OFFSET ? (0x9008 + (port * 0x280) + (cpu_id * 0x28)) : \
++ (0x1408 + (port * 0x280) + (cpu_id * 0x28)))
++#define XSTORM_ASSERT_LIST_INDEX_OFFSET \
++ (IS_E1H_OFFSET ? 0x9000 : 0x1000)
++#define XSTORM_ASSERT_LIST_OFFSET(idx) \
++ (IS_E1H_OFFSET ? (0x9020 + (idx * 0x10)) : (0x1020 + (idx * 0x10)))
++#define XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) \
++ (IS_E1H_OFFSET ? (0x24a8 + (port * 0x50)) : (0x3ba0 + (port * 0x50)))
++#define XSTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \
++ (IS_E1H_OFFSET ? (0xa01a + ((function>>1) * 0x28) + \
++ ((function&1) * 0xa0) + (index * 0x4)) : (0x141a + (function * \
++ 0x28) + (index * 0x4)))
++#define XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \
++ (IS_E1H_OFFSET ? (0xa000 + ((function>>1) * 0x28) + \
++ ((function&1) * 0xa0)) : (0x1400 + (function * 0x28)))
++#define XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \
++ (IS_E1H_OFFSET ? (0xa008 + ((function>>1) * 0x28) + \
++ ((function&1) * 0xa0)) : (0x1408 + (function * 0x28)))
++#define XSTORM_E1HOV_OFFSET(function) \
++ (IS_E1H_OFFSET ? (0x2c10 + (function * 0x2)) : 0xffffffff)
++#define XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(function) \
++ (IS_E1H_OFFSET ? (0x2418 + (function * 0x8)) : (0x3b70 + \
++ (function * 0x8)))
++#define XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(function) \
++ (IS_E1H_OFFSET ? (0x2588 + (function * 0x90)) : (0x3c80 + \
++ (function * 0x90)))
++#define XSTORM_FUNCTION_MODE_OFFSET \
++ (IS_E1H_OFFSET ? 0x2c20 : 0xffffffff)
++#define XSTORM_HC_BTR_OFFSET(port) \
++ (IS_E1H_OFFSET ? (0xa144 + (port * 0x30)) : (0x1454 + (port * 0x18)))
++#define XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stats_counter_id) \
++ (IS_E1H_OFFSET ? (0xc000 + (port * 0x3f0) + (stats_counter_id * \
++ 0x38)) : (0x3378 + (port * 0x3f0) + (stats_counter_id * 0x38)))
++#define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(function) \
++ (IS_E1H_OFFSET ? (0x2548 + (function * 0x90)) : (0x3c40 + \
++ (function * 0x90)))
++#define XSTORM_SPQ_PAGE_BASE_OFFSET(function) \
++ (IS_E1H_OFFSET ? (0x2000 + (function * 0x10)) : (0x3328 + \
++ (function * 0x10)))
++#define XSTORM_SPQ_PROD_OFFSET(function) \
++ (IS_E1H_OFFSET ? (0x2008 + (function * 0x10)) : (0x3330 + \
++ (function * 0x10)))
++#define XSTORM_STATS_FLAGS_OFFSET(function) \
++ (IS_E1H_OFFSET ? (0x23d8 + (function * 0x8)) : (0x3b60 + \
++ (function * 0x8)))
+ #define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0
+
+ /**
+ * This file defines HSI constatnts for the ETH flow
+ */
+-
+-/* hash types */
+-#define DEFAULT_HASH_TYPE 0
+-#define IPV4_HASH_TYPE 1
+-#define TCP_IPV4_HASH_TYPE 2
+-#define IPV6_HASH_TYPE 3
+-#define TCP_IPV6_HASH_TYPE 4
++#ifdef _EVEREST_MICROCODE
++#include "microcode_constants.h"
++#include "eth_rx_bd.h"
++#include "eth_tx_bd.h"
++#include "eth_rx_cqe.h"
++#include "eth_rx_sge.h"
++#include "eth_rx_cqe_next_page.h"
++#endif
++
++/* RSS hash types */
++#define DEFAULT_HASH_TYPE 0
++#define IPV4_HASH_TYPE 1
++#define TCP_IPV4_HASH_TYPE 2
++#define IPV6_HASH_TYPE 3
++#define TCP_IPV6_HASH_TYPE 4
++
++/* Ethernet Ring parmaters */
++#define X_ETH_LOCAL_RING_SIZE 13
++#define FIRST_BD_IN_PKT 0
++#define PARSE_BD_INDEX 1
++#define NUM_OF_ETH_BDS_IN_PAGE \
++ ((PAGE_SIZE) / (STRUCT_SIZE(eth_tx_bd)/8))
++
++
++/* Rx ring params */
++#define U_ETH_LOCAL_BD_RING_SIZE (16)
++#define U_ETH_LOCAL_SGE_RING_SIZE (12)
++#define U_ETH_SGL_SIZE (8)
++
++
++#define U_ETH_BDS_PER_PAGE_MASK \
++ ((PAGE_SIZE/(STRUCT_SIZE(eth_rx_bd)/8))-1)
++#define U_ETH_CQE_PER_PAGE_MASK \
++ ((PAGE_SIZE/(STRUCT_SIZE(eth_rx_cqe)/8))-1)
++#define U_ETH_SGES_PER_PAGE_MASK \
++ ((PAGE_SIZE/(STRUCT_SIZE(eth_rx_sge)/8))-1)
++
++#define U_ETH_SGES_PER_PAGE_INVERSE_MASK \
++ (0xFFFF - ((PAGE_SIZE/((STRUCT_SIZE(eth_rx_sge))/8))-1))
++
++
++#define TU_ETH_CQES_PER_PAGE \
++ (PAGE_SIZE/(STRUCT_SIZE(eth_rx_cqe_next_page)/8))
++#define U_ETH_BDS_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_bd)/8))
++#define U_ETH_SGES_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_sge)/8))
++
++#define U_ETH_UNDEFINED_Q 0xFF
+
+ /* values of command IDs in the ramrod message */
+-#define RAMROD_CMD_ID_ETH_PORT_SETUP (80)
+-#define RAMROD_CMD_ID_ETH_CLIENT_SETUP (85)
+-#define RAMROD_CMD_ID_ETH_STAT_QUERY (90)
+-#define RAMROD_CMD_ID_ETH_UPDATE (100)
+-#define RAMROD_CMD_ID_ETH_HALT (105)
+-#define RAMROD_CMD_ID_ETH_SET_MAC (110)
+-#define RAMROD_CMD_ID_ETH_CFC_DEL (115)
+-#define RAMROD_CMD_ID_ETH_PORT_DEL (120)
+-#define RAMROD_CMD_ID_ETH_FORWARD_SETUP (125)
++#define RAMROD_CMD_ID_ETH_PORT_SETUP (80)
++#define RAMROD_CMD_ID_ETH_CLIENT_SETUP (85)
++#define RAMROD_CMD_ID_ETH_STAT_QUERY (90)
++#define RAMROD_CMD_ID_ETH_UPDATE (100)
++#define RAMROD_CMD_ID_ETH_HALT (105)
++#define RAMROD_CMD_ID_ETH_SET_MAC (110)
++#define RAMROD_CMD_ID_ETH_CFC_DEL (115)
++#define RAMROD_CMD_ID_ETH_PORT_DEL (120)
++#define RAMROD_CMD_ID_ETH_FORWARD_SETUP (125)
+
+
+ /* command values for set mac command */
+-#define T_ETH_MAC_COMMAND_SET 0
+-#define T_ETH_MAC_COMMAND_INVALIDATE 1
++#define T_ETH_MAC_COMMAND_SET 0
++#define T_ETH_MAC_COMMAND_INVALIDATE 1
++
++#define T_ETH_INDIRECTION_TABLE_SIZE 128
+
+-#define T_ETH_INDIRECTION_TABLE_SIZE 128
++/*The CRC32 seed, that is used for the hash(reduction) multicast address */
++#define T_ETH_CRC32_HASH_SEED 0x00000000
+
+ /* Maximal L2 clients supported */
+-#define ETH_MAX_RX_CLIENTS (18)
++#define ETH_MAX_RX_CLIENTS_E1 19
++#define ETH_MAX_RX_CLIENTS_E1H 25
++
++/* Maximal aggregation queues supported */
++#define ETH_MAX_AGGREGATION_QUEUES_E1 (32)
++#define ETH_MAX_AGGREGATION_QUEUES_E1H (64)
++
+
+ /**
+ * This file defines HSI constatnts common to all microcode flows
+ */
+
+ /* Connection types */
+-#define ETH_CONNECTION_TYPE 0
++#define ETH_CONNECTION_TYPE 0
++#define TOE_CONNECTION_TYPE 1
++#define RDMA_CONNECTION_TYPE 2
++#define ISCSI_CONNECTION_TYPE 3
++#define FCOE_CONNECTION_TYPE 4
++#define RESERVED_CONNECTION_TYPE_0 5
++#define RESERVED_CONNECTION_TYPE_1 6
++#define RESERVED_CONNECTION_TYPE_2 7
+
+-#define PROTOCOL_STATE_BIT_OFFSET 6
+
+-#define ETH_STATE (ETH_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET)
++#define PROTOCOL_STATE_BIT_OFFSET 6
++
++#define ETH_STATE (ETH_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET)
++#define TOE_STATE (TOE_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET)
++#define RDMA_STATE (RDMA_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET)
+
+ /* microcode fixed page page size 4K (chains and ring segments) */
+-#define MC_PAGE_SIZE (4096)
++#define MC_PAGE_SIZE (4096)
+
+-/* Host coalescing constants */
+
+-/* IGU constants */
+-#define IGU_PORT_BASE 0x0400
+-
+-#define IGU_ADDR_MSIX 0x0000
+-#define IGU_ADDR_INT_ACK 0x0200
+-#define IGU_ADDR_PROD_UPD 0x0201
+-#define IGU_ADDR_ATTN_BITS_UPD 0x0202
+-#define IGU_ADDR_ATTN_BITS_SET 0x0203
+-#define IGU_ADDR_ATTN_BITS_CLR 0x0204
+-#define IGU_ADDR_COALESCE_NOW 0x0205
+-#define IGU_ADDR_SIMD_MASK 0x0206
+-#define IGU_ADDR_SIMD_NOMASK 0x0207
+-#define IGU_ADDR_MSI_CTL 0x0210
+-#define IGU_ADDR_MSI_ADDR_LO 0x0211
+-#define IGU_ADDR_MSI_ADDR_HI 0x0212
+-#define IGU_ADDR_MSI_DATA 0x0213
+-
+-#define IGU_INT_ENABLE 0
+-#define IGU_INT_DISABLE 1
+-#define IGU_INT_NOP 2
+-#define IGU_INT_NOP2 3
++/* Host coalescing constants */
+
+ /* index numbers */
+-#define HC_USTORM_DEF_SB_NUM_INDICES 4
+-#define HC_CSTORM_DEF_SB_NUM_INDICES 8
+-#define HC_XSTORM_DEF_SB_NUM_INDICES 4
+-#define HC_TSTORM_DEF_SB_NUM_INDICES 4
+-#define HC_USTORM_SB_NUM_INDICES 4
+-#define HC_CSTORM_SB_NUM_INDICES 4
++#define HC_USTORM_DEF_SB_NUM_INDICES 8
++#define HC_CSTORM_DEF_SB_NUM_INDICES 8
++#define HC_XSTORM_DEF_SB_NUM_INDICES 4
++#define HC_TSTORM_DEF_SB_NUM_INDICES 4
++#define HC_USTORM_SB_NUM_INDICES 4
++#define HC_CSTORM_SB_NUM_INDICES 4
+
+ /* index values - which counterto update */
+
+-#define HC_INDEX_U_ETH_RX_CQ_CONS 1
++#define HC_INDEX_U_TOE_RX_CQ_CONS 0
++#define HC_INDEX_U_ETH_RX_CQ_CONS 1
++#define HC_INDEX_U_ETH_RX_BD_CONS 2
++#define HC_INDEX_U_FCOE_EQ_CONS 3
+
+-#define HC_INDEX_C_ETH_TX_CQ_CONS 1
++#define HC_INDEX_C_TOE_TX_CQ_CONS 0
++#define HC_INDEX_C_ETH_TX_CQ_CONS 1
++#define HC_INDEX_C_ISCSI_EQ_CONS 2
+
+-#define HC_INDEX_DEF_X_SPQ_CONS 0
++#define HC_INDEX_DEF_X_SPQ_CONS 0
++
++#define HC_INDEX_DEF_C_RDMA_EQ_CONS 0
++#define HC_INDEX_DEF_C_RDMA_NAL_PROD 1
++#define HC_INDEX_DEF_C_ETH_FW_TX_CQ_CONS 2
++#define HC_INDEX_DEF_C_ETH_SLOW_PATH 3
++#define HC_INDEX_DEF_C_ETH_RDMA_CQ_CONS 4
++#define HC_INDEX_DEF_C_ETH_ISCSI_CQ_CONS 5
++
++#define HC_INDEX_DEF_U_ETH_RDMA_RX_CQ_CONS 0
++#define HC_INDEX_DEF_U_ETH_ISCSI_RX_CQ_CONS 1
++#define HC_INDEX_DEF_U_ETH_RDMA_RX_BD_CONS 2
++#define HC_INDEX_DEF_U_ETH_ISCSI_RX_BD_CONS 3
+
+-#define HC_INDEX_DEF_C_ETH_FW_TX_CQ_CONS 2
+-#define HC_INDEX_DEF_C_ETH_SLOW_PATH 3
+
+ /* used by the driver to get the SB offset */
+-#define USTORM_ID 0
+-#define CSTORM_ID 1
+-#define XSTORM_ID 2
+-#define TSTORM_ID 3
+-#define ATTENTION_ID 4
++#define USTORM_ID 0
++#define CSTORM_ID 1
++#define XSTORM_ID 2
++#define TSTORM_ID 3
++#define ATTENTION_ID 4
+
+ /* max number of slow path commands per port */
+-#define MAX_RAMRODS_PER_PORT (8)
++#define MAX_RAMRODS_PER_PORT (8)
+
+ /* values for RX ETH CQE type field */
+-#define RX_ETH_CQE_TYPE_ETH_FASTPATH (0)
+-#define RX_ETH_CQE_TYPE_ETH_RAMROD (1)
+-
+-/* MAC address list size */
+-#define T_MAC_ADDRESS_LIST_SIZE (96)
+-
++#define RX_ETH_CQE_TYPE_ETH_FASTPATH (0)
++#define RX_ETH_CQE_TYPE_ETH_RAMROD (1)
++
++
++/**** DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/
++#define EMULATION_FREQUENCY_FACTOR (1600)
++#define FPGA_FREQUENCY_FACTOR (100)
++
++#define TIMERS_TICK_SIZE_CHIP (1e-3)
++#define TIMERS_TICK_SIZE_EMUL \
++ ((TIMERS_TICK_SIZE_CHIP)/((EMULATION_FREQUENCY_FACTOR)))
++#define TIMERS_TICK_SIZE_FPGA \
++ ((TIMERS_TICK_SIZE_CHIP)/((FPGA_FREQUENCY_FACTOR)))
++
++#define TSEMI_CLK1_RESUL_CHIP (1e-3)
++#define TSEMI_CLK1_RESUL_EMUL \
++ ((TSEMI_CLK1_RESUL_CHIP)/(EMULATION_FREQUENCY_FACTOR))
++#define TSEMI_CLK1_RESUL_FPGA \
++ ((TSEMI_CLK1_RESUL_CHIP)/(FPGA_FREQUENCY_FACTOR))
++
++#define USEMI_CLK1_RESUL_CHIP \
++ (TIMERS_TICK_SIZE_CHIP)
++#define USEMI_CLK1_RESUL_EMUL \
++ (TIMERS_TICK_SIZE_EMUL)
++#define USEMI_CLK1_RESUL_FPGA \
++ (TIMERS_TICK_SIZE_FPGA)
++
++#define XSEMI_CLK1_RESUL_CHIP (1e-3)
++#define XSEMI_CLK1_RESUL_EMUL \
++ ((XSEMI_CLK1_RESUL_CHIP)/(EMULATION_FREQUENCY_FACTOR))
++#define XSEMI_CLK1_RESUL_FPGA \
++ ((XSEMI_CLK1_RESUL_CHIP)/(FPGA_FREQUENCY_FACTOR))
++
++#define XSEMI_CLK2_RESUL_CHIP (1e-6)
++#define XSEMI_CLK2_RESUL_EMUL \
++ ((XSEMI_CLK2_RESUL_CHIP)/(EMULATION_FREQUENCY_FACTOR))
++#define XSEMI_CLK2_RESUL_FPGA \
++ ((XSEMI_CLK2_RESUL_CHIP)/(FPGA_FREQUENCY_FACTOR))
++
++#define SDM_TIMER_TICK_RESUL_CHIP (4*(1e-6))
++#define SDM_TIMER_TICK_RESUL_EMUL \
++ ((SDM_TIMER_TICK_RESUL_CHIP)/(EMULATION_FREQUENCY_FACTOR))
++#define SDM_TIMER_TICK_RESUL_FPGA \
++ ((SDM_TIMER_TICK_RESUL_CHIP)/(FPGA_FREQUENCY_FACTOR))
++
++
++/**** END DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/
+ #define XSTORM_IP_ID_ROLL_HALF 0x8000
+ #define XSTORM_IP_ID_ROLL_ALL 0
+
+-#define FW_LOG_LIST_SIZE (50)
++#define FW_LOG_LIST_SIZE (50)
++
++#define NUM_OF_PROTOCOLS 4
++#define NUM_OF_SAFC_BITS 16
++#define MAX_COS_NUMBER 4
++#define MAX_T_STAT_COUNTER_ID 18
++#define MAX_X_STAT_COUNTER_ID 18
++#define MAX_U_STAT_COUNTER_ID 18
++
+
+-#define NUM_OF_PROTOCOLS 4
+-#define MAX_COS_NUMBER 16
+-#define MAX_T_STAT_COUNTER_ID 18
++#define UNKNOWN_ADDRESS 0
++#define UNICAST_ADDRESS 1
++#define MULTICAST_ADDRESS 2
++#define BROADCAST_ADDRESS 3
+
+-#define T_FAIR 1
+-#define FAIR_MEM 2
+-#define RS_PERIODIC_TIMEOUT_IN_SDM_TICS 25
++#define SINGLE_FUNCTION 0
++#define MULTI_FUNCTION 1
+
+-#define UNKNOWN_ADDRESS 0
+-#define UNICAST_ADDRESS 1
+-#define MULTICAST_ADDRESS 2
+-#define BROADCAST_ADDRESS 3
++#define IP_V4 0
++#define IP_V6 1
+
+diff --git a/drivers/net/bnx2x_hsi.h b/drivers/net/bnx2x_hsi.h
+index b21075c..c416823 100644
+--- a/drivers/net/bnx2x_hsi.h
++++ b/drivers/net/bnx2x_hsi.h
+@@ -132,6 +132,12 @@ struct shared_hw_cfg { /* NVRAM Offset */
+ #define SHARED_HW_CFG_BOARD_TYPE_BCM957710T1003G 0x00000008
+ #define SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G 0x00000009
+ #define SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G 0x0000000a
++#define SHARED_HW_CFG_BOARD_TYPE_BCM957710A1023G 0x0000000b
++#define SHARED_HW_CFG_BOARD_TYPE_BCM957710A1033G 0x0000000c
++#define SHARED_HW_CFG_BOARD_TYPE_BCM957711T1101 0x0000000d
++#define SHARED_HW_CFG_BOARD_TYPE_BCM957711ET1201 0x0000000e
++#define SHARED_HW_CFG_BOARD_TYPE_BCM957711A1133G 0x0000000f
++#define SHARED_HW_CFG_BOARD_TYPE_BCM957711EA1233G 0x00000010
+
+ #define SHARED_HW_CFG_BOARD_VER_MASK 0xffff0000
+ #define SHARED_HW_CFG_BOARD_VER_SHIFT 16
+@@ -313,6 +319,7 @@ struct shared_feat_cfg { /* NVRAM Offset */
+
+ u32 config; /* 0x450 */
+ #define SHARED_FEATURE_BMC_ECHO_MODE_EN 0x00000001
++#define SHARED_FEATURE_MF_MODE_DISABLED 0x00000100
+
+ };
+
+@@ -502,28 +509,41 @@ struct port_feat_cfg { /* port 0: 0x454 port 1: 0x4c8 */
+ };
+
+
+-/*****************************************************************************
+- * Device Information *
+- *****************************************************************************/
+-struct dev_info { /* size */
++/****************************************************************************
++ * Device Information *
++ ****************************************************************************/
++struct dev_info { /* size */
+
+- u32 bc_rev; /* 8 bits each: major, minor, build */ /* 4 */
++ u32 bc_rev; /* 8 bits each: major, minor, build */ /* 4 */
+
+- struct shared_hw_cfg shared_hw_config; /* 40 */
++ struct shared_hw_cfg shared_hw_config; /* 40 */
+
+- struct port_hw_cfg port_hw_config[PORT_MAX]; /* 400*2=800 */
++ struct port_hw_cfg port_hw_config[PORT_MAX]; /* 400*2=800 */
+
+- struct shared_feat_cfg shared_feature_config; /* 4 */
++ struct shared_feat_cfg shared_feature_config; /* 4 */
+
+- struct port_feat_cfg port_feature_config[PORT_MAX]; /* 116*2=232 */
++ struct port_feat_cfg port_feature_config[PORT_MAX];/* 116*2=232 */
+
+ };
+
+
+ #define FUNC_0 0
+ #define FUNC_1 1
++#define FUNC_2 2
++#define FUNC_3 3
++#define FUNC_4 4
++#define FUNC_5 5
++#define FUNC_6 6
++#define FUNC_7 7
+ #define E1_FUNC_MAX 2
+-#define FUNC_MAX E1_FUNC_MAX
++#define E1H_FUNC_MAX 8
++
++#define VN_0 0
++#define VN_1 1
++#define VN_2 2
++#define VN_3 3
++#define E1VN_MAX 1
++#define E1HVN_MAX 4
+
+
+ /* This value (in milliseconds) determines the frequency of the driver
+@@ -619,7 +639,9 @@ struct drv_port_mb {
+ #define LINK_STATUS_LINK_PARTNER_15GXFD_CAPABLE 0x08000000
+ #define LINK_STATUS_LINK_PARTNER_16GXFD_CAPABLE 0x10000000
+
+- u32 reserved[3];
++ u32 port_stx;
++
++ u32 reserved[2];
+
+ };
+
+@@ -642,6 +664,11 @@ struct drv_func_mb {
+ #define DRV_MSG_CODE_GET_MANUF_KEY 0x82000000
+ #define DRV_MSG_CODE_LOAD_L2B_PRAM 0x90000000
+
++#define BIOS_MSG_CODE_LIC_CHALLENGE 0xff010000
++#define BIOS_MSG_CODE_LIC_RESPONSE 0xff020000
++#define BIOS_MSG_CODE_VIRT_MAC_PRIM 0xff030000
++#define BIOS_MSG_CODE_VIRT_MAC_ISCSI 0xff040000
++
+ #define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff
+
+ u32 drv_mb_param;
+@@ -671,6 +698,11 @@ struct drv_func_mb {
+ #define FW_MSG_CODE_L2B_PRAM_X_LOAD_FAILURE 0x90230000
+ #define FW_MSG_CODE_L2B_PRAM_U_LOAD_FAILURE 0x90240000
+
++#define FW_MSG_CODE_LIC_CHALLENGE 0xff010000
++#define FW_MSG_CODE_LIC_RESPONSE 0xff020000
++#define FW_MSG_CODE_VIRT_MAC_PRIM 0xff030000
++#define FW_MSG_CODE_VIRT_MAC_ISCSI 0xff040000
++
+ #define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff
+
+ u32 fw_mb_param;
+@@ -696,7 +728,13 @@ struct drv_func_mb {
+ u32 iscsi_boot_signature;
+ u32 iscsi_boot_block_offset;
+
+- u32 reserved[3];
++ u32 drv_status;
++#define DRV_STATUS_PMF 0x00000001
++
++ u32 virt_mac_upper;
++#define VIRT_MAC_SIGN_MASK 0xffff0000
++#define VIRT_MAC_SIGNATURE 0x564d0000
++ u32 virt_mac_lower;
+
+ };
+
+@@ -713,6 +751,92 @@ struct mgmtfw_state {
+
+
+ /****************************************************************************
++ * Multi-Function configuration *
++ ****************************************************************************/
++struct shared_mf_cfg {
++
++ u32 clp_mb;
++#define SHARED_MF_CLP_SET_DEFAULT 0x00000000
++ /* set by CLP */
++#define SHARED_MF_CLP_EXIT 0x00000001
++ /* set by MCP */
++#define SHARED_MF_CLP_EXIT_DONE 0x00010000
++
++};
++
++struct port_mf_cfg {
++
++ u32 dynamic_cfg; /* device control channel */
++#define PORT_MF_CFG_OUTER_VLAN_TAG_MASK 0x0000ffff
++#define PORT_MF_CFG_OUTER_VLAN_TAG_SHIFT 0
++#define PORT_MF_CFG_DYNAMIC_CFG_ENABLED 0x00010000
++#define PORT_MF_CFG_DYNAMIC_CFG_DEFAULT 0x00000000
++
++ u32 reserved[3];
++
++};
++
++struct func_mf_cfg {
++
++ u32 config;
++ /* E/R/I/D */
++ /* function 0 of each port cannot be hidden */
++#define FUNC_MF_CFG_FUNC_HIDE 0x00000001
++
++#define FUNC_MF_CFG_PROTOCOL_MASK 0x00000007
++#define FUNC_MF_CFG_PROTOCOL_ETHERNET 0x00000002
++#define FUNC_MF_CFG_PROTOCOL_ETHERNET_WITH_RDMA 0x00000004
++#define FUNC_MF_CFG_PROTOCOL_ISCSI 0x00000006
++#define FUNC_MF_CFG_PROTOCOL_DEFAULT\
++ FUNC_MF_CFG_PROTOCOL_ETHERNET_WITH_RDMA
++
++#define FUNC_MF_CFG_FUNC_DISABLED 0x00000008
++
++ /* PRI */
++ /* 0 - low priority, 3 - high priority */
++#define FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK 0x00000300
++#define FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT 8
++#define FUNC_MF_CFG_TRANSMIT_PRIORITY_DEFAULT 0x00000000
++
++ /* MINBW, MAXBW */
++ /* value range - 0..100, increments in 100Mbps */
++#define FUNC_MF_CFG_MIN_BW_MASK 0x00ff0000
++#define FUNC_MF_CFG_MIN_BW_SHIFT 16
++#define FUNC_MF_CFG_MIN_BW_DEFAULT 0x00000000
++#define FUNC_MF_CFG_MAX_BW_MASK 0xff000000
++#define FUNC_MF_CFG_MAX_BW_SHIFT 24
++#define FUNC_MF_CFG_MAX_BW_DEFAULT 0x64000000
++
++ u32 mac_upper; /* MAC */
++#define FUNC_MF_CFG_UPPERMAC_MASK 0x0000ffff
++#define FUNC_MF_CFG_UPPERMAC_SHIFT 0
++#define FUNC_MF_CFG_UPPERMAC_DEFAULT FUNC_MF_CFG_UPPERMAC_MASK
++ u32 mac_lower;
++#define FUNC_MF_CFG_LOWERMAC_DEFAULT 0xffffffff
++
++ u32 e1hov_tag; /* VNI */
++#define FUNC_MF_CFG_E1HOV_TAG_MASK 0x0000ffff
++#define FUNC_MF_CFG_E1HOV_TAG_SHIFT 0
++#define FUNC_MF_CFG_E1HOV_TAG_DEFAULT FUNC_MF_CFG_E1HOV_TAG_MASK
++
++ u32 reserved[2];
++
++};
++
++struct mf_cfg {
++
++ struct shared_mf_cfg shared_mf_config;
++ struct port_mf_cfg port_mf_config[PORT_MAX];
++#if defined(b710)
++ struct func_mf_cfg func_mf_config[E1_FUNC_MAX];
++#else
++ struct func_mf_cfg func_mf_config[E1H_FUNC_MAX];
++#endif
++
++};
++
++
++/****************************************************************************
+ * Shared Memory Region *
+ ****************************************************************************/
+ struct shmem_region { /* SharedMem Offset (size) */
+@@ -747,14 +871,350 @@ struct shmem_region { /* SharedMem Offset (size) */
+ struct mgmtfw_state mgmtfw_state; /* 0x4ac (0x1b8) */
+
+ struct drv_port_mb port_mb[PORT_MAX]; /* 0x664 (16*2=0x20) */
+- struct drv_func_mb func_mb[FUNC_MAX]; /* 0x684 (44*2=0x58) */
++ struct drv_func_mb func_mb[E1H_FUNC_MAX];
++
++ struct mf_cfg mf_cfg;
+
+ }; /* 0x6dc */
+
+
++struct emac_stats {
++ u32 rx_stat_ifhcinoctets;
++ u32 rx_stat_ifhcinbadoctets;
++ u32 rx_stat_etherstatsfragments;
++ u32 rx_stat_ifhcinucastpkts;
++ u32 rx_stat_ifhcinmulticastpkts;
++ u32 rx_stat_ifhcinbroadcastpkts;
++ u32 rx_stat_dot3statsfcserrors;
++ u32 rx_stat_dot3statsalignmenterrors;
++ u32 rx_stat_dot3statscarriersenseerrors;
++ u32 rx_stat_xonpauseframesreceived;
++ u32 rx_stat_xoffpauseframesreceived;
++ u32 rx_stat_maccontrolframesreceived;
++ u32 rx_stat_xoffstateentered;
++ u32 rx_stat_dot3statsframestoolong;
++ u32 rx_stat_etherstatsjabbers;
++ u32 rx_stat_etherstatsundersizepkts;
++ u32 rx_stat_etherstatspkts64octets;
++ u32 rx_stat_etherstatspkts65octetsto127octets;
++ u32 rx_stat_etherstatspkts128octetsto255octets;
++ u32 rx_stat_etherstatspkts256octetsto511octets;
++ u32 rx_stat_etherstatspkts512octetsto1023octets;
++ u32 rx_stat_etherstatspkts1024octetsto1522octets;
++ u32 rx_stat_etherstatspktsover1522octets;
++
++ u32 rx_stat_falsecarriererrors;
++
++ u32 tx_stat_ifhcoutoctets;
++ u32 tx_stat_ifhcoutbadoctets;
++ u32 tx_stat_etherstatscollisions;
++ u32 tx_stat_outxonsent;
++ u32 tx_stat_outxoffsent;
++ u32 tx_stat_flowcontroldone;
++ u32 tx_stat_dot3statssinglecollisionframes;
++ u32 tx_stat_dot3statsmultiplecollisionframes;
++ u32 tx_stat_dot3statsdeferredtransmissions;
++ u32 tx_stat_dot3statsexcessivecollisions;
++ u32 tx_stat_dot3statslatecollisions;
++ u32 tx_stat_ifhcoutucastpkts;
++ u32 tx_stat_ifhcoutmulticastpkts;
++ u32 tx_stat_ifhcoutbroadcastpkts;
++ u32 tx_stat_etherstatspkts64octets;
++ u32 tx_stat_etherstatspkts65octetsto127octets;
++ u32 tx_stat_etherstatspkts128octetsto255octets;
++ u32 tx_stat_etherstatspkts256octetsto511octets;
++ u32 tx_stat_etherstatspkts512octetsto1023octets;
++ u32 tx_stat_etherstatspkts1024octetsto1522octets;
++ u32 tx_stat_etherstatspktsover1522octets;
++ u32 tx_stat_dot3statsinternalmactransmiterrors;
++};
++
++
++struct bmac_stats {
++ u32 tx_stat_gtpkt_lo;
++ u32 tx_stat_gtpkt_hi;
++ u32 tx_stat_gtxpf_lo;
++ u32 tx_stat_gtxpf_hi;
++ u32 tx_stat_gtfcs_lo;
++ u32 tx_stat_gtfcs_hi;
++ u32 tx_stat_gtmca_lo;
++ u32 tx_stat_gtmca_hi;
++ u32 tx_stat_gtbca_lo;
++ u32 tx_stat_gtbca_hi;
++ u32 tx_stat_gtfrg_lo;
++ u32 tx_stat_gtfrg_hi;
++ u32 tx_stat_gtovr_lo;
++ u32 tx_stat_gtovr_hi;
++ u32 tx_stat_gt64_lo;
++ u32 tx_stat_gt64_hi;
++ u32 tx_stat_gt127_lo;
++ u32 tx_stat_gt127_hi;
++ u32 tx_stat_gt255_lo;
++ u32 tx_stat_gt255_hi;
++ u32 tx_stat_gt511_lo;
++ u32 tx_stat_gt511_hi;
++ u32 tx_stat_gt1023_lo;
++ u32 tx_stat_gt1023_hi;
++ u32 tx_stat_gt1518_lo;
++ u32 tx_stat_gt1518_hi;
++ u32 tx_stat_gt2047_lo;
++ u32 tx_stat_gt2047_hi;
++ u32 tx_stat_gt4095_lo;
++ u32 tx_stat_gt4095_hi;
++ u32 tx_stat_gt9216_lo;
++ u32 tx_stat_gt9216_hi;
++ u32 tx_stat_gt16383_lo;
++ u32 tx_stat_gt16383_hi;
++ u32 tx_stat_gtmax_lo;
++ u32 tx_stat_gtmax_hi;
++ u32 tx_stat_gtufl_lo;
++ u32 tx_stat_gtufl_hi;
++ u32 tx_stat_gterr_lo;
++ u32 tx_stat_gterr_hi;
++ u32 tx_stat_gtbyt_lo;
++ u32 tx_stat_gtbyt_hi;
++
++ u32 rx_stat_gr64_lo;
++ u32 rx_stat_gr64_hi;
++ u32 rx_stat_gr127_lo;
++ u32 rx_stat_gr127_hi;
++ u32 rx_stat_gr255_lo;
++ u32 rx_stat_gr255_hi;
++ u32 rx_stat_gr511_lo;
++ u32 rx_stat_gr511_hi;
++ u32 rx_stat_gr1023_lo;
++ u32 rx_stat_gr1023_hi;
++ u32 rx_stat_gr1518_lo;
++ u32 rx_stat_gr1518_hi;
++ u32 rx_stat_gr2047_lo;
++ u32 rx_stat_gr2047_hi;
++ u32 rx_stat_gr4095_lo;
++ u32 rx_stat_gr4095_hi;
++ u32 rx_stat_gr9216_lo;
++ u32 rx_stat_gr9216_hi;
++ u32 rx_stat_gr16383_lo;
++ u32 rx_stat_gr16383_hi;
++ u32 rx_stat_grmax_lo;
++ u32 rx_stat_grmax_hi;
++ u32 rx_stat_grpkt_lo;
++ u32 rx_stat_grpkt_hi;
++ u32 rx_stat_grfcs_lo;
++ u32 rx_stat_grfcs_hi;
++ u32 rx_stat_grmca_lo;
++ u32 rx_stat_grmca_hi;
++ u32 rx_stat_grbca_lo;
++ u32 rx_stat_grbca_hi;
++ u32 rx_stat_grxcf_lo;
++ u32 rx_stat_grxcf_hi;
++ u32 rx_stat_grxpf_lo;
++ u32 rx_stat_grxpf_hi;
++ u32 rx_stat_grxuo_lo;
++ u32 rx_stat_grxuo_hi;
++ u32 rx_stat_grjbr_lo;
++ u32 rx_stat_grjbr_hi;
++ u32 rx_stat_grovr_lo;
++ u32 rx_stat_grovr_hi;
++ u32 rx_stat_grflr_lo;
++ u32 rx_stat_grflr_hi;
++ u32 rx_stat_grmeg_lo;
++ u32 rx_stat_grmeg_hi;
++ u32 rx_stat_grmeb_lo;
++ u32 rx_stat_grmeb_hi;
++ u32 rx_stat_grbyt_lo;
++ u32 rx_stat_grbyt_hi;
++ u32 rx_stat_grund_lo;
++ u32 rx_stat_grund_hi;
++ u32 rx_stat_grfrg_lo;
++ u32 rx_stat_grfrg_hi;
++ u32 rx_stat_grerb_lo;
++ u32 rx_stat_grerb_hi;
++ u32 rx_stat_grfre_lo;
++ u32 rx_stat_grfre_hi;
++ u32 rx_stat_gripj_lo;
++ u32 rx_stat_gripj_hi;
++};
++
++
++union mac_stats {
++ struct emac_stats emac_stats;
++ struct bmac_stats bmac_stats;
++};
++
++
++struct mac_stx {
++ /* in_bad_octets */
++ u32 rx_stat_ifhcinbadoctets_hi;
++ u32 rx_stat_ifhcinbadoctets_lo;
++
++ /* out_bad_octets */
++ u32 tx_stat_ifhcoutbadoctets_hi;
++ u32 tx_stat_ifhcoutbadoctets_lo;
++
++ /* crc_receive_errors */
++ u32 rx_stat_dot3statsfcserrors_hi;
++ u32 rx_stat_dot3statsfcserrors_lo;
++ /* alignment_errors */
++ u32 rx_stat_dot3statsalignmenterrors_hi;
++ u32 rx_stat_dot3statsalignmenterrors_lo;
++ /* carrier_sense_errors */
++ u32 rx_stat_dot3statscarriersenseerrors_hi;
++ u32 rx_stat_dot3statscarriersenseerrors_lo;
++ /* false_carrier_detections */
++ u32 rx_stat_falsecarriererrors_hi;
++ u32 rx_stat_falsecarriererrors_lo;
++
++ /* runt_packets_received */
++ u32 rx_stat_etherstatsundersizepkts_hi;
++ u32 rx_stat_etherstatsundersizepkts_lo;
++ /* jabber_packets_received */
++ u32 rx_stat_dot3statsframestoolong_hi;
++ u32 rx_stat_dot3statsframestoolong_lo;
++
++ /* error_runt_packets_received */
++ u32 rx_stat_etherstatsfragments_hi;
++ u32 rx_stat_etherstatsfragments_lo;
++ /* error_jabber_packets_received */
++ u32 rx_stat_etherstatsjabbers_hi;
++ u32 rx_stat_etherstatsjabbers_lo;
++
++ /* control_frames_received */
++ u32 rx_stat_maccontrolframesreceived_hi;
++ u32 rx_stat_maccontrolframesreceived_lo;
++ u32 rx_stat_bmac_xpf_hi;
++ u32 rx_stat_bmac_xpf_lo;
++ u32 rx_stat_bmac_xcf_hi;
++ u32 rx_stat_bmac_xcf_lo;
++
++ /* xoff_state_entered */
++ u32 rx_stat_xoffstateentered_hi;
++ u32 rx_stat_xoffstateentered_lo;
++ /* pause_xon_frames_received */
++ u32 rx_stat_xonpauseframesreceived_hi;
++ u32 rx_stat_xonpauseframesreceived_lo;
++ /* pause_xoff_frames_received */
++ u32 rx_stat_xoffpauseframesreceived_hi;
++ u32 rx_stat_xoffpauseframesreceived_lo;
++ /* pause_xon_frames_transmitted */
++ u32 tx_stat_outxonsent_hi;
++ u32 tx_stat_outxonsent_lo;
++ /* pause_xoff_frames_transmitted */
++ u32 tx_stat_outxoffsent_hi;
++ u32 tx_stat_outxoffsent_lo;
++ /* flow_control_done */
++ u32 tx_stat_flowcontroldone_hi;
++ u32 tx_stat_flowcontroldone_lo;
++
++ /* ether_stats_collisions */
++ u32 tx_stat_etherstatscollisions_hi;
++ u32 tx_stat_etherstatscollisions_lo;
++ /* single_collision_transmit_frames */
++ u32 tx_stat_dot3statssinglecollisionframes_hi;
++ u32 tx_stat_dot3statssinglecollisionframes_lo;
++ /* multiple_collision_transmit_frames */
++ u32 tx_stat_dot3statsmultiplecollisionframes_hi;
++ u32 tx_stat_dot3statsmultiplecollisionframes_lo;
++ /* deferred_transmissions */
++ u32 tx_stat_dot3statsdeferredtransmissions_hi;
++ u32 tx_stat_dot3statsdeferredtransmissions_lo;
++ /* excessive_collision_frames */
++ u32 tx_stat_dot3statsexcessivecollisions_hi;
++ u32 tx_stat_dot3statsexcessivecollisions_lo;
++ /* late_collision_frames */
++ u32 tx_stat_dot3statslatecollisions_hi;
++ u32 tx_stat_dot3statslatecollisions_lo;
++
++ /* frames_transmitted_64_bytes */
++ u32 tx_stat_etherstatspkts64octets_hi;
++ u32 tx_stat_etherstatspkts64octets_lo;
++ /* frames_transmitted_65_127_bytes */
++ u32 tx_stat_etherstatspkts65octetsto127octets_hi;
++ u32 tx_stat_etherstatspkts65octetsto127octets_lo;
++ /* frames_transmitted_128_255_bytes */
++ u32 tx_stat_etherstatspkts128octetsto255octets_hi;
++ u32 tx_stat_etherstatspkts128octetsto255octets_lo;
++ /* frames_transmitted_256_511_bytes */
++ u32 tx_stat_etherstatspkts256octetsto511octets_hi;
++ u32 tx_stat_etherstatspkts256octetsto511octets_lo;
++ /* frames_transmitted_512_1023_bytes */
++ u32 tx_stat_etherstatspkts512octetsto1023octets_hi;
++ u32 tx_stat_etherstatspkts512octetsto1023octets_lo;
++ /* frames_transmitted_1024_1522_bytes */
++ u32 tx_stat_etherstatspkts1024octetsto1522octets_hi;
++ u32 tx_stat_etherstatspkts1024octetsto1522octets_lo;
++ /* frames_transmitted_1523_9022_bytes */
++ u32 tx_stat_etherstatspktsover1522octets_hi;
++ u32 tx_stat_etherstatspktsover1522octets_lo;
++ u32 tx_stat_bmac_2047_hi;
++ u32 tx_stat_bmac_2047_lo;
++ u32 tx_stat_bmac_4095_hi;
++ u32 tx_stat_bmac_4095_lo;
++ u32 tx_stat_bmac_9216_hi;
++ u32 tx_stat_bmac_9216_lo;
++ u32 tx_stat_bmac_16383_hi;
++ u32 tx_stat_bmac_16383_lo;
++
++ /* internal_mac_transmit_errors */
++ u32 tx_stat_dot3statsinternalmactransmiterrors_hi;
++ u32 tx_stat_dot3statsinternalmactransmiterrors_lo;
++
++ /* if_out_discards */
++ u32 tx_stat_bmac_ufl_hi;
++ u32 tx_stat_bmac_ufl_lo;
++};
++
++
++#define MAC_STX_IDX_MAX 2
++
++struct host_port_stats {
++ u32 host_port_stats_start;
++
++ struct mac_stx mac_stx[MAC_STX_IDX_MAX];
++
++ u32 brb_drop_hi;
++ u32 brb_drop_lo;
++
++ u32 host_port_stats_end;
++};
++
++
++struct host_func_stats {
++ u32 host_func_stats_start;
++
++ u32 total_bytes_received_hi;
++ u32 total_bytes_received_lo;
++
++ u32 total_bytes_transmitted_hi;
++ u32 total_bytes_transmitted_lo;
++
++ u32 total_unicast_packets_received_hi;
++ u32 total_unicast_packets_received_lo;
++
++ u32 total_multicast_packets_received_hi;
++ u32 total_multicast_packets_received_lo;
++
++ u32 total_broadcast_packets_received_hi;
++ u32 total_broadcast_packets_received_lo;
++
++ u32 total_unicast_packets_transmitted_hi;
++ u32 total_unicast_packets_transmitted_lo;
++
++ u32 total_multicast_packets_transmitted_hi;
++ u32 total_multicast_packets_transmitted_lo;
++
++ u32 total_broadcast_packets_transmitted_hi;
++ u32 total_broadcast_packets_transmitted_lo;
++
++ u32 valid_bytes_received_hi;
++ u32 valid_bytes_received_lo;
++
++ u32 host_func_stats_end;
++};
++
++
+ #define BCM_5710_FW_MAJOR_VERSION 4
+-#define BCM_5710_FW_MINOR_VERSION 0
+-#define BCM_5710_FW_REVISION_VERSION 14
++#define BCM_5710_FW_MINOR_VERSION 8
++#define BCM_5710_FW_REVISION_VERSION 53
++#define BCM_5710_FW_ENGINEERING_VERSION 0
+ #define BCM_5710_FW_COMPILE_FLAGS 1
+
+
+@@ -793,7 +1253,7 @@ struct doorbell_hdr {
+ };
+
+ /*
+- * doorbell message send to the chip
++ * doorbell message sent to the chip
+ */
+ struct doorbell {
+ #if defined(__BIG_ENDIAN)
+@@ -809,7 +1269,7 @@ struct doorbell {
+
+
+ /*
+- * IGU driver acknowlegement register
++ * IGU driver acknowledgement register
+ */
+ struct igu_ack_register {
+ #if defined(__BIG_ENDIAN)
+@@ -849,8 +1309,10 @@ struct parsing_flags {
+ u16 flags;
+ #define PARSING_FLAGS_ETHERNET_ADDRESS_TYPE (0x1<<0)
+ #define PARSING_FLAGS_ETHERNET_ADDRESS_TYPE_SHIFT 0
+-#define PARSING_FLAGS_NUMBER_OF_NESTED_VLANS (0x3<<1)
+-#define PARSING_FLAGS_NUMBER_OF_NESTED_VLANS_SHIFT 1
++#define PARSING_FLAGS_VLAN (0x1<<1)
++#define PARSING_FLAGS_VLAN_SHIFT 1
++#define PARSING_FLAGS_EXTRA_VLAN (0x1<<2)
++#define PARSING_FLAGS_EXTRA_VLAN_SHIFT 2
+ #define PARSING_FLAGS_OVER_ETHERNET_PROTOCOL (0x3<<3)
+ #define PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT 3
+ #define PARSING_FLAGS_IP_OPTIONS (0x1<<5)
+@@ -874,6 +1336,12 @@ struct parsing_flags {
+ };
+
+
++struct regpair {
++ u32 lo;
++ u32 hi;
++};
++
++
+ /*
+ * dmae command structure
+ */
+@@ -901,8 +1369,10 @@ struct dmae_command {
+ #define DMAE_COMMAND_SRC_RESET_SHIFT 13
+ #define DMAE_COMMAND_DST_RESET (0x1<<14)
+ #define DMAE_COMMAND_DST_RESET_SHIFT 14
+-#define DMAE_COMMAND_RESERVED0 (0x1FFFF<<15)
+-#define DMAE_COMMAND_RESERVED0_SHIFT 15
++#define DMAE_COMMAND_E1HVN (0x3<<15)
++#define DMAE_COMMAND_E1HVN_SHIFT 15
++#define DMAE_COMMAND_RESERVED0 (0x7FFF<<17)
++#define DMAE_COMMAND_RESERVED0_SHIFT 17
+ u32 src_addr_lo;
+ u32 src_addr_hi;
+ u32 dst_addr_lo;
+@@ -952,72 +1422,103 @@ struct double_regpair {
+
+
+ /*
+- * The eth Rx Buffer Descriptor
++ * The eth storm context of Ustorm (configuration part)
+ */
+-struct eth_rx_bd {
+- u32 addr_lo;
+- u32 addr_hi;
+-};
+-
+-/*
+- * The eth storm context of Ustorm
+- */
+-struct ustorm_eth_st_context {
++struct ustorm_eth_st_context_config {
+ #if defined(__BIG_ENDIAN)
+- u8 sb_index_number;
++ u8 flags;
++#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT (0x1<<0)
++#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT_SHIFT 0
++#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_DYNAMIC_HC (0x1<<1)
++#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_DYNAMIC_HC_SHIFT 1
++#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA (0x1<<2)
++#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA_SHIFT 2
++#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING (0x1<<3)
++#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING_SHIFT 3
++#define __USTORM_ETH_ST_CONTEXT_CONFIG_RESERVED0 (0xF<<4)
++#define __USTORM_ETH_ST_CONTEXT_CONFIG_RESERVED0_SHIFT 4
+ u8 status_block_id;
+- u8 __local_rx_bd_cons;
+- u8 __local_rx_bd_prod;
++ u8 clientId;
++ u8 sb_index_numbers;
++#define USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER (0xF<<0)
++#define USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER_SHIFT 0
++#define USTORM_ETH_ST_CONTEXT_CONFIG_BD_SB_INDEX_NUMBER (0xF<<4)
++#define USTORM_ETH_ST_CONTEXT_CONFIG_BD_SB_INDEX_NUMBER_SHIFT 4
+ #elif defined(__LITTLE_ENDIAN)
+- u8 __local_rx_bd_prod;
+- u8 __local_rx_bd_cons;
++ u8 sb_index_numbers;
++#define USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER (0xF<<0)
++#define USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER_SHIFT 0
++#define USTORM_ETH_ST_CONTEXT_CONFIG_BD_SB_INDEX_NUMBER (0xF<<4)
++#define USTORM_ETH_ST_CONTEXT_CONFIG_BD_SB_INDEX_NUMBER_SHIFT 4
++ u8 clientId;
+ u8 status_block_id;
+- u8 sb_index_number;
+-#endif
+-#if defined(__BIG_ENDIAN)
+- u16 rcq_cons;
+- u16 rx_bd_cons;
+-#elif defined(__LITTLE_ENDIAN)
+- u16 rx_bd_cons;
+- u16 rcq_cons;
++ u8 flags;
++#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT (0x1<<0)
++#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT_SHIFT 0
++#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_DYNAMIC_HC (0x1<<1)
++#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_DYNAMIC_HC_SHIFT 1
++#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA (0x1<<2)
++#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA_SHIFT 2
++#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING (0x1<<3)
++#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING_SHIFT 3
++#define __USTORM_ETH_ST_CONTEXT_CONFIG_RESERVED0 (0xF<<4)
++#define __USTORM_ETH_ST_CONTEXT_CONFIG_RESERVED0_SHIFT 4
+ #endif
+- u32 rx_bd_page_base_lo;
+- u32 rx_bd_page_base_hi;
+- u32 rcq_base_address_lo;
+- u32 rcq_base_address_hi;
+ #if defined(__BIG_ENDIAN)
+- u16 __num_of_returned_cqes;
+- u8 num_rss;
+- u8 flags;
+-#define USTORM_ETH_ST_CONTEXT_ENABLE_MC_ALIGNMENT (0x1<<0)
+-#define USTORM_ETH_ST_CONTEXT_ENABLE_MC_ALIGNMENT_SHIFT 0
+-#define USTORM_ETH_ST_CONTEXT_ENABLE_DYNAMIC_HC (0x1<<1)
+-#define USTORM_ETH_ST_CONTEXT_ENABLE_DYNAMIC_HC_SHIFT 1
+-#define USTORM_ETH_ST_CONTEXT_ENABLE_TPA (0x1<<2)
+-#define USTORM_ETH_ST_CONTEXT_ENABLE_TPA_SHIFT 2
+-#define __USTORM_ETH_ST_CONTEXT_RESERVED0 (0x1F<<3)
+-#define __USTORM_ETH_ST_CONTEXT_RESERVED0_SHIFT 3
++ u16 bd_buff_size;
++ u8 statistics_counter_id;
++ u8 mc_alignment_log_size;
+ #elif defined(__LITTLE_ENDIAN)
+- u8 flags;
+-#define USTORM_ETH_ST_CONTEXT_ENABLE_MC_ALIGNMENT (0x1<<0)
+-#define USTORM_ETH_ST_CONTEXT_ENABLE_MC_ALIGNMENT_SHIFT 0
+-#define USTORM_ETH_ST_CONTEXT_ENABLE_DYNAMIC_HC (0x1<<1)
+-#define USTORM_ETH_ST_CONTEXT_ENABLE_DYNAMIC_HC_SHIFT 1
+-#define USTORM_ETH_ST_CONTEXT_ENABLE_TPA (0x1<<2)
+-#define USTORM_ETH_ST_CONTEXT_ENABLE_TPA_SHIFT 2
+-#define __USTORM_ETH_ST_CONTEXT_RESERVED0 (0x1F<<3)
+-#define __USTORM_ETH_ST_CONTEXT_RESERVED0_SHIFT 3
+- u8 num_rss;
+- u16 __num_of_returned_cqes;
++ u8 mc_alignment_log_size;
++ u8 statistics_counter_id;
++ u16 bd_buff_size;
+ #endif
+ #if defined(__BIG_ENDIAN)
+- u16 mc_alignment_size;
+- u16 agg_threshold;
++ u8 __local_sge_prod;
++ u8 __local_bd_prod;
++ u16 sge_buff_size;
+ #elif defined(__LITTLE_ENDIAN)
+- u16 agg_threshold;
+- u16 mc_alignment_size;
++ u16 sge_buff_size;
++ u8 __local_bd_prod;
++ u8 __local_sge_prod;
+ #endif
++ u32 reserved;
++ u32 bd_page_base_lo;
++ u32 bd_page_base_hi;
++ u32 sge_page_base_lo;
++ u32 sge_page_base_hi;
++};
++
++/*
++ * The eth Rx Buffer Descriptor
++ */
++struct eth_rx_bd {
++ u32 addr_lo;
++ u32 addr_hi;
++};
++
++/*
++ * The eth Rx SGE Descriptor
++ */
++struct eth_rx_sge {
++ u32 addr_lo;
++ u32 addr_hi;
++};
++
++/*
++ * Local BDs and SGEs rings (in ETH)
++ */
++struct eth_local_rx_rings {
+ struct eth_rx_bd __local_bd_ring[16];
++ struct eth_rx_sge __local_sge_ring[12];
++};
++
++/*
++ * The eth storm context of Ustorm
++ */
++struct ustorm_eth_st_context {
++ struct ustorm_eth_st_context_config common;
++ struct eth_local_rx_rings __rings;
+ };
+
+ /*
+@@ -1088,9 +1589,9 @@ struct xstorm_eth_extra_ag_context_section {
+ #if defined(__BIG_ENDIAN)
+ u16 __reserved3;
+ u8 __reserved2;
+- u8 __agg_misc7;
++ u8 __da_only_cnt;
+ #elif defined(__LITTLE_ENDIAN)
+- u8 __agg_misc7;
++ u8 __da_only_cnt;
+ u8 __reserved2;
+ u16 __reserved3;
+ #endif
+@@ -1368,11 +1869,17 @@ struct timers_block_context {
+ u32 __reserved_0;
+ u32 __reserved_1;
+ u32 __reserved_2;
+- u32 __reserved_flags;
++ u32 flags;
++#define __TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS (0x3<<0)
++#define __TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS_SHIFT 0
++#define TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG (0x1<<2)
++#define TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG_SHIFT 2
++#define __TIMERS_BLOCK_CONTEXT_RESERVED0 (0x1FFFFFFF<<3)
++#define __TIMERS_BLOCK_CONTEXT_RESERVED0_SHIFT 3
+ };
+
+ /*
+- * structure for easy accessability to assembler
++ * structure for easy accessibility to assembler
+ */
+ struct eth_tx_bd_flags {
+ u8 as_bitfield;
+@@ -1478,11 +1985,19 @@ struct xstorm_eth_st_context {
+ u32 tx_bd_page_base_hi;
+ #if defined(__BIG_ENDIAN)
+ u16 tx_bd_cons;
+- u8 __reserved0;
++ u8 statistics_data;
++#define XSTORM_ETH_ST_CONTEXT_STATISTICS_COUNTER_ID (0x7F<<0)
++#define XSTORM_ETH_ST_CONTEXT_STATISTICS_COUNTER_ID_SHIFT 0
++#define XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE (0x1<<7)
++#define XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE_SHIFT 7
+ u8 __local_tx_bd_prod;
+ #elif defined(__LITTLE_ENDIAN)
+ u8 __local_tx_bd_prod;
+- u8 __reserved0;
++ u8 statistics_data;
++#define XSTORM_ETH_ST_CONTEXT_STATISTICS_COUNTER_ID (0x7F<<0)
++#define XSTORM_ETH_ST_CONTEXT_STATISTICS_COUNTER_ID_SHIFT 0
++#define XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE (0x1<<7)
++#define XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE_SHIFT 7
+ u16 tx_bd_cons;
+ #endif
+ u32 db_data_addr_lo;
+@@ -1526,7 +2041,7 @@ struct eth_context {
+
+
+ /*
+- * ethernet doorbell
++ * Ethernet doorbell
+ */
+ struct eth_tx_doorbell {
+ #if defined(__BIG_ENDIAN)
+@@ -1559,7 +2074,7 @@ struct eth_tx_doorbell {
+ struct ustorm_def_status_block {
+ u16 index_values[HC_USTORM_DEF_SB_NUM_INDICES];
+ u16 status_block_index;
+- u8 reserved0;
++ u8 func;
+ u8 status_block_id;
+ u32 __flags;
+ };
+@@ -1570,7 +2085,7 @@ struct ustorm_def_status_block {
+ struct cstorm_def_status_block {
+ u16 index_values[HC_CSTORM_DEF_SB_NUM_INDICES];
+ u16 status_block_index;
+- u8 reserved0;
++ u8 func;
+ u8 status_block_id;
+ u32 __flags;
+ };
+@@ -1581,7 +2096,7 @@ struct cstorm_def_status_block {
+ struct xstorm_def_status_block {
+ u16 index_values[HC_XSTORM_DEF_SB_NUM_INDICES];
+ u16 status_block_index;
+- u8 reserved0;
++ u8 func;
+ u8 status_block_id;
+ u32 __flags;
+ };
+@@ -1592,7 +2107,7 @@ struct xstorm_def_status_block {
+ struct tstorm_def_status_block {
+ u16 index_values[HC_TSTORM_DEF_SB_NUM_INDICES];
+ u16 status_block_index;
+- u8 reserved0;
++ u8 func;
+ u8 status_block_id;
+ u32 __flags;
+ };
+@@ -1615,7 +2130,7 @@ struct host_def_status_block {
+ struct ustorm_status_block {
+ u16 index_values[HC_USTORM_SB_NUM_INDICES];
+ u16 status_block_index;
+- u8 reserved0;
++ u8 func;
+ u8 status_block_id;
+ u32 __flags;
+ };
+@@ -1626,7 +2141,7 @@ struct ustorm_status_block {
+ struct cstorm_status_block {
+ u16 index_values[HC_CSTORM_SB_NUM_INDICES];
+ u16 status_block_index;
+- u8 reserved0;
++ u8 func;
+ u8 status_block_id;
+ u32 __flags;
+ };
+@@ -1644,9 +2159,9 @@ struct host_status_block {
+ * The data for RSS setup ramrod
+ */
+ struct eth_client_setup_ramrod_data {
+- u32 client_id_5b;
+- u8 is_rdma_1b;
+- u8 reserved0;
++ u32 client_id;
++ u8 is_rdma;
++ u8 is_fcoe;
+ u16 reserved1;
+ };
+
+@@ -1664,20 +2179,21 @@ struct eth_dynamic_hc_config {
+ * regular eth FP CQE parameters struct
+ */
+ struct eth_fast_path_rx_cqe {
+- u8 type;
+- u8 error_type_flags;
+-#define ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG (0x1<<0)
+-#define ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG_SHIFT 0
+-#define ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG (0x1<<1)
+-#define ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG_SHIFT 1
+-#define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG (0x1<<2)
+-#define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG_SHIFT 2
+-#define ETH_FAST_PATH_RX_CQE_START_FLG (0x1<<3)
+-#define ETH_FAST_PATH_RX_CQE_START_FLG_SHIFT 3
+-#define ETH_FAST_PATH_RX_CQE_END_FLG (0x1<<4)
+-#define ETH_FAST_PATH_RX_CQE_END_FLG_SHIFT 4
+-#define ETH_FAST_PATH_RX_CQE_RESERVED0 (0x7<<5)
+-#define ETH_FAST_PATH_RX_CQE_RESERVED0_SHIFT 5
++ u8 type_error_flags;
++#define ETH_FAST_PATH_RX_CQE_TYPE (0x1<<0)
++#define ETH_FAST_PATH_RX_CQE_TYPE_SHIFT 0
++#define ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG (0x1<<1)
++#define ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG_SHIFT 1
++#define ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG (0x1<<2)
++#define ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG_SHIFT 2
++#define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG (0x1<<3)
++#define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG_SHIFT 3
++#define ETH_FAST_PATH_RX_CQE_START_FLG (0x1<<4)
++#define ETH_FAST_PATH_RX_CQE_START_FLG_SHIFT 4
++#define ETH_FAST_PATH_RX_CQE_END_FLG (0x1<<5)
++#define ETH_FAST_PATH_RX_CQE_END_FLG_SHIFT 5
++#define ETH_FAST_PATH_RX_CQE_RESERVED0 (0x3<<6)
++#define ETH_FAST_PATH_RX_CQE_RESERVED0_SHIFT 6
+ u8 status_flags;
+ #define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE (0x7<<0)
+ #define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE_SHIFT 0
+@@ -1692,11 +2208,13 @@ struct eth_fast_path_rx_cqe {
+ #define ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG (0x1<<7)
+ #define ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG_SHIFT 7
+ u8 placement_offset;
++ u8 queue_index;
+ u32 rss_hash_result;
+ u16 vlan_tag;
+ u16 pkt_len;
+- u16 queue_index;
++ u16 len_on_bd;
+ struct parsing_flags pars_flags;
++ u16 sgl[8];
+ };
+
+
+@@ -1704,12 +2222,29 @@ struct eth_fast_path_rx_cqe {
+ * The data for RSS setup ramrod
+ */
+ struct eth_halt_ramrod_data {
+- u32 client_id_5b;
++ u32 client_id;
+ u32 reserved0;
+ };
+
+
+ /*
++ * The data for statistics query ramrod
++ */
++struct eth_query_ramrod_data {
++#if defined(__BIG_ENDIAN)
++ u8 reserved0;
++ u8 collect_port;
++ u16 drv_counter;
++#elif defined(__LITTLE_ENDIAN)
++ u16 drv_counter;
++ u8 collect_port;
++ u8 reserved0;
++#endif
++ u32 ctr_id_vector;
++};
++
++
++/*
+ * Place holder for ramrods protocol specific data
+ */
+ struct ramrod_data {
+@@ -1718,7 +2253,7 @@ struct ramrod_data {
+ };
+
+ /*
+- * union for ramrod data for ethernet protocol (CQE) (force size of 16 bits)
++ * union for ramrod data for Ethernet protocol (CQE) (force size of 16 bits)
+ */
+ union eth_ramrod_data {
+ struct ramrod_data general;
+@@ -1739,15 +2274,20 @@ struct eth_rx_bd_next_page {
+ * Eth Rx Cqe structure- general structure for ramrods
+ */
+ struct common_ramrod_eth_rx_cqe {
+- u8 type;
+- u8 conn_type_3b;
+- u16 reserved;
++ u8 ramrod_type;
++#define COMMON_RAMROD_ETH_RX_CQE_TYPE (0x1<<0)
++#define COMMON_RAMROD_ETH_RX_CQE_TYPE_SHIFT 0
++#define COMMON_RAMROD_ETH_RX_CQE_RESERVED0 (0x7F<<1)
++#define COMMON_RAMROD_ETH_RX_CQE_RESERVED0_SHIFT 1
++ u8 conn_type;
++ u16 reserved1;
+ u32 conn_and_cmd_data;
+ #define COMMON_RAMROD_ETH_RX_CQE_CID (0xFFFFFF<<0)
+ #define COMMON_RAMROD_ETH_RX_CQE_CID_SHIFT 0
+ #define COMMON_RAMROD_ETH_RX_CQE_CMD_ID (0xFF<<24)
+ #define COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT 24
+ struct ramrod_data protocol_data;
++ u32 reserved2[4];
+ };
+
+ /*
+@@ -1756,8 +2296,7 @@ struct common_ramrod_eth_rx_cqe {
+ struct eth_rx_cqe_next_page {
+ u32 addr_lo;
+ u32 addr_hi;
+- u32 reserved0;
+- u32 reserved1;
++ u32 reserved[6];
+ };
+
+ /*
+@@ -1787,13 +2326,8 @@ struct spe_hdr {
+ u16 reserved;
+ };
+
+-struct regpair {
+- u32 lo;
+- u32 hi;
+-};
+-
+ /*
+- * ethernet slow path element
++ * Ethernet slow path element
+ */
+ union eth_specific_data {
+ u8 protocol_data[8];
+@@ -1802,10 +2336,11 @@ union eth_specific_data {
+ struct eth_halt_ramrod_data halt_ramrod_data;
+ struct regpair leading_cqe_addr;
+ struct regpair update_data_addr;
++ struct eth_query_ramrod_data query_ramrod_data;
+ };
+
+ /*
+- * ethernet slow path element
++ * Ethernet slow path element
+ */
+ struct eth_spe {
+ struct spe_hdr hdr;
+@@ -1824,10 +2359,13 @@ struct eth_tx_db_data {
+
+
+ /*
+- * Common configuration parameters per port in Tstorm
++ * Common configuration parameters per function in Tstorm
+ */
+ struct tstorm_eth_function_common_config {
+- u32 config_flags;
++#if defined(__BIG_ENDIAN)
++ u8 leading_client_id;
++ u8 rss_result_mask;
++ u16 config_flags;
+ #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY (0x1<<0)
+ #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY_SHIFT 0
+ #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY (0x1<<1)
+@@ -1836,21 +2374,40 @@ struct tstorm_eth_function_common_config {
+ #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY_SHIFT 2
+ #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY (0x1<<3)
+ #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY_SHIFT 3
+-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_ENABLE (0x1<<4)
+-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_ENABLE_SHIFT 4
+-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_DEFAULT_ENABLE (0x1<<5)
+-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_DEFAULT_ENABLE_SHIFT 5
+-#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0 (0x3FFFFFF<<6)
+-#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0_SHIFT 6
+-#if defined(__BIG_ENDIAN)
+- u16 __secondary_vlan_id;
+- u8 leading_client_id;
+- u8 rss_result_mask;
++#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE (0x7<<4)
++#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT 4
++#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_DEFAULT_ENABLE (0x1<<7)
++#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_DEFAULT_ENABLE_SHIFT 7
++#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_IN_CAM (0x1<<8)
++#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_IN_CAM_SHIFT 8
++#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM (0x1<<9)
++#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM_SHIFT 9
++#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0 (0x3F<<10)
++#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0_SHIFT 10
+ #elif defined(__LITTLE_ENDIAN)
++ u16 config_flags;
++#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY (0x1<<0)
++#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY_SHIFT 0
++#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY (0x1<<1)
++#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY_SHIFT 1
++#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY (0x1<<2)
++#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY_SHIFT 2
++#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY (0x1<<3)
++#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY_SHIFT 3
++#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE (0x7<<4)
++#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT 4
++#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_DEFAULT_ENABLE (0x1<<7)
++#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_DEFAULT_ENABLE_SHIFT 7
++#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_IN_CAM (0x1<<8)
++#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_IN_CAM_SHIFT 8
++#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM (0x1<<9)
++#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM_SHIFT 9
++#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0 (0x3F<<10)
++#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0_SHIFT 10
+ u8 rss_result_mask;
+ u8 leading_client_id;
+- u16 __secondary_vlan_id;
+ #endif
++ u16 vlan_id[2];
+ };
+
+ /*
+@@ -1866,9 +2423,9 @@ struct eth_update_ramrod_data {
+ * MAC filtering configuration command header
+ */
+ struct mac_configuration_hdr {
+- u8 length_6b;
++ u8 length;
+ u8 offset;
+- u16 reserved0;
++ u16 client_id;
+ u32 reserved1;
+ };
+
+@@ -1925,15 +2482,55 @@ struct mac_configuration_cmd {
+
+
+ /*
++ * MAC address in list for ramrod
++ */
++struct mac_configuration_entry_e1h {
++ u16 lsb_mac_addr;
++ u16 middle_mac_addr;
++ u16 msb_mac_addr;
++ u16 vlan_id;
++ u16 e1hov_id;
++ u8 client_id;
++ u8 flags;
++#define MAC_CONFIGURATION_ENTRY_E1H_PORT (0x1<<0)
++#define MAC_CONFIGURATION_ENTRY_E1H_PORT_SHIFT 0
++#define MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE (0x1<<1)
++#define MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE_SHIFT 1
++#define MAC_CONFIGURATION_ENTRY_E1H_RDMA_MAC (0x1<<2)
++#define MAC_CONFIGURATION_ENTRY_E1H_RDMA_MAC_SHIFT 2
++#define MAC_CONFIGURATION_ENTRY_E1H_RESERVED0 (0x1F<<3)
++#define MAC_CONFIGURATION_ENTRY_E1H_RESERVED0_SHIFT 3
++};
++
++/*
++ * MAC filtering configuration command
++ */
++struct mac_configuration_cmd_e1h {
++ struct mac_configuration_hdr hdr;
++ struct mac_configuration_entry_e1h config_table[32];
++};
++
++
++/*
++ * approximate-match multicast filtering for E1H per function in Tstorm
++ */
++struct tstorm_eth_approximate_match_multicast_filtering {
++ u32 mcast_add_hash_bit_array[8];
++};
++
++
++/*
+ * Configuration parameters per client in Tstorm
+ */
+ struct tstorm_eth_client_config {
+ #if defined(__BIG_ENDIAN)
+- u16 statistics_counter_id;
++ u8 max_sges_for_packet;
++ u8 statistics_counter_id;
+ u16 mtu;
+ #elif defined(__LITTLE_ENDIAN)
+ u16 mtu;
+- u16 statistics_counter_id;
++ u8 statistics_counter_id;
++ u8 max_sges_for_packet;
+ #endif
+ #if defined(__BIG_ENDIAN)
+ u16 drop_flags;
+@@ -1941,42 +2538,46 @@ struct tstorm_eth_client_config {
+ #define TSTORM_ETH_CLIENT_CONFIG_DROP_IP_CS_ERR_SHIFT 0
+ #define TSTORM_ETH_CLIENT_CONFIG_DROP_TCP_CS_ERR (0x1<<1)
+ #define TSTORM_ETH_CLIENT_CONFIG_DROP_TCP_CS_ERR_SHIFT 1
+-#define TSTORM_ETH_CLIENT_CONFIG_DROP_MAC_ERR (0x1<<2)
+-#define TSTORM_ETH_CLIENT_CONFIG_DROP_MAC_ERR_SHIFT 2
+-#define TSTORM_ETH_CLIENT_CONFIG_DROP_TTL0 (0x1<<3)
+-#define TSTORM_ETH_CLIENT_CONFIG_DROP_TTL0_SHIFT 3
+-#define TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR (0x1<<4)
+-#define TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR_SHIFT 4
+-#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED1 (0x7FF<<5)
+-#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED1_SHIFT 5
++#define TSTORM_ETH_CLIENT_CONFIG_DROP_TTL0 (0x1<<2)
++#define TSTORM_ETH_CLIENT_CONFIG_DROP_TTL0_SHIFT 2
++#define TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR (0x1<<3)
++#define TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR_SHIFT 3
++#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED1 (0xFFF<<4)
++#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED1_SHIFT 4
+ u16 config_flags;
+-#define TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE (0x1<<0)
+-#define TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE_SHIFT 0
+-#define TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE (0x1<<1)
+-#define TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE_SHIFT 1
+-#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED0 (0x3FFF<<2)
+-#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED0_SHIFT 2
++#define TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE (0x1<<0)
++#define TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE_SHIFT 0
++#define TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE (0x1<<1)
++#define TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE_SHIFT 1
++#define TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE (0x1<<2)
++#define TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE_SHIFT 2
++#define TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING (0x1<<3)
++#define TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING_SHIFT 3
++#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED0 (0xFFF<<4)
++#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED0_SHIFT 4
+ #elif defined(__LITTLE_ENDIAN)
+ u16 config_flags;
+-#define TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE (0x1<<0)
+-#define TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE_SHIFT 0
+-#define TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE (0x1<<1)
+-#define TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE_SHIFT 1
+-#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED0 (0x3FFF<<2)
+-#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED0_SHIFT 2
++#define TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE (0x1<<0)
++#define TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE_SHIFT 0
++#define TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE (0x1<<1)
++#define TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE_SHIFT 1
++#define TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE (0x1<<2)
++#define TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE_SHIFT 2
++#define TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING (0x1<<3)
++#define TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING_SHIFT 3
++#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED0 (0xFFF<<4)
++#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED0_SHIFT 4
+ u16 drop_flags;
+ #define TSTORM_ETH_CLIENT_CONFIG_DROP_IP_CS_ERR (0x1<<0)
+ #define TSTORM_ETH_CLIENT_CONFIG_DROP_IP_CS_ERR_SHIFT 0
+ #define TSTORM_ETH_CLIENT_CONFIG_DROP_TCP_CS_ERR (0x1<<1)
+ #define TSTORM_ETH_CLIENT_CONFIG_DROP_TCP_CS_ERR_SHIFT 1
+-#define TSTORM_ETH_CLIENT_CONFIG_DROP_MAC_ERR (0x1<<2)
+-#define TSTORM_ETH_CLIENT_CONFIG_DROP_MAC_ERR_SHIFT 2
+-#define TSTORM_ETH_CLIENT_CONFIG_DROP_TTL0 (0x1<<3)
+-#define TSTORM_ETH_CLIENT_CONFIG_DROP_TTL0_SHIFT 3
+-#define TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR (0x1<<4)
+-#define TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR_SHIFT 4
+-#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED1 (0x7FF<<5)
+-#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED1_SHIFT 5
++#define TSTORM_ETH_CLIENT_CONFIG_DROP_TTL0 (0x1<<2)
++#define TSTORM_ETH_CLIENT_CONFIG_DROP_TTL0_SHIFT 2
++#define TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR (0x1<<3)
++#define TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR_SHIFT 3
++#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED1 (0xFFF<<4)
++#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED1_SHIFT 4
+ #endif
+ };
+
+@@ -1992,103 +2593,119 @@ struct tstorm_eth_mac_filter_config {
+ u32 bcast_drop_all;
+ u32 bcast_accept_all;
+ u32 strict_vlan;
+- u32 __secondary_vlan_clients;
++ u32 vlan_filter[2];
++ u32 reserved;
+ };
+
+
+-struct rate_shaping_per_protocol {
++/*
++ * common flag to indicate existance of TPA.
++ */
++struct tstorm_eth_tpa_exist {
+ #if defined(__BIG_ENDIAN)
+- u16 reserved0;
+- u16 protocol_rate;
++ u16 reserved1;
++ u8 reserved0;
++ u8 tpa_exist;
+ #elif defined(__LITTLE_ENDIAN)
+- u16 protocol_rate;
+- u16 reserved0;
++ u8 tpa_exist;
++ u8 reserved0;
++ u16 reserved1;
+ #endif
+- u32 protocol_quota;
+- s32 current_credit;
+- u32 reserved;
++ u32 reserved2;
+ };
+
+-struct rate_shaping_vars {
+- struct rate_shaping_per_protocol protocol_vars[NUM_OF_PROTOCOLS];
+- u32 pause_mask;
+- u32 periodic_stop;
+- u32 rs_periodic_timeout;
+- u32 rs_threshold;
+- u32 last_periodic_time;
+- u32 reserved;
+-};
+
+-struct fairness_per_protocol {
+- u32 credit_delta;
+- s32 fair_credit;
++/*
++ * Three RX producers for ETH
++ */
++struct ustorm_eth_rx_producers {
+ #if defined(__BIG_ENDIAN)
+- u16 reserved0;
+- u8 state;
+- u8 weight;
++ u16 bd_prod;
++ u16 cqe_prod;
+ #elif defined(__LITTLE_ENDIAN)
+- u8 weight;
+- u8 state;
+- u16 reserved0;
++ u16 cqe_prod;
++ u16 bd_prod;
+ #endif
+- u32 reserved1;
+-};
+-
+-struct fairness_vars {
+- struct fairness_per_protocol protocol_vars[NUM_OF_PROTOCOLS];
+- u32 upper_bound;
+- u32 port_rate;
+- u32 pause_mask;
+- u32 fair_threshold;
+-};
+-
+-struct safc_struct {
+- u32 cur_pause_mask;
+- u32 expire_time;
+ #if defined(__BIG_ENDIAN)
+- u16 reserved0;
+- u8 cur_cos_types;
+- u8 safc_timeout_usec;
++ u16 reserved;
++ u16 sge_prod;
+ #elif defined(__LITTLE_ENDIAN)
+- u8 safc_timeout_usec;
+- u8 cur_cos_types;
+- u16 reserved0;
++ u16 sge_prod;
++ u16 reserved;
+ #endif
+- u32 reserved1;
+ };
+
+-struct demo_struct {
++
++/*
++ * per-port SAFC demo variables
++ */
++struct cmng_flags_per_port {
+ u8 con_number[NUM_OF_PROTOCOLS];
+ #if defined(__BIG_ENDIAN)
+- u8 reserved1;
+ u8 fairness_enable;
+ u8 rate_shaping_enable;
+- u8 cmng_enable;
++ u8 cmng_protocol_enable;
++ u8 cmng_vn_enable;
+ #elif defined(__LITTLE_ENDIAN)
+- u8 cmng_enable;
++ u8 cmng_vn_enable;
++ u8 cmng_protocol_enable;
+ u8 rate_shaping_enable;
+ u8 fairness_enable;
+- u8 reserved1;
+ #endif
+ };
+
+-struct cmng_struct {
+- struct rate_shaping_vars rs_vars;
+- struct fairness_vars fair_vars;
+- struct safc_struct safc_vars;
+- struct demo_struct demo_vars;
++
++/*
++ * per-port rate shaping variables
++ */
++struct rate_shaping_vars_per_port {
++ u32 rs_periodic_timeout;
++ u32 rs_threshold;
+ };
+
+
+-struct cos_to_protocol {
+- u8 mask[MAX_COS_NUMBER];
++/*
++ * per-port fairness variables
++ */
++struct fairness_vars_per_port {
++ u32 upper_bound;
++ u32 fair_threshold;
++ u32 fairness_timeout;
+ };
+
+
+ /*
+- * Common statistics collected by the Xstorm (per port)
++ * per-port SAFC variables
+ */
+-struct xstorm_common_stats {
++struct safc_struct_per_port {
++#if defined(__BIG_ENDIAN)
++ u16 __reserved1;
++ u8 __reserved0;
++ u8 safc_timeout_usec;
++#elif defined(__LITTLE_ENDIAN)
++ u8 safc_timeout_usec;
++ u8 __reserved0;
++ u16 __reserved1;
++#endif
++ u16 cos_to_pause_mask[NUM_OF_SAFC_BITS];
++};
++
++
++/*
++ * Per-port congestion management variables
++ */
++struct cmng_struct_per_port {
++ struct rate_shaping_vars_per_port rs_vars;
++ struct fairness_vars_per_port fair_vars;
++ struct safc_struct_per_port safc_vars;
++ struct cmng_flags_per_port flags;
++};
++
++
++/*
++ * Protocol-common statistics collected by the Xstorm (per client)
++ */
++struct xstorm_per_client_stats {
+ struct regpair total_sent_bytes;
+ u32 total_sent_pkts;
+ u32 unicast_pkts_sent;
+@@ -2097,9 +2714,31 @@ struct xstorm_common_stats {
+ u32 multicast_pkts_sent;
+ u32 broadcast_pkts_sent;
+ struct regpair broadcast_bytes_sent;
+- struct regpair done;
++ u16 stats_counter;
++ u16 reserved0;
++ u32 reserved1;
+ };
+
++
++/*
++ * Common statistics collected by the Xstorm (per port)
++ */
++struct xstorm_common_stats {
++ struct xstorm_per_client_stats client_statistics[MAX_X_STAT_COUNTER_ID];
++};
++
++
++/*
++ * Protocol-common statistics collected by the Tstorm (per port)
++ */
++struct tstorm_per_port_stats {
++ u32 mac_filter_discard;
++ u32 xxoverflow_discard;
++ u32 brb_truncate_discard;
++ u32 mac_discard;
++};
++
++
+ /*
+ * Protocol-common statistics collected by the Tstorm (per client)
+ */
+@@ -2117,24 +2756,21 @@ struct tstorm_per_client_stats {
+ u32 rcv_multicast_pkts;
+ u32 no_buff_discard;
+ u32 ttl0_discard;
+- u32 mac_discard;
+- u32 reserved;
++ u16 stats_counter;
++ u16 reserved0;
++ u32 reserved1;
+ };
+
+ /*
+- * Protocol-common statistics collected by the Tstorm (per port)
++ * Protocol-common statistics collected by the Tstorm
+ */
+ struct tstorm_common_stats {
+- struct tstorm_per_client_stats client_statistics[MAX_T_STAT_COUNTER_ID];
+- u32 mac_filter_discard;
+- u32 xxoverflow_discard;
+- u32 brb_truncate_discard;
+- u32 reserved;
+- struct regpair done;
++ struct tstorm_per_port_stats port_statistics;
++ struct tstorm_per_client_stats client_statistics[MAX_T_STAT_COUNTER_ID];
+ };
+
+ /*
+- * Eth statistics query sturcture for the eth_stats_quesry ramrod
++ * Eth statistics query structure for the eth_stats_query ramrod
+ */
+ struct eth_stats_query {
+ struct xstorm_common_stats xstorm_common;
+@@ -2143,25 +2779,39 @@ struct eth_stats_query {
+
+
+ /*
++ * per-vnic fairness variables
++ */
++struct fairness_vars_per_vn {
++ u32 protocol_credit_delta[NUM_OF_PROTOCOLS];
++ u32 vn_credit_delta;
++ u32 __reserved0;
++};
++
++
++/*
+ * FW version stored in the Xstorm RAM
+ */
+ struct fw_version {
+ #if defined(__BIG_ENDIAN)
+- u16 patch;
+- u8 primary;
+- u8 client;
++ u8 engineering;
++ u8 revision;
++ u8 minor;
++ u8 major;
+ #elif defined(__LITTLE_ENDIAN)
+- u8 client;
+- u8 primary;
+- u16 patch;
++ u8 major;
++ u8 minor;
++ u8 revision;
++ u8 engineering;
+ #endif
+ u32 flags;
+ #define FW_VERSION_OPTIMIZED (0x1<<0)
+ #define FW_VERSION_OPTIMIZED_SHIFT 0
+ #define FW_VERSION_BIG_ENDIEN (0x1<<1)
+ #define FW_VERSION_BIG_ENDIEN_SHIFT 1
+-#define __FW_VERSION_RESERVED (0x3FFFFFFF<<2)
+-#define __FW_VERSION_RESERVED_SHIFT 2
++#define FW_VERSION_CHIP_VERSION (0x3<<2)
++#define FW_VERSION_CHIP_VERSION_SHIFT 2
++#define __FW_VERSION_RESERVED (0xFFFFFFF<<4)
++#define __FW_VERSION_RESERVED_SHIFT 4
+ };
+
+
+@@ -2169,15 +2819,10 @@ struct fw_version {
+ * FW version stored in first line of pram
+ */
+ struct pram_fw_version {
+-#if defined(__BIG_ENDIAN)
+- u16 patch;
+- u8 primary;
+- u8 client;
+-#elif defined(__LITTLE_ENDIAN)
+- u8 client;
+- u8 primary;
+- u16 patch;
+-#endif
++ u8 major;
++ u8 minor;
++ u8 revision;
++ u8 engineering;
+ u8 flags;
+ #define PRAM_FW_VERSION_OPTIMIZED (0x1<<0)
+ #define PRAM_FW_VERSION_OPTIMIZED_SHIFT 0
+@@ -2185,8 +2830,34 @@ struct pram_fw_version {
+ #define PRAM_FW_VERSION_STORM_ID_SHIFT 1
+ #define PRAM_FW_VERSION_BIG_ENDIEN (0x1<<3)
+ #define PRAM_FW_VERSION_BIG_ENDIEN_SHIFT 3
+-#define __PRAM_FW_VERSION_RESERVED0 (0xF<<4)
+-#define __PRAM_FW_VERSION_RESERVED0_SHIFT 4
++#define PRAM_FW_VERSION_CHIP_VERSION (0x3<<4)
++#define PRAM_FW_VERSION_CHIP_VERSION_SHIFT 4
++#define __PRAM_FW_VERSION_RESERVED0 (0x3<<6)
++#define __PRAM_FW_VERSION_RESERVED0_SHIFT 6
++};
++
++
++/*
++ * a single rate shaping counter. can be used as protocol or vnic counter
++ */
++struct rate_shaping_counter {
++ u32 quota;
++#if defined(__BIG_ENDIAN)
++ u16 __reserved0;
++ u16 rate;
++#elif defined(__LITTLE_ENDIAN)
++ u16 rate;
++ u16 __reserved0;
++#endif
++};
++
++
++/*
++ * per-vnic rate shaping variables
++ */
++struct rate_shaping_vars_per_vn {
++ struct rate_shaping_counter protocol_counters[NUM_OF_PROTOCOLS];
++ struct rate_shaping_counter vn_counter;
+ };
+
+
+diff --git a/drivers/net/bnx2x_init.h b/drivers/net/bnx2x_init.h
+index 370686e..961db49 100644
+--- a/drivers/net/bnx2x_init.h
++++ b/drivers/net/bnx2x_init.h
+@@ -22,7 +22,8 @@
+ #define INIT_ASIC 0x4
+ #define INIT_HARDWARE 0x7
+
+-#define STORM_INTMEM_SIZE (0x5800 / 4)
++#define STORM_INTMEM_SIZE_E1 (0x5800 / 4)
++#define STORM_INTMEM_SIZE_E1H (0x10000 / 4)
+ #define TSTORM_INTMEM_ADDR 0x1a0000
+ #define CSTORM_INTMEM_ADDR 0x220000
+ #define XSTORM_INTMEM_ADDR 0x2a0000
+@@ -30,7 +31,7 @@
+
+
+ /* Init operation types and structures */
+-
++/* Common for both E1 and E1H */
+ #define OP_RD 0x1 /* read single register */
+ #define OP_WR 0x2 /* write single register */
+ #define OP_IW 0x3 /* write single register using mailbox */
+@@ -38,29 +39,59 @@
+ #define OP_SI 0x5 /* copy a string using mailbox */
+ #define OP_ZR 0x6 /* clear memory */
+ #define OP_ZP 0x7 /* unzip then copy with DMAE */
+-#define OP_WB 0x8 /* copy a string using DMAE */
++#define OP_WR_64 0x8 /* write 64 bit pattern */
++#define OP_WB 0x9 /* copy a string using DMAE */
++
++/* Operation specific for E1 */
++#define OP_RD_E1 0xa /* read single register */
++#define OP_WR_E1 0xb /* write single register */
++#define OP_IW_E1 0xc /* write single register using mailbox */
++#define OP_SW_E1 0xd /* copy a string to the device */
++#define OP_SI_E1 0xe /* copy a string using mailbox */
++#define OP_ZR_E1 0xf /* clear memory */
++#define OP_ZP_E1 0x10 /* unzip then copy with DMAE */
++#define OP_WR_64_E1 0x11 /* write 64 bit pattern on E1 */
++#define OP_WB_E1 0x12 /* copy a string using DMAE */
++
++/* Operation specific for E1H */
++#define OP_RD_E1H 0x13 /* read single register */
++#define OP_WR_E1H 0x14 /* write single register */
++#define OP_IW_E1H 0x15 /* write single register using mailbox */
++#define OP_SW_E1H 0x16 /* copy a string to the device */
++#define OP_SI_E1H 0x17 /* copy a string using mailbox */
++#define OP_ZR_E1H 0x18 /* clear memory */
++#define OP_ZP_E1H 0x19 /* unzip then copy with DMAE */
++#define OP_WR_64_E1H 0x1a /* write 64 bit pattern on E1H */
++#define OP_WB_E1H 0x1b /* copy a string using DMAE */
++
++/* FPGA and EMUL specific operations */
++#define OP_WR_EMUL_E1H 0x1c /* write single register on E1H Emul */
++#define OP_WR_EMUL 0x1d /* write single register on Emulation */
++#define OP_WR_FPGA 0x1e /* write single register on FPGA */
++#define OP_WR_ASIC 0x1f /* write single register on ASIC */
++
+
+ struct raw_op {
+- u32 op :8;
+- u32 offset :24;
++ u32 op:8;
++ u32 offset:24;
+ u32 raw_data;
+ };
+
+ struct op_read {
+- u32 op :8;
+- u32 offset :24;
++ u32 op:8;
++ u32 offset:24;
+ u32 pad;
+ };
+
+ struct op_write {
+- u32 op :8;
+- u32 offset :24;
++ u32 op:8;
++ u32 offset:24;
+ u32 val;
+ };
+
+ struct op_string_write {
+- u32 op :8;
+- u32 offset :24;
++ u32 op:8;
++ u32 offset:24;
+ #ifdef __LITTLE_ENDIAN
+ u16 data_off;
+ u16 data_len;
+@@ -71,8 +102,8 @@ struct op_string_write {
+ };
+
+ struct op_zero {
+- u32 op :8;
+- u32 offset :24;
++ u32 op:8;
++ u32 offset:24;
+ u32 len;
+ };
+
+@@ -87,10 +118,6 @@ union init_op {
+ #include "bnx2x_init_values.h"
+
+ static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val);
+-
+-static void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr,
+- u32 dst_addr, u32 len32);
+-
+ static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len);
+
+ static void bnx2x_init_str_wr(struct bnx2x *bp, u32 addr, const u32 *data,
+@@ -107,9 +134,6 @@ static void bnx2x_init_str_wr(struct bnx2x *bp, u32 addr, const u32 *data,
+ }
+ }
+
+-#define INIT_MEM_WR(reg, data, reg_off, len) \
+- bnx2x_init_str_wr(bp, reg + reg_off*4, data, len)
+-
+ static void bnx2x_init_ind_wr(struct bnx2x *bp, u32 addr, const u32 *data,
+ u16 len)
+ {
+@@ -124,11 +148,117 @@ static void bnx2x_init_ind_wr(struct bnx2x *bp, u32 addr, const u32 *data,
+ }
+ }
+
++static void bnx2x_write_big_buf(struct bnx2x *bp, u32 addr, u32 len)
++{
++#ifdef USE_DMAE
++ int offset = 0;
++
++ if (bp->dmae_ready) {
++ while (len > DMAE_LEN32_WR_MAX) {
++ bnx2x_write_dmae(bp, bp->gunzip_mapping + offset,
++ addr + offset, DMAE_LEN32_WR_MAX);
++ offset += DMAE_LEN32_WR_MAX * 4;
++ len -= DMAE_LEN32_WR_MAX;
++ }
++ bnx2x_write_dmae(bp, bp->gunzip_mapping + offset,
++ addr + offset, len);
++ } else
++ bnx2x_init_str_wr(bp, addr, bp->gunzip_buf, len);
++#else
++ bnx2x_init_str_wr(bp, addr, bp->gunzip_buf, len);
++#endif
++}
++
++static void bnx2x_init_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
++{
++ if ((len * 4) > FW_BUF_SIZE) {
++ BNX2X_ERR("LARGE DMAE OPERATION ! addr 0x%x len 0x%x\n",
++ addr, len*4);
++ return;
++ }
++ memset(bp->gunzip_buf, fill, len * 4);
++
++ bnx2x_write_big_buf(bp, addr, len);
++}
++
++static void bnx2x_init_wr_64(struct bnx2x *bp, u32 addr, const u32 *data,
++ u32 len64)
++{
++ u32 buf_len32 = FW_BUF_SIZE/4;
++ u32 len = len64*2;
++ u64 data64 = 0;
++ int i;
++
++ /* 64 bit value is in a blob: first low DWORD, then high DWORD */
++ data64 = HILO_U64((*(data + 1)), (*data));
++ len64 = min((u32)(FW_BUF_SIZE/8), len64);
++ for (i = 0; i < len64; i++) {
++ u64 *pdata = ((u64 *)(bp->gunzip_buf)) + i;
++
++ *pdata = data64;
++ }
++
++ for (i = 0; i < len; i += buf_len32) {
++ u32 cur_len = min(buf_len32, len - i);
++
++ bnx2x_write_big_buf(bp, addr + i * 4, cur_len);
++ }
++}
++
++/*********************************************************
++ There are different blobs for each PRAM section.
++ In addition, each blob write operation is divided into a few operations
++ in order to decrease the amount of phys. contiguous buffer needed.
++ Thus, when we select a blob the address may be with some offset
++ from the beginning of PRAM section.
++ The same holds for the INT_TABLE sections.
++**********************************************************/
++#define IF_IS_INT_TABLE_ADDR(base, addr) \
++ if (((base) <= (addr)) && ((base) + 0x400 >= (addr)))
++
++#define IF_IS_PRAM_ADDR(base, addr) \
++ if (((base) <= (addr)) && ((base) + 0x40000 >= (addr)))
++
++static const u32 *bnx2x_sel_blob(u32 addr, const u32 *data, int is_e1)
++{
++ IF_IS_INT_TABLE_ADDR(TSEM_REG_INT_TABLE, addr)
++ data = is_e1 ? tsem_int_table_data_e1 :
++ tsem_int_table_data_e1h;
++ else
++ IF_IS_INT_TABLE_ADDR(CSEM_REG_INT_TABLE, addr)
++ data = is_e1 ? csem_int_table_data_e1 :
++ csem_int_table_data_e1h;
++ else
++ IF_IS_INT_TABLE_ADDR(USEM_REG_INT_TABLE, addr)
++ data = is_e1 ? usem_int_table_data_e1 :
++ usem_int_table_data_e1h;
++ else
++ IF_IS_INT_TABLE_ADDR(XSEM_REG_INT_TABLE, addr)
++ data = is_e1 ? xsem_int_table_data_e1 :
++ xsem_int_table_data_e1h;
++ else
++ IF_IS_PRAM_ADDR(TSEM_REG_PRAM, addr)
++ data = is_e1 ? tsem_pram_data_e1 : tsem_pram_data_e1h;
++ else
++ IF_IS_PRAM_ADDR(CSEM_REG_PRAM, addr)
++ data = is_e1 ? csem_pram_data_e1 : csem_pram_data_e1h;
++ else
++ IF_IS_PRAM_ADDR(USEM_REG_PRAM, addr)
++ data = is_e1 ? usem_pram_data_e1 : usem_pram_data_e1h;
++ else
++ IF_IS_PRAM_ADDR(XSEM_REG_PRAM, addr)
++ data = is_e1 ? xsem_pram_data_e1 : xsem_pram_data_e1h;
++
++ return data;
++}
++
+ static void bnx2x_init_wr_wb(struct bnx2x *bp, u32 addr, const u32 *data,
+- u32 len, int gunzip)
++ u32 len, int gunzip, int is_e1, u32 blob_off)
+ {
+ int offset = 0;
+
++ data = bnx2x_sel_blob(addr, data, is_e1) + blob_off;
++
+ if (gunzip) {
+ int rc;
+ #ifdef __BIG_ENDIAN
+@@ -143,64 +273,59 @@ static void bnx2x_init_wr_wb(struct bnx2x *bp, u32 addr, const u32 *data,
+ #endif
+ rc = bnx2x_gunzip(bp, (u8 *)data, len);
+ if (rc) {
+- DP(NETIF_MSG_HW, "gunzip failed ! rc %d\n", rc);
++ BNX2X_ERR("gunzip failed ! rc %d\n", rc);
+ return;
+ }
+ len = bp->gunzip_outlen;
+ #ifdef __BIG_ENDIAN
+ kfree(temp);
+ for (i = 0; i < len; i++)
+- ((u32 *)bp->gunzip_buf)[i] =
++ ((u32 *)bp->gunzip_buf)[i] =
+ swab32(((u32 *)bp->gunzip_buf)[i]);
+ #endif
+ } else {
+ if ((len * 4) > FW_BUF_SIZE) {
+- BNX2X_ERR("LARGE DMAE OPERATION ! len 0x%x\n", len*4);
++ BNX2X_ERR("LARGE DMAE OPERATION ! "
++ "addr 0x%x len 0x%x\n", addr, len*4);
+ return;
+ }
+ memcpy(bp->gunzip_buf, data, len * 4);
+ }
+
+- while (len > DMAE_LEN32_MAX) {
+- bnx2x_write_dmae(bp, bp->gunzip_mapping + offset,
+- addr + offset, DMAE_LEN32_MAX);
+- offset += DMAE_LEN32_MAX * 4;
+- len -= DMAE_LEN32_MAX;
+- }
+- bnx2x_write_dmae(bp, bp->gunzip_mapping + offset, addr + offset, len);
+-}
+-
+-#define INIT_MEM_WB(reg, data, reg_off, len) \
+- bnx2x_init_wr_wb(bp, reg + reg_off*4, data, len, 0)
+-
+-#define INIT_GUNZIP_DMAE(reg, data, reg_off, len) \
+- bnx2x_init_wr_wb(bp, reg + reg_off*4, data, len, 1)
+-
+-static void bnx2x_init_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
+-{
+- int offset = 0;
+-
+- if ((len * 4) > FW_BUF_SIZE) {
+- BNX2X_ERR("LARGE DMAE OPERATION ! len 0x%x\n", len * 4);
+- return;
+- }
+- memset(bp->gunzip_buf, fill, len * 4);
+-
+- while (len > DMAE_LEN32_MAX) {
++ if (bp->dmae_ready) {
++ while (len > DMAE_LEN32_WR_MAX) {
++ bnx2x_write_dmae(bp, bp->gunzip_mapping + offset,
++ addr + offset, DMAE_LEN32_WR_MAX);
++ offset += DMAE_LEN32_WR_MAX * 4;
++ len -= DMAE_LEN32_WR_MAX;
++ }
+ bnx2x_write_dmae(bp, bp->gunzip_mapping + offset,
+- addr + offset, DMAE_LEN32_MAX);
+- offset += DMAE_LEN32_MAX * 4;
+- len -= DMAE_LEN32_MAX;
+- }
+- bnx2x_write_dmae(bp, bp->gunzip_mapping + offset, addr + offset, len);
++ addr + offset, len);
++ } else
++ bnx2x_init_ind_wr(bp, addr, bp->gunzip_buf, len);
+ }
+
+ static void bnx2x_init_block(struct bnx2x *bp, u32 op_start, u32 op_end)
+ {
+- int i;
++ int is_e1 = CHIP_IS_E1(bp);
++ int is_e1h = CHIP_IS_E1H(bp);
++ int is_emul_e1h = (CHIP_REV_IS_EMUL(bp) && is_e1h);
++ int hw_wr, i;
+ union init_op *op;
+ u32 op_type, addr, len;
+- const u32 *data;
++ const u32 *data, *data_base;
++
++ if (CHIP_REV_IS_FPGA(bp))
++ hw_wr = OP_WR_FPGA;
++ else if (CHIP_REV_IS_EMUL(bp))
++ hw_wr = OP_WR_EMUL;
++ else
++ hw_wr = OP_WR_ASIC;
++
++ if (is_e1)
++ data_base = init_data_e1;
++ else /* CHIP_IS_E1H(bp) */
++ data_base = init_data_e1h;
+
+ for (i = op_start; i < op_end; i++) {
+
+@@ -209,7 +334,30 @@ static void bnx2x_init_block(struct bnx2x *bp, u32 op_start, u32 op_end)
+ op_type = op->str_wr.op;
+ addr = op->str_wr.offset;
+ len = op->str_wr.data_len;
+- data = init_data + op->str_wr.data_off;
++ data = data_base + op->str_wr.data_off;
++
++ /* careful! it must be in order */
++ if (unlikely(op_type > OP_WB)) {
++
++ /* If E1 only */
++ if (op_type <= OP_WB_E1) {
++ if (is_e1)
++ op_type -= (OP_RD_E1 - OP_RD);
++
++ /* If E1H only */
++ } else if (op_type <= OP_WB_E1H) {
++ if (is_e1h)
++ op_type -= (OP_RD_E1H - OP_RD);
++ }
++
++ /* HW/EMUL specific */
++ if (op_type == hw_wr)
++ op_type = OP_WR;
++
++ /* EMUL on E1H is special */
++ if ((op_type == OP_WR_EMUL_E1H) && is_emul_e1h)
++ op_type = OP_WR;
++ }
+
+ switch (op_type) {
+ case OP_RD:
+@@ -222,7 +370,7 @@ static void bnx2x_init_block(struct bnx2x *bp, u32 op_start, u32 op_end)
+ bnx2x_init_str_wr(bp, addr, data, len);
+ break;
+ case OP_WB:
+- bnx2x_init_wr_wb(bp, addr, data, len, 0);
++ bnx2x_init_wr_wb(bp, addr, data, len, 0, is_e1, 0);
+ break;
+ case OP_SI:
+ bnx2x_init_ind_wr(bp, addr, data, len);
+@@ -231,10 +379,21 @@ static void bnx2x_init_block(struct bnx2x *bp, u32 op_start, u32 op_end)
+ bnx2x_init_fill(bp, addr, 0, op->zero.len);
+ break;
+ case OP_ZP:
+- bnx2x_init_wr_wb(bp, addr, data, len, 1);
++ bnx2x_init_wr_wb(bp, addr, data, len, 1, is_e1,
++ op->str_wr.data_off);
++ break;
++ case OP_WR_64:
++ bnx2x_init_wr_64(bp, addr, data, len);
+ break;
+ default:
+- BNX2X_ERR("BAD init operation!\n");
++ /* happens whenever an op is of a diff HW */
++#if 0
++ DP(NETIF_MSG_HW, "skipping init operation "
++ "index %d[%d:%d]: type %d addr 0x%x "
++ "len %d(0x%x)\n",
++ i, op_start, op_end, op_type, addr, len, len);
++#endif
++ break;
+ }
+ }
+ }
+@@ -245,7 +404,7 @@ static void bnx2x_init_block(struct bnx2x *bp, u32 op_start, u32 op_end)
+ ****************************************************************************/
+ /*
+ * This code configures the PCI read/write arbiter
+- * which implements a wighted round robin
++ * which implements a weighted round robin
+ * between the virtual queues in the chip.
+ *
+ * The values were derived for each PCI max payload and max request size.
+@@ -315,7 +474,7 @@ static const struct arb_line write_arb_data[NUM_WR_Q][MAX_WR_ORD + 1] = {
+ {{8 , 64 , 25}, {16 , 64 , 41}, {32 , 64 , 81} }
+ };
+
+-/* register adresses for read queues */
++/* register addresses for read queues */
+ static const struct arb_line read_arb_addr[NUM_RD_Q-1] = {
+ {PXP2_REG_RQ_BW_RD_L0, PXP2_REG_RQ_BW_RD_ADD0,
+ PXP2_REG_RQ_BW_RD_UBOUND0},
+@@ -375,7 +534,7 @@ static const struct arb_line read_arb_addr[NUM_RD_Q-1] = {
+ PXP2_REG_PSWRQ_BW_UB28}
+ };
+
+-/* register adresses for wrtie queues */
++/* register addresses for write queues */
+ static const struct arb_line write_arb_addr[NUM_WR_Q-1] = {
+ {PXP2_REG_PSWRQ_BW_L1, PXP2_REG_PSWRQ_BW_ADD1,
+ PXP2_REG_PSWRQ_BW_UB1},
+@@ -405,14 +564,20 @@ static const struct arb_line write_arb_addr[NUM_WR_Q-1] = {
+
+ static void bnx2x_init_pxp(struct bnx2x *bp)
+ {
++ u16 devctl;
+ int r_order, w_order;
+ u32 val, i;
+
+ pci_read_config_word(bp->pdev,
+- bp->pcie_cap + PCI_EXP_DEVCTL, (u16 *)&val);
+- DP(NETIF_MSG_HW, "read 0x%x from devctl\n", (u16)val);
+- w_order = ((val & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
+- r_order = ((val & PCI_EXP_DEVCTL_READRQ) >> 12);
++ bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
++ DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
++ w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
++ if (bp->mrrs == -1)
++ r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
++ else {
++ DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
++ r_order = bp->mrrs;
++ }
+
+ if (r_order > MAX_RD_ORD) {
+ DP(NETIF_MSG_HW, "read order of %d order adjusted to %d\n",
+@@ -424,6 +589,10 @@ static void bnx2x_init_pxp(struct bnx2x *bp)
+ w_order, MAX_WR_ORD);
+ w_order = MAX_WR_ORD;
+ }
++ if (CHIP_REV_IS_FPGA(bp)) {
++ DP(NETIF_MSG_HW, "write order adjusted to 1 for FPGA\n");
++ w_order = 0;
++ }
+ DP(NETIF_MSG_HW, "read order %d write order %d\n", r_order, w_order);
+
+ for (i = 0; i < NUM_RD_Q-1; i++) {
+@@ -481,7 +650,20 @@ static void bnx2x_init_pxp(struct bnx2x *bp)
+ REG_WR(bp, PXP2_REG_RQ_PDR_LIMIT, 0xe00);
+
+ REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x18 << w_order));
+- REG_WR(bp, PXP2_REG_WR_DMAE_TH, (128 << w_order)/16);
++
++ if (CHIP_IS_E1H(bp)) {
++ REG_WR(bp, PXP2_REG_WR_HC_MPS, w_order+1);
++ REG_WR(bp, PXP2_REG_WR_USDM_MPS, w_order+1);
++ REG_WR(bp, PXP2_REG_WR_CSDM_MPS, w_order+1);
++ REG_WR(bp, PXP2_REG_WR_TSDM_MPS, w_order+1);
++ REG_WR(bp, PXP2_REG_WR_XSDM_MPS, w_order+1);
++ REG_WR(bp, PXP2_REG_WR_QM_MPS, w_order+1);
++ REG_WR(bp, PXP2_REG_WR_TM_MPS, w_order+1);
++ REG_WR(bp, PXP2_REG_WR_SRC_MPS, w_order+1);
++ REG_WR(bp, PXP2_REG_WR_DBG_MPS, w_order+1);
++ REG_WR(bp, PXP2_REG_WR_DMAE_MPS, 2); /* DMAE is special */
++ REG_WR(bp, PXP2_REG_WR_CDU_MPS, w_order+1);
++ }
+ }
+
+
+@@ -564,6 +746,72 @@ static u8 calc_crc8(u32 data, u8 crc)
+ return crc_res;
+ }
+
++/* registers addresses are not in order
++ so these arrays help simplify the code */
++static const int cm_start[E1H_FUNC_MAX][9] = {
++ {MISC_FUNC0_START, TCM_FUNC0_START, UCM_FUNC0_START, CCM_FUNC0_START,
++ XCM_FUNC0_START, TSEM_FUNC0_START, USEM_FUNC0_START, CSEM_FUNC0_START,
++ XSEM_FUNC0_START},
++ {MISC_FUNC1_START, TCM_FUNC1_START, UCM_FUNC1_START, CCM_FUNC1_START,
++ XCM_FUNC1_START, TSEM_FUNC1_START, USEM_FUNC1_START, CSEM_FUNC1_START,
++ XSEM_FUNC1_START},
++ {MISC_FUNC2_START, TCM_FUNC2_START, UCM_FUNC2_START, CCM_FUNC2_START,
++ XCM_FUNC2_START, TSEM_FUNC2_START, USEM_FUNC2_START, CSEM_FUNC2_START,
++ XSEM_FUNC2_START},
++ {MISC_FUNC3_START, TCM_FUNC3_START, UCM_FUNC3_START, CCM_FUNC3_START,
++ XCM_FUNC3_START, TSEM_FUNC3_START, USEM_FUNC3_START, CSEM_FUNC3_START,
++ XSEM_FUNC3_START},
++ {MISC_FUNC4_START, TCM_FUNC4_START, UCM_FUNC4_START, CCM_FUNC4_START,
++ XCM_FUNC4_START, TSEM_FUNC4_START, USEM_FUNC4_START, CSEM_FUNC4_START,
++ XSEM_FUNC4_START},
++ {MISC_FUNC5_START, TCM_FUNC5_START, UCM_FUNC5_START, CCM_FUNC5_START,
++ XCM_FUNC5_START, TSEM_FUNC5_START, USEM_FUNC5_START, CSEM_FUNC5_START,
++ XSEM_FUNC5_START},
++ {MISC_FUNC6_START, TCM_FUNC6_START, UCM_FUNC6_START, CCM_FUNC6_START,
++ XCM_FUNC6_START, TSEM_FUNC6_START, USEM_FUNC6_START, CSEM_FUNC6_START,
++ XSEM_FUNC6_START},
++ {MISC_FUNC7_START, TCM_FUNC7_START, UCM_FUNC7_START, CCM_FUNC7_START,
++ XCM_FUNC7_START, TSEM_FUNC7_START, USEM_FUNC7_START, CSEM_FUNC7_START,
++ XSEM_FUNC7_START}
++};
++
++static const int cm_end[E1H_FUNC_MAX][9] = {
++ {MISC_FUNC0_END, TCM_FUNC0_END, UCM_FUNC0_END, CCM_FUNC0_END,
++ XCM_FUNC0_END, TSEM_FUNC0_END, USEM_FUNC0_END, CSEM_FUNC0_END,
++ XSEM_FUNC0_END},
++ {MISC_FUNC1_END, TCM_FUNC1_END, UCM_FUNC1_END, CCM_FUNC1_END,
++ XCM_FUNC1_END, TSEM_FUNC1_END, USEM_FUNC1_END, CSEM_FUNC1_END,
++ XSEM_FUNC1_END},
++ {MISC_FUNC2_END, TCM_FUNC2_END, UCM_FUNC2_END, CCM_FUNC2_END,
++ XCM_FUNC2_END, TSEM_FUNC2_END, USEM_FUNC2_END, CSEM_FUNC2_END,
++ XSEM_FUNC2_END},
++ {MISC_FUNC3_END, TCM_FUNC3_END, UCM_FUNC3_END, CCM_FUNC3_END,
++ XCM_FUNC3_END, TSEM_FUNC3_END, USEM_FUNC3_END, CSEM_FUNC3_END,
++ XSEM_FUNC3_END},
++ {MISC_FUNC4_END, TCM_FUNC4_END, UCM_FUNC4_END, CCM_FUNC4_END,
++ XCM_FUNC4_END, TSEM_FUNC4_END, USEM_FUNC4_END, CSEM_FUNC4_END,
++ XSEM_FUNC4_END},
++ {MISC_FUNC5_END, TCM_FUNC5_END, UCM_FUNC5_END, CCM_FUNC5_END,
++ XCM_FUNC5_END, TSEM_FUNC5_END, USEM_FUNC5_END, CSEM_FUNC5_END,
++ XSEM_FUNC5_END},
++ {MISC_FUNC6_END, TCM_FUNC6_END, UCM_FUNC6_END, CCM_FUNC6_END,
++ XCM_FUNC6_END, TSEM_FUNC6_END, USEM_FUNC6_END, CSEM_FUNC6_END,
++ XSEM_FUNC6_END},
++ {MISC_FUNC7_END, TCM_FUNC7_END, UCM_FUNC7_END, CCM_FUNC7_END,
++ XCM_FUNC7_END, TSEM_FUNC7_END, USEM_FUNC7_END, CSEM_FUNC7_END,
++ XSEM_FUNC7_END},
++};
++
++static const int hc_limits[E1H_FUNC_MAX][2] = {
++ {HC_FUNC0_START, HC_FUNC0_END},
++ {HC_FUNC1_START, HC_FUNC1_END},
++ {HC_FUNC2_START, HC_FUNC2_END},
++ {HC_FUNC3_START, HC_FUNC3_END},
++ {HC_FUNC4_START, HC_FUNC4_END},
++ {HC_FUNC5_START, HC_FUNC5_END},
++ {HC_FUNC6_START, HC_FUNC6_END},
++ {HC_FUNC7_START, HC_FUNC7_END}
++};
+
+ #endif /* BNX2X_INIT_H */
+
+diff --git a/drivers/net/bnx2x_link.c b/drivers/net/bnx2x_link.c
+new file mode 100644
+index 0000000..ecfda9e
+--- /dev/null
++++ b/drivers/net/bnx2x_link.c
+@@ -0,0 +1,4907 @@
++/* Copyright 2008-2009 Broadcom Corporation
++ *
++ * Unless you and Broadcom execute a separate written software license
++ * agreement governing use of this software, this software is licensed to you
++ * under the terms of the GNU General Public License version 2, available
++ * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
++ *
++ * Notwithstanding the above, under no circumstances may you combine this
++ * software in any way with any other Broadcom software provided under a
++ * license other than the GPL, without Broadcom's express prior written
++ * consent.
++ *
++ * Written by Yaniv Rosner
++ *
++ */
++
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/pci.h>
++#include <linux/netdevice.h>
++#include <linux/delay.h>
++#include <linux/ethtool.h>
++#include <linux/mutex.h>
++#include <linux/version.h>
++
++#include "bnx2x_reg.h"
++#include "bnx2x_fw_defs.h"
++#include "bnx2x_hsi.h"
++#include "bnx2x_link.h"
++#include "bnx2x.h"
++
++/********************************************************/
++#define SUPPORT_CL73 0 /* Currently no */
++#define ETH_HLEN 14
++#define ETH_OVREHEAD (ETH_HLEN + 8)/* 8 for CRC + VLAN*/
++#define ETH_MIN_PACKET_SIZE 60
++#define ETH_MAX_PACKET_SIZE 1500
++#define ETH_MAX_JUMBO_PACKET_SIZE 9600
++#define MDIO_ACCESS_TIMEOUT 1000
++#define BMAC_CONTROL_RX_ENABLE 2
++
++/***********************************************************/
++/* Shortcut definitions */
++/***********************************************************/
++
++#define NIG_STATUS_XGXS0_LINK10G \
++ NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK10G
++#define NIG_STATUS_XGXS0_LINK_STATUS \
++ NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS
++#define NIG_STATUS_XGXS0_LINK_STATUS_SIZE \
++ NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS_SIZE
++#define NIG_STATUS_SERDES0_LINK_STATUS \
++ NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_SERDES0_LINK_STATUS
++#define NIG_MASK_MI_INT \
++ NIG_MASK_INTERRUPT_PORT0_REG_MASK_EMAC0_MISC_MI_INT
++#define NIG_MASK_XGXS0_LINK10G \
++ NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK10G
++#define NIG_MASK_XGXS0_LINK_STATUS \
++ NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK_STATUS
++#define NIG_MASK_SERDES0_LINK_STATUS \
++ NIG_MASK_INTERRUPT_PORT0_REG_MASK_SERDES0_LINK_STATUS
++
++#define MDIO_AN_CL73_OR_37_COMPLETE \
++ (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE | \
++ MDIO_GP_STATUS_TOP_AN_STATUS1_CL37_AUTONEG_COMPLETE)
++
++#define XGXS_RESET_BITS \
++ (MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_RSTB_HW | \
++ MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_IDDQ | \
++ MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN | \
++ MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN_SD | \
++ MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_TXD_FIFO_RSTB)
++
++#define SERDES_RESET_BITS \
++ (MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_RSTB_HW | \
++ MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_IDDQ | \
++ MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN | \
++ MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN_SD)
++
++#define AUTONEG_CL37 SHARED_HW_CFG_AN_ENABLE_CL37
++#define AUTONEG_CL73 SHARED_HW_CFG_AN_ENABLE_CL73
++#define AUTONEG_BAM SHARED_HW_CFG_AN_ENABLE_BAM
++#define AUTONEG_PARALLEL \
++ SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION
++#define AUTONEG_SGMII_FIBER_AUTODET \
++ SHARED_HW_CFG_AN_EN_SGMII_FIBER_AUTO_DETECT
++#define AUTONEG_REMOTE_PHY SHARED_HW_CFG_AN_ENABLE_REMOTE_PHY
++
++#define GP_STATUS_PAUSE_RSOLUTION_TXSIDE \
++ MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_TXSIDE
++#define GP_STATUS_PAUSE_RSOLUTION_RXSIDE \
++ MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_RXSIDE
++#define GP_STATUS_SPEED_MASK \
++ MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_MASK
++#define GP_STATUS_10M MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10M
++#define GP_STATUS_100M MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_100M
++#define GP_STATUS_1G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G
++#define GP_STATUS_2_5G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_2_5G
++#define GP_STATUS_5G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_5G
++#define GP_STATUS_6G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_6G
++#define GP_STATUS_10G_HIG \
++ MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_HIG
++#define GP_STATUS_10G_CX4 \
++ MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_CX4
++#define GP_STATUS_12G_HIG \
++ MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_12G_HIG
++#define GP_STATUS_12_5G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_12_5G
++#define GP_STATUS_13G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_13G
++#define GP_STATUS_15G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_15G
++#define GP_STATUS_16G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_16G
++#define GP_STATUS_1G_KX MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G_KX
++#define GP_STATUS_10G_KX4 \
++ MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KX4
++
++#define LINK_10THD LINK_STATUS_SPEED_AND_DUPLEX_10THD
++#define LINK_10TFD LINK_STATUS_SPEED_AND_DUPLEX_10TFD
++#define LINK_100TXHD LINK_STATUS_SPEED_AND_DUPLEX_100TXHD
++#define LINK_100T4 LINK_STATUS_SPEED_AND_DUPLEX_100T4
++#define LINK_100TXFD LINK_STATUS_SPEED_AND_DUPLEX_100TXFD
++#define LINK_1000THD LINK_STATUS_SPEED_AND_DUPLEX_1000THD
++#define LINK_1000TFD LINK_STATUS_SPEED_AND_DUPLEX_1000TFD
++#define LINK_1000XFD LINK_STATUS_SPEED_AND_DUPLEX_1000XFD
++#define LINK_2500THD LINK_STATUS_SPEED_AND_DUPLEX_2500THD
++#define LINK_2500TFD LINK_STATUS_SPEED_AND_DUPLEX_2500TFD
++#define LINK_2500XFD LINK_STATUS_SPEED_AND_DUPLEX_2500XFD
++#define LINK_10GTFD LINK_STATUS_SPEED_AND_DUPLEX_10GTFD
++#define LINK_10GXFD LINK_STATUS_SPEED_AND_DUPLEX_10GXFD
++#define LINK_12GTFD LINK_STATUS_SPEED_AND_DUPLEX_12GTFD
++#define LINK_12GXFD LINK_STATUS_SPEED_AND_DUPLEX_12GXFD
++#define LINK_12_5GTFD LINK_STATUS_SPEED_AND_DUPLEX_12_5GTFD
++#define LINK_12_5GXFD LINK_STATUS_SPEED_AND_DUPLEX_12_5GXFD
++#define LINK_13GTFD LINK_STATUS_SPEED_AND_DUPLEX_13GTFD
++#define LINK_13GXFD LINK_STATUS_SPEED_AND_DUPLEX_13GXFD
++#define LINK_15GTFD LINK_STATUS_SPEED_AND_DUPLEX_15GTFD
++#define LINK_15GXFD LINK_STATUS_SPEED_AND_DUPLEX_15GXFD
++#define LINK_16GTFD LINK_STATUS_SPEED_AND_DUPLEX_16GTFD
++#define LINK_16GXFD LINK_STATUS_SPEED_AND_DUPLEX_16GXFD
++
++#define PHY_XGXS_FLAG 0x1
++#define PHY_SGMII_FLAG 0x2
++#define PHY_SERDES_FLAG 0x4
++
++/**********************************************************/
++/* INTERFACE */
++/**********************************************************/
++#define CL45_WR_OVER_CL22(_bp, _port, _phy_addr, _bank, _addr, _val) \
++ bnx2x_cl45_write(_bp, _port, 0, _phy_addr, \
++ DEFAULT_PHY_DEV_ADDR, \
++ (_bank + (_addr & 0xf)), \
++ _val)
++
++#define CL45_RD_OVER_CL22(_bp, _port, _phy_addr, _bank, _addr, _val) \
++ bnx2x_cl45_read(_bp, _port, 0, _phy_addr, \
++ DEFAULT_PHY_DEV_ADDR, \
++ (_bank + (_addr & 0xf)), \
++ _val)
++
++static void bnx2x_set_phy_mdio(struct link_params *params)
++{
++ struct bnx2x *bp = params->bp;
++ REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST +
++ params->port*0x18, 0);
++ REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + params->port*0x18,
++ DEFAULT_PHY_DEV_ADDR);
++}
++
++static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits)
++{
++ u32 val = REG_RD(bp, reg);
++
++ val |= bits;
++ REG_WR(bp, reg, val);
++ return val;
++}
++
++static u32 bnx2x_bits_dis(struct bnx2x *bp, u32 reg, u32 bits)
++{
++ u32 val = REG_RD(bp, reg);
++
++ val &= ~bits;
++ REG_WR(bp, reg, val);
++ return val;
++}
++
++static void bnx2x_emac_init(struct link_params *params,
++ struct link_vars *vars)
++{
++ /* reset and unreset the emac core */
++ struct bnx2x *bp = params->bp;
++ u8 port = params->port;
++ u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
++ u32 val;
++ u16 timeout;
++
++ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
++ (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
++ udelay(5);
++ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
++ (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
++
++ /* init emac - use read-modify-write */
++ /* self clear reset */
++ val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
++ EMAC_WR(bp, EMAC_REG_EMAC_MODE, (val | EMAC_MODE_RESET));
++
++ timeout = 200;
++ do {
++ val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
++ DP(NETIF_MSG_LINK, "EMAC reset reg is %u\n", val);
++ if (!timeout) {
++ DP(NETIF_MSG_LINK, "EMAC timeout!\n");
++ return;
++ }
++ timeout--;
++ } while (val & EMAC_MODE_RESET);
++
++ /* Set mac address */
++ val = ((params->mac_addr[0] << 8) |
++ params->mac_addr[1]);
++ EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH, val);
++
++ val = ((params->mac_addr[2] << 24) |
++ (params->mac_addr[3] << 16) |
++ (params->mac_addr[4] << 8) |
++ params->mac_addr[5]);
++ EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + 4, val);
++}
++
++static u8 bnx2x_emac_enable(struct link_params *params,
++ struct link_vars *vars, u8 lb)
++{
++ struct bnx2x *bp = params->bp;
++ u8 port = params->port;
++ u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
++ u32 val;
++
++ DP(NETIF_MSG_LINK, "enabling EMAC\n");
++
++ /* enable emac and not bmac */
++ REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 1);
++
++ /* for paladium */
++ if (CHIP_REV_IS_EMUL(bp)) {
++ /* Use lane 1 (of lanes 0-3) */
++ REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, 1);
++ REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL +
++ port*4, 1);
++ }
++ /* for fpga */
++ else
++
++ if (CHIP_REV_IS_FPGA(bp)) {
++ /* Use lane 1 (of lanes 0-3) */
++ DP(NETIF_MSG_LINK, "bnx2x_emac_enable: Setting FPGA\n");
++
++ REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, 1);
++ REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4,
++ 0);
++ } else
++ /* ASIC */
++ if (vars->phy_flags & PHY_XGXS_FLAG) {
++ u32 ser_lane = ((params->lane_config &
++ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
++ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
++
++ DP(NETIF_MSG_LINK, "XGXS\n");
++ /* select the master lanes (out of 0-3) */
++ REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 +
++ port*4, ser_lane);
++ /* select XGXS */
++ REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL +
++ port*4, 1);
++
++ } else { /* SerDes */
++ DP(NETIF_MSG_LINK, "SerDes\n");
++ /* select SerDes */
++ REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL +
++ port*4, 0);
++ }
++
++ /* enable emac */
++ REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 1);
++
++ if (CHIP_REV_IS_SLOW(bp)) {
++ /* config GMII mode */
++ val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
++ EMAC_WR(bp, EMAC_REG_EMAC_MODE,
++ (val | EMAC_MODE_PORT_GMII));
++ } else { /* ASIC */
++ /* pause enable/disable */
++ bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
++ EMAC_RX_MODE_FLOW_EN);
++ if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
++ bnx2x_bits_en(bp, emac_base +
++ EMAC_REG_EMAC_RX_MODE,
++ EMAC_RX_MODE_FLOW_EN);
++
++ bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
++ (EMAC_TX_MODE_EXT_PAUSE_EN |
++ EMAC_TX_MODE_FLOW_EN));
++ if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
++ bnx2x_bits_en(bp, emac_base +
++ EMAC_REG_EMAC_TX_MODE,
++ (EMAC_TX_MODE_EXT_PAUSE_EN |
++ EMAC_TX_MODE_FLOW_EN));
++ }
++
++ /* KEEP_VLAN_TAG, promiscuous */
++ val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
++ val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS;
++ EMAC_WR(bp, EMAC_REG_EMAC_RX_MODE, val);
++
++ /* Set Loopback */
++ val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
++ if (lb)
++ val |= 0x810;
++ else
++ val &= ~0x810;
++ EMAC_WR(bp, EMAC_REG_EMAC_MODE, val);
++
++ /* enable emac */
++ REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 1);
++
++ /* enable emac for jumbo packets */
++ EMAC_WR(bp, EMAC_REG_EMAC_RX_MTU_SIZE,
++ (EMAC_RX_MTU_SIZE_JUMBO_ENA |
++ (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD)));
++
++ /* strip CRC */
++ REG_WR(bp, NIG_REG_NIG_INGRESS_EMAC0_NO_CRC + port*4, 0x1);
++
++ /* disable the NIG in/out to the bmac */
++ REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0x0);
++ REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, 0x0);
++ REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0x0);
++
++ /* enable the NIG in/out to the emac */
++ REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0x1);
++ val = 0;
++ if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
++ val = 1;
++
++ REG_WR(bp, NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, val);
++ REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x1);
++
++ if (CHIP_REV_IS_EMUL(bp)) {
++ /* take the BigMac out of reset */
++ REG_WR(bp,
++ GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
++ (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
++
++ /* enable access for bmac registers */
++ REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
++ }
++
++ vars->mac_type = MAC_TYPE_EMAC;
++ return 0;
++}
++
++
++
++static u8 bnx2x_bmac_enable(struct link_params *params, struct link_vars *vars,
++ u8 is_lb)
++{
++ struct bnx2x *bp = params->bp;
++ u8 port = params->port;
++ u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
++ NIG_REG_INGRESS_BMAC0_MEM;
++ u32 wb_data[2];
++ u32 val;
++
++ DP(NETIF_MSG_LINK, "Enabling BigMAC\n");
++ /* reset and unreset the BigMac */
++ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
++ (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
++ msleep(1);
++
++ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
++ (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
++
++ /* enable access for bmac registers */
++ REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
++
++ /* XGXS control */
++ wb_data[0] = 0x3c;
++ wb_data[1] = 0;
++ REG_WR_DMAE(bp, bmac_addr +
++ BIGMAC_REGISTER_BMAC_XGXS_CONTROL,
++ wb_data, 2);
++
++ /* tx MAC SA */
++ wb_data[0] = ((params->mac_addr[2] << 24) |
++ (params->mac_addr[3] << 16) |
++ (params->mac_addr[4] << 8) |
++ params->mac_addr[5]);
++ wb_data[1] = ((params->mac_addr[0] << 8) |
++ params->mac_addr[1]);
++ REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR,
++ wb_data, 2);
++
++ /* tx control */
++ val = 0xc0;
++ if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
++ val |= 0x800000;
++ wb_data[0] = val;
++ wb_data[1] = 0;
++ REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_CONTROL,
++ wb_data, 2);
++
++ /* mac control */
++ val = 0x3;
++ if (is_lb) {
++ val |= 0x4;
++ DP(NETIF_MSG_LINK, "enable bmac loopback\n");
++ }
++ wb_data[0] = val;
++ wb_data[1] = 0;
++ REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL,
++ wb_data, 2);
++
++
++ /* set rx mtu */
++ wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
++ wb_data[1] = 0;
++ REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE,
++ wb_data, 2);
++
++ /* rx control set to don't strip crc */
++ val = 0x14;
++ if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
++ val |= 0x20;
++ wb_data[0] = val;
++ wb_data[1] = 0;
++ REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_CONTROL,
++ wb_data, 2);
++
++ /* set tx mtu */
++ wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
++ wb_data[1] = 0;
++ REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE,
++ wb_data, 2);
++
++ /* set cnt max size */
++ wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
++ wb_data[1] = 0;
++ REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE,
++ wb_data, 2);
++
++ /* configure safc */
++ wb_data[0] = 0x1000200;
++ wb_data[1] = 0;
++ REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS,
++ wb_data, 2);
++ /* fix for emulation */
++ if (CHIP_REV_IS_EMUL(bp)) {
++ wb_data[0] = 0xf000;
++ wb_data[1] = 0;
++ REG_WR_DMAE(bp,
++ bmac_addr + BIGMAC_REGISTER_TX_PAUSE_THRESHOLD,
++ wb_data, 2);
++ }
++
++ REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0x1);
++ REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, 0x0);
++ REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 0x0);
++ val = 0;
++ if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
++ val = 1;
++ REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, val);
++ REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x0);
++ REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0x0);
++ REG_WR(bp, NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, 0x0);
++ REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0x1);
++ REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0x1);
++
++ vars->mac_type = MAC_TYPE_BMAC;
++ return 0;
++}
++
++static void bnx2x_phy_deassert(struct link_params *params, u8 phy_flags)
++{
++ struct bnx2x *bp = params->bp;
++ u32 val;
++
++ if (phy_flags & PHY_XGXS_FLAG) {
++ DP(NETIF_MSG_LINK, "bnx2x_phy_deassert:XGXS\n");
++ val = XGXS_RESET_BITS;
++
++ } else { /* SerDes */
++ DP(NETIF_MSG_LINK, "bnx2x_phy_deassert:SerDes\n");
++ val = SERDES_RESET_BITS;
++ }
++
++ val = val << (params->port*16);
++
++ /* reset and unreset the SerDes/XGXS */
++ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR,
++ val);
++ udelay(500);
++ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET,
++ val);
++ bnx2x_set_phy_mdio(params);
++}
++
++void bnx2x_link_status_update(struct link_params *params,
++ struct link_vars *vars)
++{
++ struct bnx2x *bp = params->bp;
++ u8 link_10g;
++ u8 port = params->port;
++
++ if (params->switch_cfg == SWITCH_CFG_1G)
++ vars->phy_flags = PHY_SERDES_FLAG;
++ else
++ vars->phy_flags = PHY_XGXS_FLAG;
++ vars->link_status = REG_RD(bp, params->shmem_base +
++ offsetof(struct shmem_region,
++ port_mb[port].link_status));
++
++ vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP);
++
++ if (vars->link_up) {
++ DP(NETIF_MSG_LINK, "phy link up\n");
++
++ vars->phy_link_up = 1;
++ vars->duplex = DUPLEX_FULL;
++ switch (vars->link_status &
++ LINK_STATUS_SPEED_AND_DUPLEX_MASK) {
++ case LINK_10THD:
++ vars->duplex = DUPLEX_HALF;
++ /* fall thru */
++ case LINK_10TFD:
++ vars->line_speed = SPEED_10;
++ break;
++
++ case LINK_100TXHD:
++ vars->duplex = DUPLEX_HALF;
++ /* fall thru */
++ case LINK_100T4:
++ case LINK_100TXFD:
++ vars->line_speed = SPEED_100;
++ break;
++
++ case LINK_1000THD:
++ vars->duplex = DUPLEX_HALF;
++ /* fall thru */
++ case LINK_1000TFD:
++ vars->line_speed = SPEED_1000;
++ break;
++
++ case LINK_2500THD:
++ vars->duplex = DUPLEX_HALF;
++ /* fall thru */
++ case LINK_2500TFD:
++ vars->line_speed = SPEED_2500;
++ break;
++
++ case LINK_10GTFD:
++ vars->line_speed = SPEED_10000;
++ break;
++
++ case LINK_12GTFD:
++ vars->line_speed = SPEED_12000;
++ break;
++
++ case LINK_12_5GTFD:
++ vars->line_speed = SPEED_12500;
++ break;
++
++ case LINK_13GTFD:
++ vars->line_speed = SPEED_13000;
++ break;
++
++ case LINK_15GTFD:
++ vars->line_speed = SPEED_15000;
++ break;
++
++ case LINK_16GTFD:
++ vars->line_speed = SPEED_16000;
++ break;
++
++ default:
++ break;
++ }
++
++ if (vars->link_status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED)
++ vars->flow_ctrl |= BNX2X_FLOW_CTRL_TX;
++ else
++ vars->flow_ctrl &= ~BNX2X_FLOW_CTRL_TX;
++
++ if (vars->link_status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED)
++ vars->flow_ctrl |= BNX2X_FLOW_CTRL_RX;
++ else
++ vars->flow_ctrl &= ~BNX2X_FLOW_CTRL_RX;
++
++ if (vars->phy_flags & PHY_XGXS_FLAG) {
++ if (vars->line_speed &&
++ ((vars->line_speed == SPEED_10) ||
++ (vars->line_speed == SPEED_100))) {
++ vars->phy_flags |= PHY_SGMII_FLAG;
++ } else {
++ vars->phy_flags &= ~PHY_SGMII_FLAG;
++ }
++ }
++
++ /* anything 10 and over uses the bmac */
++ link_10g = ((vars->line_speed == SPEED_10000) ||
++ (vars->line_speed == SPEED_12000) ||
++ (vars->line_speed == SPEED_12500) ||
++ (vars->line_speed == SPEED_13000) ||
++ (vars->line_speed == SPEED_15000) ||
++ (vars->line_speed == SPEED_16000));
++ if (link_10g)
++ vars->mac_type = MAC_TYPE_BMAC;
++ else
++ vars->mac_type = MAC_TYPE_EMAC;
++
++ } else { /* link down */
++ DP(NETIF_MSG_LINK, "phy link down\n");
++
++ vars->phy_link_up = 0;
++
++ vars->line_speed = 0;
++ vars->duplex = DUPLEX_FULL;
++ vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
++
++ /* indicate no mac active */
++ vars->mac_type = MAC_TYPE_NONE;
++ }
++
++ DP(NETIF_MSG_LINK, "link_status 0x%x phy_link_up %x\n",
++ vars->link_status, vars->phy_link_up);
++ DP(NETIF_MSG_LINK, "line_speed %x duplex %x flow_ctrl 0x%x\n",
++ vars->line_speed, vars->duplex, vars->flow_ctrl);
++}
++
++static void bnx2x_update_mng(struct link_params *params, u32 link_status)
++{
++ struct bnx2x *bp = params->bp;
++ REG_WR(bp, params->shmem_base +
++ offsetof(struct shmem_region,
++ port_mb[params->port].link_status),
++ link_status);
++}
++
++static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port)
++{
++ u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
++ NIG_REG_INGRESS_BMAC0_MEM;
++ u32 wb_data[2];
++ u32 nig_bmac_enable = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4);
++
++ /* Only if the bmac is out of reset */
++ if (REG_RD(bp, MISC_REG_RESET_REG_2) &
++ (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port) &&
++ nig_bmac_enable) {
++
++ /* Clear Rx Enable bit in BMAC_CONTROL register */
++ REG_RD_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL,
++ wb_data, 2);
++ wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
++ REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL,
++ wb_data, 2);
++
++ msleep(1);
++ }
++}
++
++static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
++ u32 line_speed)
++{
++ struct bnx2x *bp = params->bp;
++ u8 port = params->port;
++ u32 init_crd, crd;
++ u32 count = 1000;
++
++ /* disable port */
++ REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x1);
++
++ /* wait for init credit */
++ init_crd = REG_RD(bp, PBF_REG_P0_INIT_CRD + port*4);
++ crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
++ DP(NETIF_MSG_LINK, "init_crd 0x%x crd 0x%x\n", init_crd, crd);
++
++ while ((init_crd != crd) && count) {
++ msleep(5);
++
++ crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
++ count--;
++ }
++ crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
++ if (init_crd != crd) {
++ DP(NETIF_MSG_LINK, "BUG! init_crd 0x%x != crd 0x%x\n",
++ init_crd, crd);
++ return -EINVAL;
++ }
++
++ if (flow_ctrl & BNX2X_FLOW_CTRL_RX ||
++ line_speed == SPEED_10 ||
++ line_speed == SPEED_100 ||
++ line_speed == SPEED_1000 ||
++ line_speed == SPEED_2500) {
++ REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 1);
++ /* update threshold */
++ REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0);
++ /* update init credit */
++ init_crd = 778; /* (800-18-4) */
++
++ } else {
++ u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE +
++ ETH_OVREHEAD)/16;
++ REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
++ /* update threshold */
++ REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, thresh);
++ /* update init credit */
++ switch (line_speed) {
++ case SPEED_10000:
++ init_crd = thresh + 553 - 22;
++ break;
++
++ case SPEED_12000:
++ init_crd = thresh + 664 - 22;
++ break;
++
++ case SPEED_13000:
++ init_crd = thresh + 742 - 22;
++ break;
++
++ case SPEED_16000:
++ init_crd = thresh + 778 - 22;
++ break;
++ default:
++ DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n",
++ line_speed);
++ return -EINVAL;
++ break;
++ }
++ }
++ REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, init_crd);
++ DP(NETIF_MSG_LINK, "PBF updated to speed %d credit %d\n",
++ line_speed, init_crd);
++
++ /* probe the credit changes */
++ REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x1);
++ msleep(5);
++ REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x0);
++
++ /* enable port */
++ REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x0);
++ return 0;
++}
++
++static u32 bnx2x_get_emac_base(u32 ext_phy_type, u8 port)
++{
++ u32 emac_base;
++ switch (ext_phy_type) {
++ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
++ emac_base = GRCBASE_EMAC0;
++ break;
++ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
++ emac_base = (port) ? GRCBASE_EMAC0 : GRCBASE_EMAC1;
++ break;
++ default:
++ emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
++ break;
++ }
++ return emac_base;
++
++}
++
++u8 bnx2x_cl45_write(struct bnx2x *bp, u8 port, u32 ext_phy_type,
++ u8 phy_addr, u8 devad, u16 reg, u16 val)
++{
++ u32 tmp, saved_mode;
++ u8 i, rc = 0;
++ u32 mdio_ctrl = bnx2x_get_emac_base(ext_phy_type, port);
++
++ /* set clause 45 mode, slow down the MDIO clock to 2.5MHz
++ * (a value of 49==0x31) and make sure that the AUTO poll is off
++ */
++ saved_mode = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
++ tmp = saved_mode & ~(EMAC_MDIO_MODE_AUTO_POLL |
++ EMAC_MDIO_MODE_CLOCK_CNT);
++ tmp |= (EMAC_MDIO_MODE_CLAUSE_45 |
++ (49 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT));
++ REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, tmp);
++ REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
++ udelay(40);
++
++ /* address */
++
++ tmp = ((phy_addr << 21) | (devad << 16) | reg |
++ EMAC_MDIO_COMM_COMMAND_ADDRESS |
++ EMAC_MDIO_COMM_START_BUSY);
++ REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
++
++ for (i = 0; i < 50; i++) {
++ udelay(10);
++
++ tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
++ if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
++ udelay(5);
++ break;
++ }
++ }
++ if (tmp & EMAC_MDIO_COMM_START_BUSY) {
++ DP(NETIF_MSG_LINK, "write phy register failed\n");
++ rc = -EFAULT;
++ } else {
++ /* data */
++ tmp = ((phy_addr << 21) | (devad << 16) | val |
++ EMAC_MDIO_COMM_COMMAND_WRITE_45 |
++ EMAC_MDIO_COMM_START_BUSY);
++ REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
++
++ for (i = 0; i < 50; i++) {
++ udelay(10);
++
++ tmp = REG_RD(bp, mdio_ctrl +
++ EMAC_REG_EMAC_MDIO_COMM);
++ if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
++ udelay(5);
++ break;
++ }
++ }
++ if (tmp & EMAC_MDIO_COMM_START_BUSY) {
++ DP(NETIF_MSG_LINK, "write phy register failed\n");
++ rc = -EFAULT;
++ }
++ }
++
++ /* Restore the saved mode */
++ REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, saved_mode);
++
++ return rc;
++}
++
++u8 bnx2x_cl45_read(struct bnx2x *bp, u8 port, u32 ext_phy_type,
++ u8 phy_addr, u8 devad, u16 reg, u16 *ret_val)
++{
++ u32 val, saved_mode;
++ u16 i;
++ u8 rc = 0;
++
++ u32 mdio_ctrl = bnx2x_get_emac_base(ext_phy_type, port);
++ /* set clause 45 mode, slow down the MDIO clock to 2.5MHz
++ * (a value of 49==0x31) and make sure that the AUTO poll is off
++ */
++ saved_mode = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
++ val = saved_mode & ((EMAC_MDIO_MODE_AUTO_POLL |
++ EMAC_MDIO_MODE_CLOCK_CNT));
++ val |= (EMAC_MDIO_MODE_CLAUSE_45 |
++ (49 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT));
++ REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, val);
++ REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
++ udelay(40);
++
++ /* address */
++ val = ((phy_addr << 21) | (devad << 16) | reg |
++ EMAC_MDIO_COMM_COMMAND_ADDRESS |
++ EMAC_MDIO_COMM_START_BUSY);
++ REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
++
++ for (i = 0; i < 50; i++) {
++ udelay(10);
++
++ val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
++ if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
++ udelay(5);
++ break;
++ }
++ }
++ if (val & EMAC_MDIO_COMM_START_BUSY) {
++ DP(NETIF_MSG_LINK, "read phy register failed\n");
++
++ *ret_val = 0;
++ rc = -EFAULT;
++
++ } else {
++ /* data */
++ val = ((phy_addr << 21) | (devad << 16) |
++ EMAC_MDIO_COMM_COMMAND_READ_45 |
++ EMAC_MDIO_COMM_START_BUSY);
++ REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
++
++ for (i = 0; i < 50; i++) {
++ udelay(10);
++
++ val = REG_RD(bp, mdio_ctrl +
++ EMAC_REG_EMAC_MDIO_COMM);
++ if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
++ *ret_val = (u16)(val & EMAC_MDIO_COMM_DATA);
++ break;
++ }
++ }
++ if (val & EMAC_MDIO_COMM_START_BUSY) {
++ DP(NETIF_MSG_LINK, "read phy register failed\n");
++
++ *ret_val = 0;
++ rc = -EFAULT;
++ }
++ }
++
++ /* Restore the saved mode */
++ REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, saved_mode);
++
++ return rc;
++}
++
++static void bnx2x_set_aer_mmd(struct link_params *params,
++ struct link_vars *vars)
++{
++ struct bnx2x *bp = params->bp;
++ u32 ser_lane;
++ u16 offset;
++
++ ser_lane = ((params->lane_config &
++ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
++ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
++
++ offset = (vars->phy_flags & PHY_XGXS_FLAG) ?
++ (params->phy_addr + ser_lane) : 0;
++
++ CL45_WR_OVER_CL22(bp, params->port,
++ params->phy_addr,
++ MDIO_REG_BANK_AER_BLOCK,
++ MDIO_AER_BLOCK_AER_REG, 0x3800 + offset);
++}
++
++static void bnx2x_set_master_ln(struct link_params *params)
++{
++ struct bnx2x *bp = params->bp;
++ u16 new_master_ln, ser_lane;
++ ser_lane = ((params->lane_config &
++ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
++ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
++
++ /* set the master_ln for AN */
++ CL45_RD_OVER_CL22(bp, params->port,
++ params->phy_addr,
++ MDIO_REG_BANK_XGXS_BLOCK2,
++ MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
++ &new_master_ln);
++
++ CL45_WR_OVER_CL22(bp, params->port,
++ params->phy_addr,
++ MDIO_REG_BANK_XGXS_BLOCK2 ,
++ MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
++ (new_master_ln | ser_lane));
++}
++
++static u8 bnx2x_reset_unicore(struct link_params *params)
++{
++ struct bnx2x *bp = params->bp;
++ u16 mii_control;
++ u16 i;
++
++ CL45_RD_OVER_CL22(bp, params->port,
++ params->phy_addr,
++ MDIO_REG_BANK_COMBO_IEEE0,
++ MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
++
++ /* reset the unicore */
++ CL45_WR_OVER_CL22(bp, params->port,
++ params->phy_addr,
++ MDIO_REG_BANK_COMBO_IEEE0,
++ MDIO_COMBO_IEEE0_MII_CONTROL,
++ (mii_control |
++ MDIO_COMBO_IEEO_MII_CONTROL_RESET));
++
++ /* wait for the reset to self clear */
++ for (i = 0; i < MDIO_ACCESS_TIMEOUT; i++) {
++ udelay(5);
++
++ /* the reset erased the previous bank value */
++ CL45_RD_OVER_CL22(bp, params->port,
++ params->phy_addr,
++ MDIO_REG_BANK_COMBO_IEEE0,
++ MDIO_COMBO_IEEE0_MII_CONTROL,
++ &mii_control);
++
++ if (!(mii_control & MDIO_COMBO_IEEO_MII_CONTROL_RESET)) {
++ udelay(5);
++ return 0;
++ }
++ }
++
++ DP(NETIF_MSG_LINK, "BUG! XGXS is still in reset!\n");
++ return -EINVAL;
++
++}
++
++static void bnx2x_set_swap_lanes(struct link_params *params)
++{
++ struct bnx2x *bp = params->bp;
++ /* Each two bits represents a lane number:
++ No swap is 0123 => 0x1b no need to enable the swap */
++ u16 ser_lane, rx_lane_swap, tx_lane_swap;
++
++ ser_lane = ((params->lane_config &
++ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
++ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
++ rx_lane_swap = ((params->lane_config &
++ PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >>
++ PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT);
++ tx_lane_swap = ((params->lane_config &
++ PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK) >>
++ PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT);
++
++ if (rx_lane_swap != 0x1b) {
++ CL45_WR_OVER_CL22(bp, params->port,
++ params->phy_addr,
++ MDIO_REG_BANK_XGXS_BLOCK2,
++ MDIO_XGXS_BLOCK2_RX_LN_SWAP,
++ (rx_lane_swap |
++ MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE |
++ MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE));
++ } else {
++ CL45_WR_OVER_CL22(bp, params->port,
++ params->phy_addr,
++ MDIO_REG_BANK_XGXS_BLOCK2,
++ MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0);
++ }
++
++ if (tx_lane_swap != 0x1b) {
++ CL45_WR_OVER_CL22(bp, params->port,
++ params->phy_addr,
++ MDIO_REG_BANK_XGXS_BLOCK2,
++ MDIO_XGXS_BLOCK2_TX_LN_SWAP,
++ (tx_lane_swap |
++ MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE));
++ } else {
++ CL45_WR_OVER_CL22(bp, params->port,
++ params->phy_addr,
++ MDIO_REG_BANK_XGXS_BLOCK2,
++ MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0);
++ }
++}
++
++static void bnx2x_set_parallel_detection(struct link_params *params,
++ u8 phy_flags)
++{
++ struct bnx2x *bp = params->bp;
++ u16 control2;
++
++ CL45_RD_OVER_CL22(bp, params->port,
++ params->phy_addr,
++ MDIO_REG_BANK_SERDES_DIGITAL,
++ MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
++ &control2);
++
++
++ control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
++
++
++ CL45_WR_OVER_CL22(bp, params->port,
++ params->phy_addr,
++ MDIO_REG_BANK_SERDES_DIGITAL,
++ MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
++ control2);
++
++ if (phy_flags & PHY_XGXS_FLAG) {
++ DP(NETIF_MSG_LINK, "XGXS\n");
++
++ CL45_WR_OVER_CL22(bp, params->port,
++ params->phy_addr,
++ MDIO_REG_BANK_10G_PARALLEL_DETECT,
++ MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK,
++ MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT);
++
++ CL45_RD_OVER_CL22(bp, params->port,
++ params->phy_addr,
++ MDIO_REG_BANK_10G_PARALLEL_DETECT,
++ MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
++ &control2);
++
++
++ control2 |=
++ MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN;
++
++ CL45_WR_OVER_CL22(bp, params->port,
++ params->phy_addr,
++ MDIO_REG_BANK_10G_PARALLEL_DETECT,
++ MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
++ control2);
++
++ /* Disable parallel detection of HiG */
++ CL45_WR_OVER_CL22(bp, params->port,
++ params->phy_addr,
++ MDIO_REG_BANK_XGXS_BLOCK2,
++ MDIO_XGXS_BLOCK2_UNICORE_MODE_10G,
++ MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS |
++ MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS);
++ }
++}
++
++static void bnx2x_set_autoneg(struct link_params *params,
++ struct link_vars *vars)
++{
++ struct bnx2x *bp = params->bp;
++ u16 reg_val;
++
++ /* CL37 Autoneg */
++
++ CL45_RD_OVER_CL22(bp, params->port,
++ params->phy_addr,
++ MDIO_REG_BANK_COMBO_IEEE0,
++ MDIO_COMBO_IEEE0_MII_CONTROL, ®_val);
++
++ /* CL37 Autoneg Enabled */
++ if (vars->line_speed == SPEED_AUTO_NEG)
++ reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_AN_EN;
++ else /* CL37 Autoneg Disabled */
++ reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
++ MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN);
++
++ CL45_WR_OVER_CL22(bp, params->port,
++ params->phy_addr,
++ MDIO_REG_BANK_COMBO_IEEE0,
++ MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
++
++ /* Enable/Disable Autodetection */
++
++ CL45_RD_OVER_CL22(bp, params->port,
++ params->phy_addr,
++ MDIO_REG_BANK_SERDES_DIGITAL,
++ MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, ®_val);
++ reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN;
++ if (vars->line_speed == SPEED_AUTO_NEG)
++ reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
++ else
++ reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
++
++ CL45_WR_OVER_CL22(bp, params->port,
++ params->phy_addr,
++ MDIO_REG_BANK_SERDES_DIGITAL,
++ MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val);
++
++ /* Enable TetonII and BAM autoneg */
++ CL45_RD_OVER_CL22(bp, params->port,
++ params->phy_addr,
++ MDIO_REG_BANK_BAM_NEXT_PAGE,
++ MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
++ ®_val);
++ if (vars->line_speed == SPEED_AUTO_NEG) {
++ /* Enable BAM aneg Mode and TetonII aneg Mode */
++ reg_val |= (MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
++ MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
++ } else {
++ /* TetonII and BAM Autoneg Disabled */
++ reg_val &= ~(MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
++ MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
++ }
++ CL45_WR_OVER_CL22(bp, params->port,
++ params->phy_addr,
++ MDIO_REG_BANK_BAM_NEXT_PAGE,
++ MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
++ reg_val);
++
++ /* Enable Clause 73 Aneg */
++ if ((vars->line_speed == SPEED_AUTO_NEG) &&
++ (SUPPORT_CL73)) {
++ /* Enable BAM Station Manager */
++
++ CL45_WR_OVER_CL22(bp, params->port,
++ params->phy_addr,
++ MDIO_REG_BANK_CL73_USERB0,
++ MDIO_CL73_USERB0_CL73_BAM_CTRL1,
++ (MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN |
++ MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_STATION_MNGR_EN |
++ MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN));
++
++ /* Merge CL73 and CL37 aneg resolution */
++ CL45_RD_OVER_CL22(bp, params->port,
++ params->phy_addr,
++ MDIO_REG_BANK_CL73_USERB0,
++ MDIO_CL73_USERB0_CL73_BAM_CTRL3,
++ ®_val);
++
++ CL45_WR_OVER_CL22(bp, params->port,
++ params->phy_addr,
++ MDIO_REG_BANK_CL73_USERB0,
++ MDIO_CL73_USERB0_CL73_BAM_CTRL3,
++ (reg_val |
++ MDIO_CL73_USERB0_CL73_BAM_CTRL3_USE_CL73_HCD_MR));
++
++ /* Set the CL73 AN speed */
++
++ CL45_RD_OVER_CL22(bp, params->port,
++ params->phy_addr,
++ MDIO_REG_BANK_CL73_IEEEB1,
++ MDIO_CL73_IEEEB1_AN_ADV2, ®_val);
++ /* In the SerDes we support only the 1G.
++ In the XGXS we support the 10G KX4
++ but we currently do not support the KR */
++ if (vars->phy_flags & PHY_XGXS_FLAG) {
++ DP(NETIF_MSG_LINK, "XGXS\n");
++ /* 10G KX4 */
++ reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4;
++ } else {
++ DP(NETIF_MSG_LINK, "SerDes\n");
++ /* 1000M KX */
++ reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX;
++ }
++ CL45_WR_OVER_CL22(bp, params->port,
++ params->phy_addr,
++ MDIO_REG_BANK_CL73_IEEEB1,
++ MDIO_CL73_IEEEB1_AN_ADV2, reg_val);
++
++ /* CL73 Autoneg Enabled */
++ reg_val = MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN;
++ } else {
++ /* CL73 Autoneg Disabled */
++ reg_val = 0;
++ }
++ CL45_WR_OVER_CL22(bp, params->port,
++ params->phy_addr,
++ MDIO_REG_BANK_CL73_IEEEB0,
++ MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val);
++}
++
++/* program SerDes, forced speed */
++static void bnx2x_program_serdes(struct link_params *params,
++ struct link_vars *vars)
++{
++ struct bnx2x *bp = params->bp;
++ u16 reg_val;
++
++ /* program duplex, disable autoneg */
++
++ CL45_RD_OVER_CL22(bp, params->port,
++ params->phy_addr,
++ MDIO_REG_BANK_COMBO_IEEE0,
++ MDIO_COMBO_IEEE0_MII_CONTROL, ®_val);
++ reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX |
++ MDIO_COMBO_IEEO_MII_CONTROL_AN_EN);
++ if (params->req_duplex == DUPLEX_FULL)
++ reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
++ CL45_WR_OVER_CL22(bp, params->port,
++ params->phy_addr,
++ MDIO_REG_BANK_COMBO_IEEE0,
++ MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
++
++ /* program speed
++ - needed only if the speed is greater than 1G (2.5G or 10G) */
++ CL45_RD_OVER_CL22(bp, params->port,
++ params->phy_addr,
++ MDIO_REG_BANK_SERDES_DIGITAL,
++ MDIO_SERDES_DIGITAL_MISC1, ®_val);
++ /* clearing the speed value before setting the right speed */
++ DP(NETIF_MSG_LINK, "MDIO_REG_BANK_SERDES_DIGITAL = 0x%x\n", reg_val);
++
++ reg_val &= ~(MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK |
++ MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL);
++
++ if (!((vars->line_speed == SPEED_1000) ||
++ (vars->line_speed == SPEED_100) ||
++ (vars->line_speed == SPEED_10))) {
++
++ reg_val |= (MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_156_25M |
++ MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL);
++ if (vars->line_speed == SPEED_10000)
++ reg_val |=
++ MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4;
++ if (vars->line_speed == SPEED_13000)
++ reg_val |=
++ MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_13G;
++ }
++
++ CL45_WR_OVER_CL22(bp, params->port,
++ params->phy_addr,
++ MDIO_REG_BANK_SERDES_DIGITAL,
++ MDIO_SERDES_DIGITAL_MISC1, reg_val);
++
++}
++
++static void bnx2x_set_brcm_cl37_advertisment(struct link_params *params)
++{
++ struct bnx2x *bp = params->bp;
++ u16 val = 0;
++
++ /* configure the 48 bits for BAM AN */
++
++ /* set extended capabilities */
++ if (params->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)
++ val |= MDIO_OVER_1G_UP1_2_5G;
++ if (params->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
++ val |= MDIO_OVER_1G_UP1_10G;
++ CL45_WR_OVER_CL22(bp, params->port,
++ params->phy_addr,
++ MDIO_REG_BANK_OVER_1G,
++ MDIO_OVER_1G_UP1, val);
++
++ CL45_WR_OVER_CL22(bp, params->port,
++ params->phy_addr,
++ MDIO_REG_BANK_OVER_1G,
++ MDIO_OVER_1G_UP3, 0);
++}
++
++static void bnx2x_calc_ieee_aneg_adv(struct link_params *params, u32 *ieee_fc)
++{
++ *ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
++ /* resolve pause mode and advertisement
++ * Please refer to Table 28B-3 of the 802.3ab-1999 spec */
++
++ switch (params->req_flow_ctrl) {
++ case BNX2X_FLOW_CTRL_AUTO:
++ if (params->req_fc_auto_adv == BNX2X_FLOW_CTRL_BOTH) {
++ *ieee_fc |=
++ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
++ } else {
++ *ieee_fc |=
++ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
++ }
++ break;
++ case BNX2X_FLOW_CTRL_TX:
++ *ieee_fc |=
++ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
++ break;
++
++ case BNX2X_FLOW_CTRL_RX:
++ case BNX2X_FLOW_CTRL_BOTH:
++ *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
++ break;
++
++ case BNX2X_FLOW_CTRL_NONE:
++ default:
++ *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
++ break;
++ }
++}
++
++static void bnx2x_set_ieee_aneg_advertisment(struct link_params *params,
++ u32 ieee_fc)
++{
++ struct bnx2x *bp = params->bp;
++ /* for AN, we are always publishing full duplex */
++
++ CL45_WR_OVER_CL22(bp, params->port,
++ params->phy_addr,
++ MDIO_REG_BANK_COMBO_IEEE0,
++ MDIO_COMBO_IEEE0_AUTO_NEG_ADV, (u16)ieee_fc);
++}
++
++static void bnx2x_restart_autoneg(struct link_params *params)
++{
++ struct bnx2x *bp = params->bp;
++ DP(NETIF_MSG_LINK, "bnx2x_restart_autoneg\n");
++ if (SUPPORT_CL73) {
++ /* enable and restart clause 73 aneg */
++ u16 an_ctrl;
++
++ CL45_RD_OVER_CL22(bp, params->port,
++ params->phy_addr,
++ MDIO_REG_BANK_CL73_IEEEB0,
++ MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
++ &an_ctrl);
++ CL45_WR_OVER_CL22(bp, params->port,
++ params->phy_addr,
++ MDIO_REG_BANK_CL73_IEEEB0,
++ MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
++ (an_ctrl |
++ MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN |
++ MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN));
++
++ } else {
++ /* Enable and restart BAM/CL37 aneg */
++ u16 mii_control;
++
++ CL45_RD_OVER_CL22(bp, params->port,
++ params->phy_addr,
++ MDIO_REG_BANK_COMBO_IEEE0,
++ MDIO_COMBO_IEEE0_MII_CONTROL,
++ &mii_control);
++ DP(NETIF_MSG_LINK,
++ "bnx2x_restart_autoneg mii_control before = 0x%x\n",
++ mii_control);
++ CL45_WR_OVER_CL22(bp, params->port,
++ params->phy_addr,
++ MDIO_REG_BANK_COMBO_IEEE0,
++ MDIO_COMBO_IEEE0_MII_CONTROL,
++ (mii_control |
++ MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
++ MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN));
++ }
++}
++
++static void bnx2x_initialize_sgmii_process(struct link_params *params,
++ struct link_vars *vars)
++{
++ struct bnx2x *bp = params->bp;
++ u16 control1;
++
++ /* in SGMII mode, the unicore is always slave */
++
++ CL45_RD_OVER_CL22(bp, params->port,
++ params->phy_addr,
++ MDIO_REG_BANK_SERDES_DIGITAL,
++ MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
++ &control1);
++ control1 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT;
++ /* set sgmii mode (and not fiber) */
++ control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE |
++ MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET |
++ MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE);
++ CL45_WR_OVER_CL22(bp, params->port,
++ params->phy_addr,
++ MDIO_REG_BANK_SERDES_DIGITAL,
++ MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
++ control1);
++
++ /* if forced speed */
++ if (!(vars->line_speed == SPEED_AUTO_NEG)) {
++ /* set speed, disable autoneg */
++ u16 mii_control;
++
++ CL45_RD_OVER_CL22(bp, params->port,
++ params->phy_addr,
++ MDIO_REG_BANK_COMBO_IEEE0,
++ MDIO_COMBO_IEEE0_MII_CONTROL,
++ &mii_control);
++ mii_control &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
++ MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK|
++ MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX);
++
++ switch (vars->line_speed) {
++ case SPEED_100:
++ mii_control |=
++ MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_100;
++ break;
++ case SPEED_1000:
++ mii_control |=
++ MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_1000;
++ break;
++ case SPEED_10:
++ /* there is nothing to set for 10M */
++ break;
++ default:
++ /* invalid speed for SGMII */
++ DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n",
++ vars->line_speed);
++ break;
++ }
++
++ /* setting the full duplex */
++ if (params->req_duplex == DUPLEX_FULL)
++ mii_control |=
++ MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
++ CL45_WR_OVER_CL22(bp, params->port,
++ params->phy_addr,
++ MDIO_REG_BANK_COMBO_IEEE0,
++ MDIO_COMBO_IEEE0_MII_CONTROL,
++ mii_control);
++
++ } else { /* AN mode */
++ /* enable and restart AN */
++ bnx2x_restart_autoneg(params);
++ }
++}
++
++
++/*
++ * link management
++ */
++
++static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result)
++{ /* LD LP */
++ switch (pause_result) { /* ASYM P ASYM P */
++ case 0xb: /* 1 0 1 1 */
++ vars->flow_ctrl = BNX2X_FLOW_CTRL_TX;
++ break;
++
++ case 0xe: /* 1 1 1 0 */
++ vars->flow_ctrl = BNX2X_FLOW_CTRL_RX;
++ break;
++
++ case 0x5: /* 0 1 0 1 */
++ case 0x7: /* 0 1 1 1 */
++ case 0xd: /* 1 1 0 1 */
++ case 0xf: /* 1 1 1 1 */
++ vars->flow_ctrl = BNX2X_FLOW_CTRL_BOTH;
++ break;
++
++ default:
++ break;
++ }
++}
++
++static u8 bnx2x_ext_phy_resove_fc(struct link_params *params,
++ struct link_vars *vars)
++{
++ struct bnx2x *bp = params->bp;
++ u8 ext_phy_addr;
++ u16 ld_pause; /* local */
++ u16 lp_pause; /* link partner */
++ u16 an_complete; /* AN complete */
++ u16 pause_result;
++ u8 ret = 0;
++ u32 ext_phy_type;
++ u8 port = params->port;
++ ext_phy_addr = ((params->ext_phy_config &
++ PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
++ PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
++
++ ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
++ /* read twice */
++
++ bnx2x_cl45_read(bp, port,
++ ext_phy_type,
++ ext_phy_addr,
++ MDIO_AN_DEVAD,
++ MDIO_AN_REG_STATUS, &an_complete);
++ bnx2x_cl45_read(bp, port,
++ ext_phy_type,
++ ext_phy_addr,
++ MDIO_AN_DEVAD,
++ MDIO_AN_REG_STATUS, &an_complete);
++
++ if (an_complete & MDIO_AN_REG_STATUS_AN_COMPLETE) {
++ ret = 1;
++ bnx2x_cl45_read(bp, port,
++ ext_phy_type,
++ ext_phy_addr,
++ MDIO_AN_DEVAD,
++ MDIO_AN_REG_ADV_PAUSE, &ld_pause);
++ bnx2x_cl45_read(bp, port,
++ ext_phy_type,
++ ext_phy_addr,
++ MDIO_AN_DEVAD,
++ MDIO_AN_REG_LP_AUTO_NEG, &lp_pause);
++ pause_result = (ld_pause &
++ MDIO_AN_REG_ADV_PAUSE_MASK) >> 8;
++ pause_result |= (lp_pause &
++ MDIO_AN_REG_ADV_PAUSE_MASK) >> 10;
++ DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x \n",
++ pause_result);
++ bnx2x_pause_resolve(vars, pause_result);
++ if (vars->flow_ctrl == BNX2X_FLOW_CTRL_NONE &&
++ ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
++ bnx2x_cl45_read(bp, port,
++ ext_phy_type,
++ ext_phy_addr,
++ MDIO_AN_DEVAD,
++ MDIO_AN_REG_CL37_FC_LD, &ld_pause);
++
++ bnx2x_cl45_read(bp, port,
++ ext_phy_type,
++ ext_phy_addr,
++ MDIO_AN_DEVAD,
++ MDIO_AN_REG_CL37_FC_LP, &lp_pause);
++ pause_result = (ld_pause &
++ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 5;
++ pause_result |= (lp_pause &
++ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 7;
++
++ bnx2x_pause_resolve(vars, pause_result);
++ DP(NETIF_MSG_LINK, "Ext PHY CL37 pause result 0x%x \n",
++ pause_result);
++ }
++ }
++ return ret;
++}
++
++
++static void bnx2x_flow_ctrl_resolve(struct link_params *params,
++ struct link_vars *vars,
++ u32 gp_status)
++{
++ struct bnx2x *bp = params->bp;
++ u16 ld_pause; /* local driver */
++ u16 lp_pause; /* link partner */
++ u16 pause_result;
++
++ vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
++
++ /* resolve from gp_status in case of AN complete and not sgmii */
++ if ((params->req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
++ (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) &&
++ (!(vars->phy_flags & PHY_SGMII_FLAG)) &&
++ (XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
++ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)) {
++ CL45_RD_OVER_CL22(bp, params->port,
++ params->phy_addr,
++ MDIO_REG_BANK_COMBO_IEEE0,
++ MDIO_COMBO_IEEE0_AUTO_NEG_ADV,
++ &ld_pause);
++ CL45_RD_OVER_CL22(bp, params->port,
++ params->phy_addr,
++ MDIO_REG_BANK_COMBO_IEEE0,
++ MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1,
++ &lp_pause);
++ pause_result = (ld_pause &
++ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>5;
++ pause_result |= (lp_pause &
++ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7;
++ DP(NETIF_MSG_LINK, "pause_result 0x%x\n", pause_result);
++ bnx2x_pause_resolve(vars, pause_result);
++ } else if ((params->req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
++ (bnx2x_ext_phy_resove_fc(params, vars))) {
++ return;
++ } else {
++ if (params->req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
++ vars->flow_ctrl = params->req_fc_auto_adv;
++ else
++ vars->flow_ctrl = params->req_flow_ctrl;
++ }
++ DP(NETIF_MSG_LINK, "flow_ctrl 0x%x\n", vars->flow_ctrl);
++}
++
++
++static u8 bnx2x_link_settings_status(struct link_params *params,
++ struct link_vars *vars,
++ u32 gp_status)
++{
++ struct bnx2x *bp = params->bp;
++ u16 new_line_speed;
++ u8 rc = 0;
++ vars->link_status = 0;
++
++ if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) {
++ DP(NETIF_MSG_LINK, "phy link up gp_status=0x%x\n",
++ gp_status);
++
++ vars->phy_link_up = 1;
++ vars->link_status |= LINK_STATUS_LINK_UP;
++
++ if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS)
++ vars->duplex = DUPLEX_FULL;
++ else
++ vars->duplex = DUPLEX_HALF;
++
++ bnx2x_flow_ctrl_resolve(params, vars, gp_status);
++
++ switch (gp_status & GP_STATUS_SPEED_MASK) {
++ case GP_STATUS_10M:
++ new_line_speed = SPEED_10;
++ if (vars->duplex == DUPLEX_FULL)
++ vars->link_status |= LINK_10TFD;
++ else
++ vars->link_status |= LINK_10THD;
++ break;
++
++ case GP_STATUS_100M:
++ new_line_speed = SPEED_100;
++ if (vars->duplex == DUPLEX_FULL)
++ vars->link_status |= LINK_100TXFD;
++ else
++ vars->link_status |= LINK_100TXHD;
++ break;
++
++ case GP_STATUS_1G:
++ case GP_STATUS_1G_KX:
++ new_line_speed = SPEED_1000;
++ if (vars->duplex == DUPLEX_FULL)
++ vars->link_status |= LINK_1000TFD;
++ else
++ vars->link_status |= LINK_1000THD;
++ break;
++
++ case GP_STATUS_2_5G:
++ new_line_speed = SPEED_2500;
++ if (vars->duplex == DUPLEX_FULL)
++ vars->link_status |= LINK_2500TFD;
++ else
++ vars->link_status |= LINK_2500THD;
++ break;
++
++ case GP_STATUS_5G:
++ case GP_STATUS_6G:
++ DP(NETIF_MSG_LINK,
++ "link speed unsupported gp_status 0x%x\n",
++ gp_status);
++ return -EINVAL;
++ break;
++ case GP_STATUS_10G_KX4:
++ case GP_STATUS_10G_HIG:
++ case GP_STATUS_10G_CX4:
++ new_line_speed = SPEED_10000;
++ vars->link_status |= LINK_10GTFD;
++ break;
++
++ case GP_STATUS_12G_HIG:
++ new_line_speed = SPEED_12000;
++ vars->link_status |= LINK_12GTFD;
++ break;
++
++ case GP_STATUS_12_5G:
++ new_line_speed = SPEED_12500;
++ vars->link_status |= LINK_12_5GTFD;
++ break;
++
++ case GP_STATUS_13G:
++ new_line_speed = SPEED_13000;
++ vars->link_status |= LINK_13GTFD;
++ break;
++
++ case GP_STATUS_15G:
++ new_line_speed = SPEED_15000;
++ vars->link_status |= LINK_15GTFD;
++ break;
++
++ case GP_STATUS_16G:
++ new_line_speed = SPEED_16000;
++ vars->link_status |= LINK_16GTFD;
++ break;
++
++ default:
++ DP(NETIF_MSG_LINK,
++ "link speed unsupported gp_status 0x%x\n",
++ gp_status);
++ return -EINVAL;
++ break;
++ }
++
++ /* Upon link speed change set the NIG into drain mode.
++ This deals with a possible FIFO glitch due to clk change
++ when speed is decreased without link down indicator */
++ if (new_line_speed != vars->line_speed) {
++ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE
++ + params->port*4, 0);
++ msleep(1);
++ }
++ vars->line_speed = new_line_speed;
++ vars->link_status |= LINK_STATUS_SERDES_LINK;
++
++ if ((params->req_line_speed == SPEED_AUTO_NEG) &&
++ ((XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
++ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) ||
++ (XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
++ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705))) {
++ vars->autoneg = AUTO_NEG_ENABLED;
++
++ if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) {
++ vars->autoneg |= AUTO_NEG_COMPLETE;
++ vars->link_status |=
++ LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
++ }
++
++ vars->autoneg |= AUTO_NEG_PARALLEL_DETECTION_USED;
++ vars->link_status |=
++ LINK_STATUS_PARALLEL_DETECTION_USED;
++
++ }
++ if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
++ vars->link_status |=
++ LINK_STATUS_TX_FLOW_CONTROL_ENABLED;
++
++ if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
++ vars->link_status |=
++ LINK_STATUS_RX_FLOW_CONTROL_ENABLED;
++
++ } else { /* link_down */
++ DP(NETIF_MSG_LINK, "phy link down\n");
++
++ vars->phy_link_up = 0;
++
++ vars->duplex = DUPLEX_FULL;
++ vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
++ vars->autoneg = AUTO_NEG_DISABLED;
++ vars->mac_type = MAC_TYPE_NONE;
++ }
++
++ DP(NETIF_MSG_LINK, "gp_status 0x%x phy_link_up %x line_speed %x \n",
++ gp_status, vars->phy_link_up, vars->line_speed);
++ DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x"
++ " autoneg 0x%x\n",
++ vars->duplex,
++ vars->flow_ctrl, vars->autoneg);
++ DP(NETIF_MSG_LINK, "link_status 0x%x\n", vars->link_status);
++
++ return rc;
++}
++
++static void bnx2x_set_sgmii_tx_driver(struct link_params *params)
++{
++ struct bnx2x *bp = params->bp;
++ u16 lp_up2;
++ u16 tx_driver;
++
++ /* read precomp */
++
++ CL45_RD_OVER_CL22(bp, params->port,
++ params->phy_addr,
++ MDIO_REG_BANK_OVER_1G,
++ MDIO_OVER_1G_LP_UP2, &lp_up2);
++
++ CL45_RD_OVER_CL22(bp, params->port,
++ params->phy_addr,
++ MDIO_REG_BANK_TX0,
++ MDIO_TX0_TX_DRIVER, &tx_driver);
++
++ /* bits [10:7] at lp_up2, positioned at [15:12] */
++ lp_up2 = (((lp_up2 & MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK) >>
++ MDIO_OVER_1G_LP_UP2_PREEMPHASIS_SHIFT) <<
++ MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT);
++
++ if ((lp_up2 != 0) &&
++ (lp_up2 != (tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK))) {
++ /* replace tx_driver bits [15:12] */
++ tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK;
++ tx_driver |= lp_up2;
++ CL45_WR_OVER_CL22(bp, params->port,
++ params->phy_addr,
++ MDIO_REG_BANK_TX0,
++ MDIO_TX0_TX_DRIVER, tx_driver);
++ }
++}
++
++static u8 bnx2x_emac_program(struct link_params *params,
++ u32 line_speed, u32 duplex)
++{
++ struct bnx2x *bp = params->bp;
++ u8 port = params->port;
++ u16 mode = 0;
++
++ DP(NETIF_MSG_LINK, "setting link speed & duplex\n");
++ bnx2x_bits_dis(bp, GRCBASE_EMAC0 + port*0x400 +
++ EMAC_REG_EMAC_MODE,
++ (EMAC_MODE_25G_MODE |
++ EMAC_MODE_PORT_MII_10M |
++ EMAC_MODE_HALF_DUPLEX));
++ switch (line_speed) {
++ case SPEED_10:
++ mode |= EMAC_MODE_PORT_MII_10M;
++ break;
++
++ case SPEED_100:
++ mode |= EMAC_MODE_PORT_MII;
++ break;
++
++ case SPEED_1000:
++ mode |= EMAC_MODE_PORT_GMII;
++ break;
++
++ case SPEED_2500:
++ mode |= (EMAC_MODE_25G_MODE | EMAC_MODE_PORT_GMII);
++ break;
++
++ default:
++ /* 10G not valid for EMAC */
++ DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n", line_speed);
++ return -EINVAL;
++ }
++
++ if (duplex == DUPLEX_HALF)
++ mode |= EMAC_MODE_HALF_DUPLEX;
++ bnx2x_bits_en(bp,
++ GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
++ mode);
++
++ bnx2x_set_led(bp, params->port, LED_MODE_OPER,
++ line_speed, params->hw_led_mode, params->chip_id);
++ return 0;
++}
++
++/*****************************************************************************/
++/* External Phy section */
++/*****************************************************************************/
++static void bnx2x_hw_reset(struct bnx2x *bp, u8 port)
++{
++ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
++ MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
++ msleep(1);
++ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
++ MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
++}
++
++static void bnx2x_ext_phy_reset(struct link_params *params,
++ struct link_vars *vars)
++{
++ struct bnx2x *bp = params->bp;
++ u32 ext_phy_type;
++ u8 ext_phy_addr = ((params->ext_phy_config &
++ PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
++ PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
++ DP(NETIF_MSG_LINK, "Port %x: bnx2x_ext_phy_reset\n", params->port);
++ ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
++ /* The PHY reset is controlled by GPIO 1
++ * Give it 1ms of reset pulse
++ */
++ if (vars->phy_flags & PHY_XGXS_FLAG) {
++
++ switch (ext_phy_type) {
++ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
++ DP(NETIF_MSG_LINK, "XGXS Direct\n");
++ break;
++
++ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
++ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
++ DP(NETIF_MSG_LINK, "XGXS 8705/8706\n");
++
++ /* Restore normal power mode*/
++ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
++ MISC_REGISTERS_GPIO_OUTPUT_HIGH,
++ params->port);
++
++ /* HW reset */
++ bnx2x_hw_reset(bp, params->port);
++
++ bnx2x_cl45_write(bp, params->port,
++ ext_phy_type,
++ ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_CTRL, 0xa040);
++ break;
++ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
++ /* Unset Low Power Mode and SW reset */
++ /* Restore normal power mode*/
++ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
++ MISC_REGISTERS_GPIO_OUTPUT_HIGH,
++ params->port);
++
++ DP(NETIF_MSG_LINK, "XGXS 8072\n");
++ bnx2x_cl45_write(bp, params->port,
++ ext_phy_type,
++ ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_CTRL,
++ 1<<15);
++ break;
++ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
++ {
++ u16 emac_base;
++ emac_base = (params->port) ? GRCBASE_EMAC0 :
++ GRCBASE_EMAC1;
++
++ /* Restore normal power mode*/
++ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
++ MISC_REGISTERS_GPIO_OUTPUT_HIGH,
++ params->port);
++
++ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
++ MISC_REGISTERS_GPIO_OUTPUT_HIGH,
++ params->port);
++
++ DP(NETIF_MSG_LINK, "XGXS 8073\n");
++ }
++ break;
++
++ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
++ DP(NETIF_MSG_LINK, "XGXS SFX7101\n");
++
++ /* Restore normal power mode*/
++ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
++ MISC_REGISTERS_GPIO_OUTPUT_HIGH,
++ params->port);
++
++ /* HW reset */
++ bnx2x_hw_reset(bp, params->port);
++
++ break;
++
++ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
++ DP(NETIF_MSG_LINK, "XGXS PHY Failure detected\n");
++ break;
++
++ default:
++ DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
++ params->ext_phy_config);
++ break;
++ }
++
++ } else { /* SerDes */
++ ext_phy_type = SERDES_EXT_PHY_TYPE(params->ext_phy_config);
++ switch (ext_phy_type) {
++ case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
++ DP(NETIF_MSG_LINK, "SerDes Direct\n");
++ break;
++
++ case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
++ DP(NETIF_MSG_LINK, "SerDes 5482\n");
++ bnx2x_hw_reset(bp, params->port);
++ break;
++
++ default:
++ DP(NETIF_MSG_LINK,
++ "BAD SerDes ext_phy_config 0x%x\n",
++ params->ext_phy_config);
++ break;
++ }
++ }
++}
++
++static void bnx2x_bcm8072_external_rom_boot(struct link_params *params)
++{
++ struct bnx2x *bp = params->bp;
++ u8 port = params->port;
++ u8 ext_phy_addr = ((params->ext_phy_config &
++ PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
++ PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
++ u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
++ u16 fw_ver1, fw_ver2;
++
++ /* Need to wait 200ms after reset */
++ msleep(200);
++ /* Boot port from external ROM
++ * Set ser_boot_ctl bit in the MISC_CTRL1 register
++ */
++ bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_MISC_CTRL1, 0x0001);
++
++ /* Reset internal microprocessor */
++ bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_GEN_CTRL,
++ MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
++ /* set micro reset = 0 */
++ bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_GEN_CTRL,
++ MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
++ /* Reset internal microprocessor */
++ bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_GEN_CTRL,
++ MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
++ /* wait for 100ms for code download via SPI port */
++ msleep(100);
++
++ /* Clear ser_boot_ctl bit */
++ bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_MISC_CTRL1, 0x0000);
++ /* Wait 100ms */
++ msleep(100);
++
++ /* Print the PHY FW version */
++ bnx2x_cl45_read(bp, port, ext_phy_type, ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_ROM_VER1, &fw_ver1);
++ bnx2x_cl45_read(bp, port, ext_phy_type, ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_ROM_VER2, &fw_ver2);
++ DP(NETIF_MSG_LINK, "8072 FW version 0x%x:0x%x\n", fw_ver1, fw_ver2);
++}
++
++static u8 bnx2x_8073_is_snr_needed(struct link_params *params)
++{
++ /* This is only required for 8073A1, version 102 only */
++
++ struct bnx2x *bp = params->bp;
++ u8 ext_phy_addr = ((params->ext_phy_config &
++ PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
++ PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
++ u16 val;
++
++ /* Read 8073 HW revision*/
++ bnx2x_cl45_read(bp, params->port,
++ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
++ ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ 0xc801, &val);
++
++ if (val != 1) {
++ /* No need to workaround in 8073 A1 */
++ return 0;
++ }
++
++ bnx2x_cl45_read(bp, params->port,
++ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
++ ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_ROM_VER2, &val);
++
++ /* SNR should be applied only for version 0x102 */
++ if (val != 0x102)
++ return 0;
++
++ return 1;
++}
++
++static u8 bnx2x_bcm8073_xaui_wa(struct link_params *params)
++{
++ struct bnx2x *bp = params->bp;
++ u8 ext_phy_addr = ((params->ext_phy_config &
++ PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
++ PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
++ u16 val, cnt, cnt1 ;
++
++ bnx2x_cl45_read(bp, params->port,
++ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
++ ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ 0xc801, &val);
++
++ if (val > 0) {
++ /* No need to workaround in 8073 A1 */
++ return 0;
++ }
++ /* XAUI workaround in 8073 A0: */
++
++ /* After loading the boot ROM and restarting Autoneg,
++ poll Dev1, Reg $C820: */
++
++ for (cnt = 0; cnt < 1000; cnt++) {
++ bnx2x_cl45_read(bp, params->port,
++ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
++ ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ 0xc820, &val);
++ /* If bit [14] = 0 or bit [13] = 0, continue on with
++ system initialization (XAUI work-around not required,
++ as these bits indicate 2.5G or 1G link up). */
++ if (!(val & (1<<14)) || !(val & (1<<13))) {
++ DP(NETIF_MSG_LINK, "XAUI work-around not required\n");
++ return 0;
++ } else if (!(val & (1<<15))) {
++ DP(NETIF_MSG_LINK, "clc bit 15 went off\n");
++ /* If bit 15 is 0, then poll Dev1, Reg $C841 until
++ its MSB (bit 15) goes to 1 (indicating that the
++ XAUI workaround has completed),
++ then continue on with system initialization.*/
++ for (cnt1 = 0; cnt1 < 1000; cnt1++) {
++ bnx2x_cl45_read(bp, params->port,
++ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
++ ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ 0xc841, &val);
++ if (val & (1<<15)) {
++ DP(NETIF_MSG_LINK,
++ "XAUI workaround has completed\n");
++ return 0;
++ }
++ msleep(3);
++ }
++ break;
++ }
++ msleep(3);
++ }
++ DP(NETIF_MSG_LINK, "Warning: XAUI work-around timeout !!!\n");
++ return -EINVAL;
++
++}
++
++static void bnx2x_bcm8073_external_rom_boot(struct bnx2x *bp, u8 port,
++ u8 ext_phy_addr)
++{
++ u16 fw_ver1, fw_ver2;
++ /* Boot port from external ROM */
++ /* EDC grst */
++ bnx2x_cl45_write(bp, port,
++ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
++ ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_GEN_CTRL,
++ 0x0001);
++
++ /* ucode reboot and rst */
++ bnx2x_cl45_write(bp, port,
++ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
++ ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_GEN_CTRL,
++ 0x008c);
++
++ bnx2x_cl45_write(bp, port,
++ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
++ ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_MISC_CTRL1, 0x0001);
++
++ /* Reset internal microprocessor */
++ bnx2x_cl45_write(bp, port,
++ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
++ ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_GEN_CTRL,
++ MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
++
++ /* Release srst bit */
++ bnx2x_cl45_write(bp, port,
++ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
++ ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_GEN_CTRL,
++ MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
++
++ /* wait for 100ms for code download via SPI port */
++ msleep(100);
++
++ /* Clear ser_boot_ctl bit */
++ bnx2x_cl45_write(bp, port,
++ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
++ ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_MISC_CTRL1, 0x0000);
++
++ bnx2x_cl45_read(bp, port, PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
++ ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_ROM_VER1, &fw_ver1);
++ bnx2x_cl45_read(bp, port,
++ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
++ ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_ROM_VER2, &fw_ver2);
++ DP(NETIF_MSG_LINK, "8073 FW version 0x%x:0x%x\n", fw_ver1, fw_ver2);
++
++}
++
++static void bnx2x_bcm807x_force_10G(struct link_params *params)
++{
++ struct bnx2x *bp = params->bp;
++ u8 port = params->port;
++ u8 ext_phy_addr = ((params->ext_phy_config &
++ PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
++ PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
++ u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
++
++ /* Force KR or KX */
++ bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_CTRL,
++ 0x2040);
++ bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_10G_CTRL2,
++ 0x000b);
++ bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_BCM_CTRL,
++ 0x0000);
++ bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
++ MDIO_AN_DEVAD,
++ MDIO_AN_REG_CTRL,
++ 0x0000);
++}
++static void bnx2x_bcm8073_set_xaui_low_power_mode(struct link_params *params)
++{
++ struct bnx2x *bp = params->bp;
++ u8 port = params->port;
++ u16 val;
++ u8 ext_phy_addr = ((params->ext_phy_config &
++ PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
++ PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
++ u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
++
++ bnx2x_cl45_read(bp, params->port,
++ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
++ ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ 0xc801, &val);
++
++ if (val == 0) {
++ /* Mustn't set low power mode in 8073 A0 */
++ return;
++ }
++
++ /* Disable PLL sequencer (use read-modify-write to clear bit 13) */
++ bnx2x_cl45_read(bp, port, ext_phy_type, ext_phy_addr,
++ MDIO_XS_DEVAD,
++ MDIO_XS_PLL_SEQUENCER, &val);
++ val &= ~(1<<13);
++ bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
++ MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, val);
++
++ /* PLL controls */
++ bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
++ MDIO_XS_DEVAD, 0x805E, 0x1077);
++ bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
++ MDIO_XS_DEVAD, 0x805D, 0x0000);
++ bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
++ MDIO_XS_DEVAD, 0x805C, 0x030B);
++ bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
++ MDIO_XS_DEVAD, 0x805B, 0x1240);
++ bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
++ MDIO_XS_DEVAD, 0x805A, 0x2490);
++
++ /* Tx Controls */
++ bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
++ MDIO_XS_DEVAD, 0x80A7, 0x0C74);
++ bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
++ MDIO_XS_DEVAD, 0x80A6, 0x9041);
++ bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
++ MDIO_XS_DEVAD, 0x80A5, 0x4640);
++
++ /* Rx Controls */
++ bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
++ MDIO_XS_DEVAD, 0x80FE, 0x01C4);
++ bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
++ MDIO_XS_DEVAD, 0x80FD, 0x9249);
++ bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
++ MDIO_XS_DEVAD, 0x80FC, 0x2015);
++
++ /* Enable PLL sequencer (use read-modify-write to set bit 13) */
++ bnx2x_cl45_read(bp, port, ext_phy_type, ext_phy_addr,
++ MDIO_XS_DEVAD,
++ MDIO_XS_PLL_SEQUENCER, &val);
++ val |= (1<<13);
++ bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
++ MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, val);
++}
++
++static void bnx2x_8073_set_pause_cl37(struct link_params *params,
++ struct link_vars *vars)
++{
++
++ struct bnx2x *bp = params->bp;
++ u16 cl37_val;
++ u8 ext_phy_addr = ((params->ext_phy_config &
++ PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
++ PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
++ u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
++
++ bnx2x_cl45_read(bp, params->port,
++ ext_phy_type,
++ ext_phy_addr,
++ MDIO_AN_DEVAD,
++ MDIO_AN_REG_CL37_FC_LD, &cl37_val);
++
++ cl37_val &= ~MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
++ /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
++
++ if ((vars->ieee_fc &
++ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) ==
++ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) {
++ cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC;
++ }
++ if ((vars->ieee_fc &
++ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
++ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
++ cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
++ }
++ if ((vars->ieee_fc &
++ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
++ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) {
++ cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
++ }
++ DP(NETIF_MSG_LINK,
++ "Ext phy AN advertize cl37 0x%x\n", cl37_val);
++
++ bnx2x_cl45_write(bp, params->port,
++ ext_phy_type,
++ ext_phy_addr,
++ MDIO_AN_DEVAD,
++ MDIO_AN_REG_CL37_FC_LD, cl37_val);
++ msleep(500);
++}
++
++static void bnx2x_ext_phy_set_pause(struct link_params *params,
++ struct link_vars *vars)
++{
++ struct bnx2x *bp = params->bp;
++ u16 val;
++ u8 ext_phy_addr = ((params->ext_phy_config &
++ PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
++ PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
++ u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
++
++ /* read-modify-write pause advertising */
++ bnx2x_cl45_read(bp, params->port,
++ ext_phy_type,
++ ext_phy_addr,
++ MDIO_AN_DEVAD,
++ MDIO_AN_REG_ADV_PAUSE, &val);
++
++ val &= ~MDIO_AN_REG_ADV_PAUSE_BOTH;
++
++ /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
++
++ if ((vars->ieee_fc &
++ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
++ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
++ val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC;
++ }
++ if ((vars->ieee_fc &
++ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
++ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) {
++ val |=
++ MDIO_AN_REG_ADV_PAUSE_PAUSE;
++ }
++ DP(NETIF_MSG_LINK,
++ "Ext phy AN advertize 0x%x\n", val);
++ bnx2x_cl45_write(bp, params->port,
++ ext_phy_type,
++ ext_phy_addr,
++ MDIO_AN_DEVAD,
++ MDIO_AN_REG_ADV_PAUSE, val);
++}
++
++
++static void bnx2x_init_internal_phy(struct link_params *params,
++ struct link_vars *vars)
++{
++ struct bnx2x *bp = params->bp;
++ u8 port = params->port;
++ if (!(vars->phy_flags & PHY_SGMII_FLAG)) {
++ u16 bank, rx_eq;
++
++ rx_eq = ((params->serdes_config &
++ PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_MASK) >>
++ PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT);
++
++ DP(NETIF_MSG_LINK, "setting rx eq to 0x%x\n", rx_eq);
++ for (bank = MDIO_REG_BANK_RX0; bank <= MDIO_REG_BANK_RX_ALL;
++ bank += (MDIO_REG_BANK_RX1-MDIO_REG_BANK_RX0)) {
++ CL45_WR_OVER_CL22(bp, port,
++ params->phy_addr,
++ bank ,
++ MDIO_RX0_RX_EQ_BOOST,
++ ((rx_eq &
++ MDIO_RX0_RX_EQ_BOOST_EQUALIZER_CTRL_MASK) |
++ MDIO_RX0_RX_EQ_BOOST_OFFSET_CTRL));
++ }
++
++ /* forced speed requested? */
++ if (vars->line_speed != SPEED_AUTO_NEG) {
++ DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
++
++ /* disable autoneg */
++ bnx2x_set_autoneg(params, vars);
++
++ /* program speed and duplex */
++ bnx2x_program_serdes(params, vars);
++
++ } else { /* AN_mode */
++ DP(NETIF_MSG_LINK, "not SGMII, AN\n");
++
++ /* AN enabled */
++ bnx2x_set_brcm_cl37_advertisment(params);
++
++ /* program duplex & pause advertisement (for aneg) */
++ bnx2x_set_ieee_aneg_advertisment(params,
++ vars->ieee_fc);
++
++ /* enable autoneg */
++ bnx2x_set_autoneg(params, vars);
++
++ /* enable and restart AN */
++ bnx2x_restart_autoneg(params);
++ }
++
++ } else { /* SGMII mode */
++ DP(NETIF_MSG_LINK, "SGMII\n");
++
++ bnx2x_initialize_sgmii_process(params, vars);
++ }
++}
++
++static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
++{
++ struct bnx2x *bp = params->bp;
++ u32 ext_phy_type;
++ u8 ext_phy_addr;
++ u16 cnt;
++ u16 ctrl = 0;
++ u16 val = 0;
++ u8 rc = 0;
++ if (vars->phy_flags & PHY_XGXS_FLAG) {
++ ext_phy_addr = ((params->ext_phy_config &
++ PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
++ PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
++
++ ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
++ /* Make sure that the soft reset is off (expect for the 8072:
++ * due to the lock, it will be done inside the specific
++ * handling)
++ */
++ if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
++ (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
++ (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN) &&
++ (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) &&
++ (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073)) {
++ /* Wait for soft reset to get cleared up to 1 sec */
++ for (cnt = 0; cnt < 1000; cnt++) {
++ bnx2x_cl45_read(bp, params->port,
++ ext_phy_type,
++ ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_CTRL, &ctrl);
++ if (!(ctrl & (1<<15)))
++ break;
++ msleep(1);
++ }
++ DP(NETIF_MSG_LINK, "control reg 0x%x (after %d ms)\n",
++ ctrl, cnt);
++ }
++
++ switch (ext_phy_type) {
++ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
++ break;
++
++ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
++ DP(NETIF_MSG_LINK, "XGXS 8705\n");
++
++ bnx2x_cl45_write(bp, params->port,
++ ext_phy_type,
++ ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_MISC_CTRL,
++ 0x8288);
++ bnx2x_cl45_write(bp, params->port,
++ ext_phy_type,
++ ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_PHY_IDENTIFIER,
++ 0x7fbf);
++ bnx2x_cl45_write(bp, params->port,
++ ext_phy_type,
++ ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_CMU_PLL_BYPASS,
++ 0x0100);
++ bnx2x_cl45_write(bp, params->port,
++ ext_phy_type,
++ ext_phy_addr,
++ MDIO_WIS_DEVAD,
++ MDIO_WIS_REG_LASI_CNTL, 0x1);
++ break;
++
++ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
++ DP(NETIF_MSG_LINK, "XGXS 8706\n");
++
++ msleep(10);
++ /* Force speed */
++ /* First enable LASI */
++ bnx2x_cl45_write(bp, params->port,
++ ext_phy_type,
++ ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_RX_ALARM_CTRL,
++ 0x0400);
++ bnx2x_cl45_write(bp, params->port,
++ ext_phy_type,
++ ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_LASI_CTRL, 0x0004);
++
++ if (params->req_line_speed == SPEED_10000) {
++ DP(NETIF_MSG_LINK, "XGXS 8706 force 10Gbps\n");
++
++ bnx2x_cl45_write(bp, params->port,
++ ext_phy_type,
++ ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_DIGITAL_CTRL,
++ 0x400);
++ } else {
++ /* Force 1Gbps using autoneg with 1G
++ advertisement */
++
++ /* Allow CL37 through CL73 */
++ DP(NETIF_MSG_LINK, "XGXS 8706 AutoNeg\n");
++ bnx2x_cl45_write(bp, params->port,
++ ext_phy_type,
++ ext_phy_addr,
++ MDIO_AN_DEVAD,
++ MDIO_AN_REG_CL37_CL73,
++ 0x040c);
++
++ /* Enable Full-Duplex advertisment on CL37 */
++ bnx2x_cl45_write(bp, params->port,
++ ext_phy_type,
++ ext_phy_addr,
++ MDIO_AN_DEVAD,
++ MDIO_AN_REG_CL37_FC_LP,
++ 0x0020);
++ /* Enable CL37 AN */
++ bnx2x_cl45_write(bp, params->port,
++ ext_phy_type,
++ ext_phy_addr,
++ MDIO_AN_DEVAD,
++ MDIO_AN_REG_CL37_AN,
++ 0x1000);
++ /* 1G support */
++ bnx2x_cl45_write(bp, params->port,
++ ext_phy_type,
++ ext_phy_addr,
++ MDIO_AN_DEVAD,
++ MDIO_AN_REG_ADV, (1<<5));
++
++ /* Enable clause 73 AN */
++ bnx2x_cl45_write(bp, params->port,
++ ext_phy_type,
++ ext_phy_addr,
++ MDIO_AN_DEVAD,
++ MDIO_AN_REG_CTRL,
++ 0x1200);
++
++ }
++
++ break;
++
++ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
++ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
++ {
++ u16 tmp1;
++ u16 rx_alarm_ctrl_val;
++ u16 lasi_ctrl_val;
++ if (ext_phy_type ==
++ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) {
++ rx_alarm_ctrl_val = 0x400;
++ lasi_ctrl_val = 0x0004;
++ } else {
++ rx_alarm_ctrl_val = (1<<2);
++ lasi_ctrl_val = 0x0004;
++ }
++
++ /* enable LASI */
++ bnx2x_cl45_write(bp, params->port,
++ ext_phy_type,
++ ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_RX_ALARM_CTRL,
++ rx_alarm_ctrl_val);
++
++ bnx2x_cl45_write(bp, params->port,
++ ext_phy_type,
++ ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_LASI_CTRL,
++ lasi_ctrl_val);
++
++ bnx2x_8073_set_pause_cl37(params, vars);
++
++ if (ext_phy_type ==
++ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072){
++ bnx2x_bcm8072_external_rom_boot(params);
++ } else {
++
++ /* In case of 8073 with long xaui lines,
++ don't set the 8073 xaui low power*/
++ bnx2x_bcm8073_set_xaui_low_power_mode(params);
++ }
++
++ bnx2x_cl45_read(bp, params->port,
++ ext_phy_type,
++ ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ 0xca13,
++ &tmp1);
++
++ bnx2x_cl45_read(bp, params->port,
++ ext_phy_type,
++ ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_RX_ALARM, &tmp1);
++
++ DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1):"
++ "0x%x\n", tmp1);
++
++ /* If this is forced speed, set to KR or KX
++ * (all other are not supported)
++ */
++ if (params->loopback_mode == LOOPBACK_EXT) {
++ bnx2x_bcm807x_force_10G(params);
++ DP(NETIF_MSG_LINK,
++ "Forced speed 10G on 807X\n");
++ break;
++ } else {
++ bnx2x_cl45_write(bp, params->port,
++ ext_phy_type, ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_BCM_CTRL,
++ 0x0002);
++ }
++ if (params->req_line_speed != SPEED_AUTO_NEG) {
++ if (params->req_line_speed == SPEED_10000) {
++ val = (1<<7);
++ } else if (params->req_line_speed ==
++ SPEED_2500) {
++ val = (1<<5);
++ /* Note that 2.5G works only
++ when used with 1G advertisement */
++ } else
++ val = (1<<5);
++ } else {
++
++ val = 0;
++ if (params->speed_cap_mask &
++ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
++ val |= (1<<7);
++
++ /* Note that 2.5G works only when
++ used with 1G advertisement */
++ if (params->speed_cap_mask &
++ (PORT_HW_CFG_SPEED_CAPABILITY_D0_1G |
++ PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
++ val |= (1<<5);
++ DP(NETIF_MSG_LINK,
++ "807x autoneg val = 0x%x\n", val);
++ }
++
++ bnx2x_cl45_write(bp, params->port,
++ ext_phy_type,
++ ext_phy_addr,
++ MDIO_AN_DEVAD,
++ MDIO_AN_REG_ADV, val);
++
++ if (ext_phy_type ==
++ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
++
++ bnx2x_cl45_read(bp, params->port,
++ ext_phy_type,
++ ext_phy_addr,
++ MDIO_AN_DEVAD,
++ 0x8329, &tmp1);
++
++ if (((params->speed_cap_mask &
++ PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G) &&
++ (params->req_line_speed ==
++ SPEED_AUTO_NEG)) ||
++ (params->req_line_speed ==
++ SPEED_2500)) {
++ u16 phy_ver;
++ /* Allow 2.5G for A1 and above */
++ bnx2x_cl45_read(bp, params->port,
++ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
++ ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ 0xc801, &phy_ver);
++ DP(NETIF_MSG_LINK, "Add 2.5G\n");
++ if (phy_ver > 0)
++ tmp1 |= 1;
++ else
++ tmp1 &= 0xfffe;
++ } else {
++ DP(NETIF_MSG_LINK, "Disable 2.5G\n");
++ tmp1 &= 0xfffe;
++ }
++
++ bnx2x_cl45_write(bp, params->port,
++ ext_phy_type,
++ ext_phy_addr,
++ MDIO_AN_DEVAD,
++ 0x8329, tmp1);
++ }
++
++ /* Add support for CL37 (passive mode) II */
++
++ bnx2x_cl45_read(bp, params->port,
++ ext_phy_type,
++ ext_phy_addr,
++ MDIO_AN_DEVAD,
++ MDIO_AN_REG_CL37_FC_LD,
++ &tmp1);
++
++ bnx2x_cl45_write(bp, params->port,
++ ext_phy_type,
++ ext_phy_addr,
++ MDIO_AN_DEVAD,
++ MDIO_AN_REG_CL37_FC_LD, (tmp1 |
++ ((params->req_duplex == DUPLEX_FULL) ?
++ 0x20 : 0x40)));
++
++ /* Add support for CL37 (passive mode) III */
++ bnx2x_cl45_write(bp, params->port,
++ ext_phy_type,
++ ext_phy_addr,
++ MDIO_AN_DEVAD,
++ MDIO_AN_REG_CL37_AN, 0x1000);
++
++ if (ext_phy_type ==
++ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
++ /* The SNR will improve about 2 dB by changing
++ BW and FFE main tap. Rest commands are executed
++ after link is up*/
++ /*Change FFE main cursor to 5 in EDC register*/
++ if (bnx2x_8073_is_snr_needed(params))
++ bnx2x_cl45_write(bp, params->port,
++ ext_phy_type,
++ ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_EDC_FFE_MAIN,
++ 0xFB0C);
++
++ /* Enable FEC (Forward Error Correction)
++ Request in the AN */
++ bnx2x_cl45_read(bp, params->port,
++ ext_phy_type,
++ ext_phy_addr,
++ MDIO_AN_DEVAD,
++ MDIO_AN_REG_ADV2, &tmp1);
++
++ tmp1 |= (1<<15);
++
++ bnx2x_cl45_write(bp, params->port,
++ ext_phy_type,
++ ext_phy_addr,
++ MDIO_AN_DEVAD,
++ MDIO_AN_REG_ADV2, tmp1);
++
++ }
++
++ bnx2x_ext_phy_set_pause(params, vars);
++
++ /* Restart autoneg */
++ msleep(500);
++ bnx2x_cl45_write(bp, params->port,
++ ext_phy_type,
++ ext_phy_addr,
++ MDIO_AN_DEVAD,
++ MDIO_AN_REG_CTRL, 0x1200);
++ DP(NETIF_MSG_LINK, "807x Autoneg Restart: "
++ "Advertise 1G=%x, 10G=%x\n",
++ ((val & (1<<5)) > 0),
++ ((val & (1<<7)) > 0));
++ break;
++ }
++ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
++ DP(NETIF_MSG_LINK,
++ "Setting the SFX7101 LASI indication\n");
++
++ bnx2x_cl45_write(bp, params->port,
++ ext_phy_type,
++ ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_LASI_CTRL, 0x1);
++ DP(NETIF_MSG_LINK,
++ "Setting the SFX7101 LED to blink on traffic\n");
++ bnx2x_cl45_write(bp, params->port,
++ ext_phy_type,
++ ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_7107_LED_CNTL, (1<<3));
++
++ bnx2x_ext_phy_set_pause(params, vars);
++ /* Restart autoneg */
++ bnx2x_cl45_read(bp, params->port,
++ ext_phy_type,
++ ext_phy_addr,
++ MDIO_AN_DEVAD,
++ MDIO_AN_REG_CTRL, &val);
++ val |= 0x200;
++ bnx2x_cl45_write(bp, params->port,
++ ext_phy_type,
++ ext_phy_addr,
++ MDIO_AN_DEVAD,
++ MDIO_AN_REG_CTRL, val);
++ break;
++ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
++ DP(NETIF_MSG_LINK,
++ "XGXS PHY Failure detected 0x%x\n",
++ params->ext_phy_config);
++ rc = -EINVAL;
++ break;
++ default:
++ DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
++ params->ext_phy_config);
++ rc = -EINVAL;
++ break;
++ }
++
++ } else { /* SerDes */
++
++ ext_phy_type = SERDES_EXT_PHY_TYPE(params->ext_phy_config);
++ switch (ext_phy_type) {
++ case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
++ DP(NETIF_MSG_LINK, "SerDes Direct\n");
++ break;
++
++ case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
++ DP(NETIF_MSG_LINK, "SerDes 5482\n");
++ break;
++
++ default:
++ DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n",
++ params->ext_phy_config);
++ break;
++ }
++ }
++ return rc;
++}
++
++
++static u8 bnx2x_ext_phy_is_link_up(struct link_params *params,
++ struct link_vars *vars)
++{
++ struct bnx2x *bp = params->bp;
++ u32 ext_phy_type;
++ u8 ext_phy_addr;
++ u16 val1 = 0, val2;
++ u16 rx_sd, pcs_status;
++ u8 ext_phy_link_up = 0;
++ u8 port = params->port;
++ if (vars->phy_flags & PHY_XGXS_FLAG) {
++ ext_phy_addr = ((params->ext_phy_config &
++ PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
++ PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
++
++ ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
++ switch (ext_phy_type) {
++ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
++ DP(NETIF_MSG_LINK, "XGXS Direct\n");
++ ext_phy_link_up = 1;
++ break;
++
++ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
++ DP(NETIF_MSG_LINK, "XGXS 8705\n");
++ bnx2x_cl45_read(bp, params->port, ext_phy_type,
++ ext_phy_addr,
++ MDIO_WIS_DEVAD,
++ MDIO_WIS_REG_LASI_STATUS, &val1);
++ DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);
++
++ bnx2x_cl45_read(bp, params->port, ext_phy_type,
++ ext_phy_addr,
++ MDIO_WIS_DEVAD,
++ MDIO_WIS_REG_LASI_STATUS, &val1);
++ DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);
++
++ bnx2x_cl45_read(bp, params->port, ext_phy_type,
++ ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_RX_SD, &rx_sd);
++ DP(NETIF_MSG_LINK, "8705 rx_sd 0x%x\n", rx_sd);
++ ext_phy_link_up = (rx_sd & 0x1);
++ if (ext_phy_link_up)
++ vars->line_speed = SPEED_10000;
++ break;
++
++ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
++ DP(NETIF_MSG_LINK, "XGXS 8706\n");
++ bnx2x_cl45_read(bp, params->port, ext_phy_type,
++ ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_LASI_STATUS, &val1);
++ DP(NETIF_MSG_LINK, "8706 LASI status 0x%x\n", val1);
++
++ bnx2x_cl45_read(bp, params->port, ext_phy_type,
++ ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_LASI_STATUS, &val1);
++ DP(NETIF_MSG_LINK, "8706 LASI status 0x%x\n", val1);
++
++ bnx2x_cl45_read(bp, params->port, ext_phy_type,
++ ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_RX_SD, &rx_sd);
++ bnx2x_cl45_read(bp, params->port, ext_phy_type,
++ ext_phy_addr,
++ MDIO_PCS_DEVAD,
++ MDIO_PCS_REG_STATUS, &pcs_status);
++
++ bnx2x_cl45_read(bp, params->port, ext_phy_type,
++ ext_phy_addr,
++ MDIO_AN_DEVAD,
++ MDIO_AN_REG_LINK_STATUS, &val2);
++ bnx2x_cl45_read(bp, params->port, ext_phy_type,
++ ext_phy_addr,
++ MDIO_AN_DEVAD,
++ MDIO_AN_REG_LINK_STATUS, &val2);
++
++ DP(NETIF_MSG_LINK, "8706 rx_sd 0x%x"
++ " pcs_status 0x%x 1Gbps link_status 0x%x\n",
++ rx_sd, pcs_status, val2);
++ /* link is up if both bit 0 of pmd_rx_sd and
++ * bit 0 of pcs_status are set, or if the autoneg bit
++ 1 is set
++ */
++ ext_phy_link_up = ((rx_sd & pcs_status & 0x1) ||
++ (val2 & (1<<1)));
++ if (ext_phy_link_up) {
++ if (val2 & (1<<1))
++ vars->line_speed = SPEED_1000;
++ else
++ vars->line_speed = SPEED_10000;
++ }
++
++ /* clear LASI indication*/
++ bnx2x_cl45_read(bp, params->port, ext_phy_type,
++ ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_RX_ALARM, &val2);
++ break;
++
++ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
++ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
++ {
++ u16 link_status = 0;
++ u16 an1000_status = 0;
++ if (ext_phy_type ==
++ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) {
++ bnx2x_cl45_read(bp, params->port,
++ ext_phy_type,
++ ext_phy_addr,
++ MDIO_PCS_DEVAD,
++ MDIO_PCS_REG_LASI_STATUS, &val1);
++ bnx2x_cl45_read(bp, params->port,
++ ext_phy_type,
++ ext_phy_addr,
++ MDIO_PCS_DEVAD,
++ MDIO_PCS_REG_LASI_STATUS, &val2);
++ DP(NETIF_MSG_LINK,
++ "870x LASI status 0x%x->0x%x\n",
++ val1, val2);
++
++ } else {
++ /* In 8073, port1 is directed through emac0 and
++ * port0 is directed through emac1
++ */
++ bnx2x_cl45_read(bp, params->port,
++ ext_phy_type,
++ ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_LASI_STATUS, &val1);
++
++ DP(NETIF_MSG_LINK,
++ "8703 LASI status 0x%x\n",
++ val1);
++ }
++
++ /* clear the interrupt LASI status register */
++ bnx2x_cl45_read(bp, params->port,
++ ext_phy_type,
++ ext_phy_addr,
++ MDIO_PCS_DEVAD,
++ MDIO_PCS_REG_STATUS, &val2);
++ bnx2x_cl45_read(bp, params->port,
++ ext_phy_type,
++ ext_phy_addr,
++ MDIO_PCS_DEVAD,
++ MDIO_PCS_REG_STATUS, &val1);
++ DP(NETIF_MSG_LINK, "807x PCS status 0x%x->0x%x\n",
++ val2, val1);
++ /* Clear MSG-OUT */
++ bnx2x_cl45_read(bp, params->port,
++ ext_phy_type,
++ ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ 0xca13,
++ &val1);
++
++ /* Check the LASI */
++ bnx2x_cl45_read(bp, params->port,
++ ext_phy_type,
++ ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_RX_ALARM, &val2);
++
++ DP(NETIF_MSG_LINK, "KR 0x9003 0x%x\n", val2);
++
++ /* Check the link status */
++ bnx2x_cl45_read(bp, params->port,
++ ext_phy_type,
++ ext_phy_addr,
++ MDIO_PCS_DEVAD,
++ MDIO_PCS_REG_STATUS, &val2);
++ DP(NETIF_MSG_LINK, "KR PCS status 0x%x\n", val2);
++
++ bnx2x_cl45_read(bp, params->port,
++ ext_phy_type,
++ ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_STATUS, &val2);
++ bnx2x_cl45_read(bp, params->port,
++ ext_phy_type,
++ ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_STATUS, &val1);
++ ext_phy_link_up = ((val1 & 4) == 4);
++ DP(NETIF_MSG_LINK, "PMA_REG_STATUS=0x%x\n", val1);
++ if (ext_phy_type ==
++ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
++
++ if (ext_phy_link_up &&
++ ((params->req_line_speed !=
++ SPEED_10000))) {
++ if (bnx2x_bcm8073_xaui_wa(params)
++ != 0) {
++ ext_phy_link_up = 0;
++ break;
++ }
++ }
++ bnx2x_cl45_read(bp, params->port,
++ ext_phy_type,
++ ext_phy_addr,
++ MDIO_AN_DEVAD,
++ 0x8304,
++ &an1000_status);
++ bnx2x_cl45_read(bp, params->port,
++ ext_phy_type,
++ ext_phy_addr,
++ MDIO_AN_DEVAD,
++ 0x8304,
++ &an1000_status);
++
++ /* Check the link status on 1.1.2 */
++ bnx2x_cl45_read(bp, params->port,
++ ext_phy_type,
++ ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_STATUS, &val2);
++ bnx2x_cl45_read(bp, params->port,
++ ext_phy_type,
++ ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_STATUS, &val1);
++ DP(NETIF_MSG_LINK, "KR PMA status 0x%x->0x%x,"
++ "an_link_status=0x%x\n",
++ val2, val1, an1000_status);
++
++ ext_phy_link_up = (((val1 & 4) == 4) ||
++ (an1000_status & (1<<1)));
++ if (ext_phy_link_up &&
++ bnx2x_8073_is_snr_needed(params)) {
++ /* The SNR will improve about 2 dB by
++ changing the BW and FFE main tap.*/
++
++ /* The 1st write to change FFE main
++ tap is set before restart AN */
++ /* Change PLL Bandwidth in EDC
++ register */
++ bnx2x_cl45_write(bp, port, ext_phy_type,
++ ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_PLL_BANDWIDTH,
++ 0x26BC);
++
++ /* Change CDR Bandwidth in EDC
++ register */
++ bnx2x_cl45_write(bp, port, ext_phy_type,
++ ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_CDR_BANDWIDTH,
++ 0x0333);
++
++
++ }
++ bnx2x_cl45_read(bp, params->port,
++ ext_phy_type,
++ ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ 0xc820,
++ &link_status);
++
++ /* Bits 0..2 --> speed detected,
++ bits 13..15--> link is down */
++ if ((link_status & (1<<2)) &&
++ (!(link_status & (1<<15)))) {
++ ext_phy_link_up = 1;
++ vars->line_speed = SPEED_10000;
++ DP(NETIF_MSG_LINK,
++ "port %x: External link"
++ " up in 10G\n", params->port);
++ } else if ((link_status & (1<<1)) &&
++ (!(link_status & (1<<14)))) {
++ ext_phy_link_up = 1;
++ vars->line_speed = SPEED_2500;
++ DP(NETIF_MSG_LINK,
++ "port %x: External link"
++ " up in 2.5G\n", params->port);
++ } else if ((link_status & (1<<0)) &&
++ (!(link_status & (1<<13)))) {
++ ext_phy_link_up = 1;
++ vars->line_speed = SPEED_1000;
++ DP(NETIF_MSG_LINK,
++ "port %x: External link"
++ " up in 1G\n", params->port);
++ } else {
++ ext_phy_link_up = 0;
++ DP(NETIF_MSG_LINK,
++ "port %x: External link"
++ " is down\n", params->port);
++ }
++ } else {
++ /* See if 1G link is up for the 8072 */
++ bnx2x_cl45_read(bp, params->port,
++ ext_phy_type,
++ ext_phy_addr,
++ MDIO_AN_DEVAD,
++ 0x8304,
++ &an1000_status);
++ bnx2x_cl45_read(bp, params->port,
++ ext_phy_type,
++ ext_phy_addr,
++ MDIO_AN_DEVAD,
++ 0x8304,
++ &an1000_status);
++ if (an1000_status & (1<<1)) {
++ ext_phy_link_up = 1;
++ vars->line_speed = SPEED_1000;
++ DP(NETIF_MSG_LINK,
++ "port %x: External link"
++ " up in 1G\n", params->port);
++ } else if (ext_phy_link_up) {
++ ext_phy_link_up = 1;
++ vars->line_speed = SPEED_10000;
++ DP(NETIF_MSG_LINK,
++ "port %x: External link"
++ " up in 10G\n", params->port);
++ }
++ }
++
++
++ break;
++ }
++ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
++ bnx2x_cl45_read(bp, params->port, ext_phy_type,
++ ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_LASI_STATUS, &val2);
++ bnx2x_cl45_read(bp, params->port, ext_phy_type,
++ ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_LASI_STATUS, &val1);
++ DP(NETIF_MSG_LINK,
++ "10G-base-T LASI status 0x%x->0x%x\n",
++ val2, val1);
++ bnx2x_cl45_read(bp, params->port, ext_phy_type,
++ ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_STATUS, &val2);
++ bnx2x_cl45_read(bp, params->port, ext_phy_type,
++ ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_STATUS, &val1);
++ DP(NETIF_MSG_LINK,
++ "10G-base-T PMA status 0x%x->0x%x\n",
++ val2, val1);
++ ext_phy_link_up = ((val1 & 4) == 4);
++ /* if link is up
++ * print the AN outcome of the SFX7101 PHY
++ */
++ if (ext_phy_link_up) {
++ bnx2x_cl45_read(bp, params->port,
++ ext_phy_type,
++ ext_phy_addr,
++ MDIO_AN_DEVAD,
++ MDIO_AN_REG_MASTER_STATUS,
++ &val2);
++ vars->line_speed = SPEED_10000;
++ DP(NETIF_MSG_LINK,
++ "SFX7101 AN status 0x%x->Master=%x\n",
++ val2,
++ (val2 & (1<<14)));
++ }
++ break;
++
++ default:
++ DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
++ params->ext_phy_config);
++ ext_phy_link_up = 0;
++ break;
++ }
++
++ } else { /* SerDes */
++ ext_phy_type = SERDES_EXT_PHY_TYPE(params->ext_phy_config);
++ switch (ext_phy_type) {
++ case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
++ DP(NETIF_MSG_LINK, "SerDes Direct\n");
++ ext_phy_link_up = 1;
++ break;
++
++ case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
++ DP(NETIF_MSG_LINK, "SerDes 5482\n");
++ ext_phy_link_up = 1;
++ break;
++
++ default:
++ DP(NETIF_MSG_LINK,
++ "BAD SerDes ext_phy_config 0x%x\n",
++ params->ext_phy_config);
++ ext_phy_link_up = 0;
++ break;
++ }
++ }
++
++ return ext_phy_link_up;
++}
++
++static void bnx2x_link_int_enable(struct link_params *params)
++{
++ u8 port = params->port;
++ u32 ext_phy_type;
++ u32 mask;
++ struct bnx2x *bp = params->bp;
++ /* setting the status to report on link up
++ for either XGXS or SerDes */
++
++ if (params->switch_cfg == SWITCH_CFG_10G) {
++ mask = (NIG_MASK_XGXS0_LINK10G |
++ NIG_MASK_XGXS0_LINK_STATUS);
++ DP(NETIF_MSG_LINK, "enabled XGXS interrupt\n");
++ ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
++ if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
++ (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
++ (ext_phy_type !=
++ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)) {
++ mask |= NIG_MASK_MI_INT;
++ DP(NETIF_MSG_LINK, "enabled external phy int\n");
++ }
++
++ } else { /* SerDes */
++ mask = NIG_MASK_SERDES0_LINK_STATUS;
++ DP(NETIF_MSG_LINK, "enabled SerDes interrupt\n");
++ ext_phy_type = SERDES_EXT_PHY_TYPE(params->ext_phy_config);
++ if ((ext_phy_type !=
++ PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT) &&
++ (ext_phy_type !=
++ PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN)) {
++ mask |= NIG_MASK_MI_INT;
++ DP(NETIF_MSG_LINK, "enabled external phy int\n");
++ }
++ }
++ bnx2x_bits_en(bp,
++ NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
++ mask);
++ DP(NETIF_MSG_LINK, "port %x, is_xgxs=%x, int_status 0x%x\n", port,
++ (params->switch_cfg == SWITCH_CFG_10G),
++ REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4));
++
++ DP(NETIF_MSG_LINK, " int_mask 0x%x, MI_INT %x, SERDES_LINK %x\n",
++ REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4),
++ REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18),
++ REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS+port*0x3c));
++ DP(NETIF_MSG_LINK, " 10G %x, XGXS_LINK %x\n",
++ REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
++ REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68));
++}
++
++
++/*
++ * link management
++ */
++static void bnx2x_link_int_ack(struct link_params *params,
++ struct link_vars *vars, u8 is_10g)
++{
++ struct bnx2x *bp = params->bp;
++ u8 port = params->port;
++
++ /* first reset all status
++ * we assume only one line will be changed at a time */
++ bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
++ (NIG_STATUS_XGXS0_LINK10G |
++ NIG_STATUS_XGXS0_LINK_STATUS |
++ NIG_STATUS_SERDES0_LINK_STATUS));
++ if (vars->phy_link_up) {
++ if (is_10g) {
++ /* Disable the 10G link interrupt
++ * by writing 1 to the status register
++ */
++ DP(NETIF_MSG_LINK, "10G XGXS phy link up\n");
++ bnx2x_bits_en(bp,
++ NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
++ NIG_STATUS_XGXS0_LINK10G);
++
++ } else if (params->switch_cfg == SWITCH_CFG_10G) {
++ /* Disable the link interrupt
++ * by writing 1 to the relevant lane
++ * in the status register
++ */
++ u32 ser_lane = ((params->lane_config &
++ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
++ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
++
++ DP(NETIF_MSG_LINK, "1G XGXS phy link up\n");
++ bnx2x_bits_en(bp,
++ NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
++ ((1 << ser_lane) <<
++ NIG_STATUS_XGXS0_LINK_STATUS_SIZE));
++
++ } else { /* SerDes */
++ DP(NETIF_MSG_LINK, "SerDes phy link up\n");
++ /* Disable the link interrupt
++ * by writing 1 to the status register
++ */
++ bnx2x_bits_en(bp,
++ NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
++ NIG_STATUS_SERDES0_LINK_STATUS);
++ }
++
++ } else { /* link_down */
++ }
++}
++
++static u8 bnx2x_format_ver(u32 num, u8 *str, u16 len)
++{
++ u8 *str_ptr = str;
++ u32 mask = 0xf0000000;
++ u8 shift = 8*4;
++ u8 digit;
++ if (len < 10) {
++ /* Need more than 10 chars for this format */
++ *str_ptr = '\0';
++ return -EINVAL;
++ }
++ while (shift > 0) {
++
++ shift -= 4;
++ digit = ((num & mask) >> shift);
++ if (digit < 0xa)
++ *str_ptr = digit + '0';
++ else
++ *str_ptr = digit - 0xa + 'a';
++ str_ptr++;
++ mask = mask >> 4;
++ if (shift == 4*4) {
++ *str_ptr = ':';
++ str_ptr++;
++ }
++ }
++ *str_ptr = '\0';
++ return 0;
++}
++
++
++static void bnx2x_turn_on_ef(struct bnx2x *bp, u8 port, u8 ext_phy_addr,
++ u32 ext_phy_type)
++{
++ u32 cnt = 0;
++ u16 ctrl = 0;
++ /* Enable EMAC0 in order to enable MDIO */
++ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
++ (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
++ msleep(5);
++
++ /* take ext phy out of reset */
++ bnx2x_set_gpio(bp,
++ MISC_REGISTERS_GPIO_2,
++ MISC_REGISTERS_GPIO_HIGH,
++ port);
++
++ bnx2x_set_gpio(bp,
++ MISC_REGISTERS_GPIO_1,
++ MISC_REGISTERS_GPIO_HIGH,
++ port);
++
++ /* wait for 5ms */
++ msleep(5);
++
++ for (cnt = 0; cnt < 1000; cnt++) {
++ msleep(1);
++ bnx2x_cl45_read(bp, port,
++ ext_phy_type,
++ ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_CTRL,
++ &ctrl);
++ if (!(ctrl & (1<<15))) {
++ DP(NETIF_MSG_LINK, "Reset completed\n\n");
++ break;
++ }
++ }
++}
++
++static void bnx2x_turn_off_sf(struct bnx2x *bp, u8 port)
++{
++ /* put sf to reset */
++ bnx2x_set_gpio(bp,
++ MISC_REGISTERS_GPIO_1,
++ MISC_REGISTERS_GPIO_LOW,
++ port);
++ bnx2x_set_gpio(bp,
++ MISC_REGISTERS_GPIO_2,
++ MISC_REGISTERS_GPIO_LOW,
++ port);
++}
++
++u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
++ u8 *version, u16 len)
++{
++ struct bnx2x *bp = params->bp;
++ u32 ext_phy_type = 0;
++ u16 val = 0;
++ u8 ext_phy_addr = 0 ;
++ u8 status = 0 ;
++ u32 ver_num;
++
++ if (version == NULL || params == NULL)
++ return -EINVAL;
++
++ /* reset the returned value to zero */
++ ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
++ ext_phy_addr = ((params->ext_phy_config &
++ PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
++ PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
++
++ switch (ext_phy_type) {
++ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
++
++ if (len < 5)
++ return -EINVAL;
++
++ /* Take ext phy out of reset */
++ if (!driver_loaded)
++ bnx2x_turn_on_ef(bp, params->port, ext_phy_addr,
++ ext_phy_type);
++
++ /* wait for 1ms */
++ msleep(1);
++
++ bnx2x_cl45_read(bp, params->port,
++ ext_phy_type,
++ ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_7101_VER1, &val);
++ version[2] = (val & 0xFF);
++ version[3] = ((val & 0xFF00)>>8);
++
++ bnx2x_cl45_read(bp, params->port,
++ ext_phy_type,
++ ext_phy_addr,
++ MDIO_PMA_DEVAD, MDIO_PMA_REG_7101_VER2,
++ &val);
++ version[0] = (val & 0xFF);
++ version[1] = ((val & 0xFF00)>>8);
++ version[4] = '\0';
++
++ if (!driver_loaded)
++ bnx2x_turn_off_sf(bp, params->port);
++ break;
++ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
++ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
++ {
++ /* Take ext phy out of reset */
++ if (!driver_loaded)
++ bnx2x_turn_on_ef(bp, params->port, ext_phy_addr,
++ ext_phy_type);
++
++ bnx2x_cl45_read(bp, params->port, ext_phy_type,
++ ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_ROM_VER1, &val);
++ ver_num = val<<16;
++ bnx2x_cl45_read(bp, params->port, ext_phy_type,
++ ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_ROM_VER2, &val);
++ ver_num |= val;
++ status = bnx2x_format_ver(ver_num, version, len);
++ break;
++ }
++ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
++ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
++
++ bnx2x_cl45_read(bp, params->port, ext_phy_type,
++ ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_ROM_VER1, &val);
++ ver_num = val<<16;
++ bnx2x_cl45_read(bp, params->port, ext_phy_type,
++ ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_ROM_VER2, &val);
++ ver_num |= val;
++ status = bnx2x_format_ver(ver_num, version, len);
++ break;
++
++ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
++ break;
++
++ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
++ DP(NETIF_MSG_LINK, "bnx2x_get_ext_phy_fw_version:"
++ " type is FAILURE!\n");
++ status = -EINVAL;
++ break;
++
++ default:
++ break;
++ }
++ return status;
++}
++
++static void bnx2x_set_xgxs_loopback(struct link_params *params,
++ struct link_vars *vars,
++ u8 is_10g)
++{
++ u8 port = params->port;
++ struct bnx2x *bp = params->bp;
++
++ if (is_10g) {
++ u32 md_devad;
++
++ DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n");
++
++ /* change the uni_phy_addr in the nig */
++ md_devad = REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD +
++ port*0x18));
++
++ REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, 0x5);
++
++ bnx2x_cl45_write(bp, port, 0,
++ params->phy_addr,
++ 5,
++ (MDIO_REG_BANK_AER_BLOCK +
++ (MDIO_AER_BLOCK_AER_REG & 0xf)),
++ 0x2800);
++
++ bnx2x_cl45_write(bp, port, 0,
++ params->phy_addr,
++ 5,
++ (MDIO_REG_BANK_CL73_IEEEB0 +
++ (MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)),
++ 0x6041);
++ msleep(200);
++ /* set aer mmd back */
++ bnx2x_set_aer_mmd(params, vars);
++
++ /* and md_devad */
++ REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
++ md_devad);
++
++ } else {
++ u16 mii_control;
++
++ DP(NETIF_MSG_LINK, "XGXS 1G loopback enable\n");
++
++ CL45_RD_OVER_CL22(bp, port,
++ params->phy_addr,
++ MDIO_REG_BANK_COMBO_IEEE0,
++ MDIO_COMBO_IEEE0_MII_CONTROL,
++ &mii_control);
++
++ CL45_WR_OVER_CL22(bp, port,
++ params->phy_addr,
++ MDIO_REG_BANK_COMBO_IEEE0,
++ MDIO_COMBO_IEEE0_MII_CONTROL,
++ (mii_control |
++ MDIO_COMBO_IEEO_MII_CONTROL_LOOPBACK));
++ }
++}
++
++
++static void bnx2x_ext_phy_loopback(struct link_params *params)
++{
++ struct bnx2x *bp = params->bp;
++ u8 ext_phy_addr;
++ u32 ext_phy_type;
++
++ if (params->switch_cfg == SWITCH_CFG_10G) {
++ ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
++ /* CL37 Autoneg Enabled */
++ ext_phy_addr = ((params->ext_phy_config &
++ PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
++ PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
++ switch (ext_phy_type) {
++ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
++ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN:
++ DP(NETIF_MSG_LINK,
++ "ext_phy_loopback: We should not get here\n");
++ break;
++ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
++ DP(NETIF_MSG_LINK, "ext_phy_loopback: 8705\n");
++ break;
++ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
++ DP(NETIF_MSG_LINK, "ext_phy_loopback: 8706\n");
++ break;
++ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
++ /* SFX7101_XGXS_TEST1 */
++ bnx2x_cl45_write(bp, params->port, ext_phy_type,
++ ext_phy_addr,
++ MDIO_XS_DEVAD,
++ MDIO_XS_SFX7101_XGXS_TEST1,
++ 0x100);
++ DP(NETIF_MSG_LINK,
++ "ext_phy_loopback: set ext phy loopback\n");
++ break;
++ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
++
++ break;
++ } /* switch external PHY type */
++ } else {
++ /* serdes */
++ ext_phy_type = SERDES_EXT_PHY_TYPE(params->ext_phy_config);
++ ext_phy_addr = (params->ext_phy_config &
++ PORT_HW_CFG_SERDES_EXT_PHY_ADDR_MASK)
++ >> PORT_HW_CFG_SERDES_EXT_PHY_ADDR_SHIFT;
++ }
++}
++
++
++/*
++ *------------------------------------------------------------------------
++ * bnx2x_override_led_value -
++ *
++ * Override the LED value of the requested LED
++ *
++ *------------------------------------------------------------------------
++ */
++u8 bnx2x_override_led_value(struct bnx2x *bp, u8 port,
++ u32 led_idx, u32 value)
++{
++ u32 reg_val;
++
++ /* If port 0 then use EMAC0, else use EMAC1*/
++ u32 emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
++
++ DP(NETIF_MSG_LINK,
++ "bnx2x_override_led_value() port %x led_idx %d value %d\n",
++ port, led_idx, value);
++
++ switch (led_idx) {
++ case 0: /* 10MB led */
++ /* Read the current value of the LED register in
++ the EMAC block */
++ reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
++ /* Set the OVERRIDE bit to 1 */
++ reg_val |= EMAC_LED_OVERRIDE;
++ /* If value is 1, set the 10M_OVERRIDE bit,
++ otherwise reset it.*/
++ reg_val = (value == 1) ? (reg_val | EMAC_LED_10MB_OVERRIDE) :
++ (reg_val & ~EMAC_LED_10MB_OVERRIDE);
++ REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
++ break;
++ case 1: /*100MB led */
++ /*Read the current value of the LED register in
++ the EMAC block */
++ reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
++ /* Set the OVERRIDE bit to 1 */
++ reg_val |= EMAC_LED_OVERRIDE;
++ /* If value is 1, set the 100M_OVERRIDE bit,
++ otherwise reset it.*/
++ reg_val = (value == 1) ? (reg_val | EMAC_LED_100MB_OVERRIDE) :
++ (reg_val & ~EMAC_LED_100MB_OVERRIDE);
++ REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
++ break;
++ case 2: /* 1000MB led */
++ /* Read the current value of the LED register in the
++ EMAC block */
++ reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
++ /* Set the OVERRIDE bit to 1 */
++ reg_val |= EMAC_LED_OVERRIDE;
++ /* If value is 1, set the 1000M_OVERRIDE bit, otherwise
++ reset it. */
++ reg_val = (value == 1) ? (reg_val | EMAC_LED_1000MB_OVERRIDE) :
++ (reg_val & ~EMAC_LED_1000MB_OVERRIDE);
++ REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
++ break;
++ case 3: /* 2500MB led */
++ /* Read the current value of the LED register in the
++ EMAC block*/
++ reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
++ /* Set the OVERRIDE bit to 1 */
++ reg_val |= EMAC_LED_OVERRIDE;
++ /* If value is 1, set the 2500M_OVERRIDE bit, otherwise
++ reset it.*/
++ reg_val = (value == 1) ? (reg_val | EMAC_LED_2500MB_OVERRIDE) :
++ (reg_val & ~EMAC_LED_2500MB_OVERRIDE);
++ REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
++ break;
++ case 4: /*10G led */
++ if (port == 0) {
++ REG_WR(bp, NIG_REG_LED_10G_P0,
++ value);
++ } else {
++ REG_WR(bp, NIG_REG_LED_10G_P1,
++ value);
++ }
++ break;
++ case 5: /* TRAFFIC led */
++ /* Find if the traffic control is via BMAC or EMAC */
++ if (port == 0)
++ reg_val = REG_RD(bp, NIG_REG_NIG_EMAC0_EN);
++ else
++ reg_val = REG_RD(bp, NIG_REG_NIG_EMAC1_EN);
++
++ /* Override the traffic led in the EMAC:*/
++ if (reg_val == 1) {
++ /* Read the current value of the LED register in
++ the EMAC block */
++ reg_val = REG_RD(bp, emac_base +
++ EMAC_REG_EMAC_LED);
++ /* Set the TRAFFIC_OVERRIDE bit to 1 */
++ reg_val |= EMAC_LED_OVERRIDE;
++ /* If value is 1, set the TRAFFIC bit, otherwise
++ reset it.*/
++ reg_val = (value == 1) ? (reg_val | EMAC_LED_TRAFFIC) :
++ (reg_val & ~EMAC_LED_TRAFFIC);
++ REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
++ } else { /* Override the traffic led in the BMAC: */
++ REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0
++ + port*4, 1);
++ REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 + port*4,
++ value);
++ }
++ break;
++ default:
++ DP(NETIF_MSG_LINK,
++ "bnx2x_override_led_value() unknown led index %d "
++ "(should be 0-5)\n", led_idx);
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++
++u8 bnx2x_set_led(struct bnx2x *bp, u8 port, u8 mode, u32 speed,
++ u16 hw_led_mode, u32 chip_id)
++{
++ u8 rc = 0;
++ u32 tmp;
++ u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
++ DP(NETIF_MSG_LINK, "bnx2x_set_led: port %x, mode %d\n", port, mode);
++ DP(NETIF_MSG_LINK, "speed 0x%x, hw_led_mode 0x%x\n",
++ speed, hw_led_mode);
++ switch (mode) {
++ case LED_MODE_OFF:
++ REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 0);
++ REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
++ SHARED_HW_CFG_LED_MAC1);
++
++ tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
++ EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp | EMAC_LED_OVERRIDE));
++ break;
++
++ case LED_MODE_OPER:
++ REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, hw_led_mode);
++ REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 +
++ port*4, 0);
++ /* Set blinking rate to ~15.9Hz */
++ REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4,
++ LED_BLINK_RATE_VAL);
++ REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 +
++ port*4, 1);
++ tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
++ EMAC_WR(bp, EMAC_REG_EMAC_LED,
++ (tmp & (~EMAC_LED_OVERRIDE)));
++
++ if (!CHIP_IS_E1H(bp) &&
++ ((speed == SPEED_2500) ||
++ (speed == SPEED_1000) ||
++ (speed == SPEED_100) ||
++ (speed == SPEED_10))) {
++ /* On Everest 1 Ax chip versions for speeds less than
++ 10G LED scheme is different */
++ REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0
++ + port*4, 1);
++ REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 +
++ port*4, 0);
++ REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 +
++ port*4, 1);
++ }
++ break;
++
++ default:
++ rc = -EINVAL;
++ DP(NETIF_MSG_LINK, "bnx2x_set_led: Invalid led mode %d\n",
++ mode);
++ break;
++ }
++ return rc;
++
++}
++
++u8 bnx2x_test_link(struct link_params *params, struct link_vars *vars)
++{
++ struct bnx2x *bp = params->bp;
++ u16 gp_status = 0;
++
++ CL45_RD_OVER_CL22(bp, params->port,
++ params->phy_addr,
++ MDIO_REG_BANK_GP_STATUS,
++ MDIO_GP_STATUS_TOP_AN_STATUS1,
++ &gp_status);
++ /* link is up only if both local phy and external phy are up */
++ if ((gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) &&
++ bnx2x_ext_phy_is_link_up(params, vars))
++ return 0;
++
++ return -ESRCH;
++}
++
++static u8 bnx2x_link_initialize(struct link_params *params,
++ struct link_vars *vars)
++{
++ struct bnx2x *bp = params->bp;
++ u8 port = params->port;
++ u8 rc = 0;
++ u8 non_ext_phy;
++ u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
++ /* Activate the external PHY */
++ bnx2x_ext_phy_reset(params, vars);
++
++ bnx2x_set_aer_mmd(params, vars);
++
++ if (vars->phy_flags & PHY_XGXS_FLAG)
++ bnx2x_set_master_ln(params);
++
++ rc = bnx2x_reset_unicore(params);
++ /* reset the SerDes and wait for reset bit return low */
++ if (rc != 0)
++ return rc;
++
++ bnx2x_set_aer_mmd(params, vars);
++
++ /* setting the masterLn_def again after the reset */
++ if (vars->phy_flags & PHY_XGXS_FLAG) {
++ bnx2x_set_master_ln(params);
++ bnx2x_set_swap_lanes(params);
++ }
++
++ if (vars->phy_flags & PHY_XGXS_FLAG) {
++ if ((params->req_line_speed &&
++ ((params->req_line_speed == SPEED_100) ||
++ (params->req_line_speed == SPEED_10))) ||
++ (!params->req_line_speed &&
++ (params->speed_cap_mask >=
++ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) &&
++ (params->speed_cap_mask <
++ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
++ )) {
++ vars->phy_flags |= PHY_SGMII_FLAG;
++ } else {
++ vars->phy_flags &= ~PHY_SGMII_FLAG;
++ }
++ }
++ /* In case of external phy existence, the line speed would be the
++ line speed linked up by the external phy. In case it is direct only,
++ then the line_speed during initialization will be equal to the
++ req_line_speed*/
++ vars->line_speed = params->req_line_speed;
++
++ bnx2x_calc_ieee_aneg_adv(params, &vars->ieee_fc);
++
++ /* init ext phy and enable link state int */
++ non_ext_phy = ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) ||
++ (params->loopback_mode == LOOPBACK_XGXS_10) ||
++ (params->loopback_mode == LOOPBACK_EXT_PHY));
++
++ if (non_ext_phy ||
++ (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705)) {
++ if (params->req_line_speed == SPEED_AUTO_NEG)
++ bnx2x_set_parallel_detection(params, vars->phy_flags);
++ bnx2x_init_internal_phy(params, vars);
++ }
++
++ if (!non_ext_phy)
++ rc |= bnx2x_ext_phy_init(params, vars);
++
++ bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
++ (NIG_STATUS_XGXS0_LINK10G |
++ NIG_STATUS_XGXS0_LINK_STATUS |
++ NIG_STATUS_SERDES0_LINK_STATUS));
++
++ return rc;
++
++}
++
++
++u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
++{
++ struct bnx2x *bp = params->bp;
++
++ u32 val;
++ DP(NETIF_MSG_LINK, "Phy Initialization started \n");
++ DP(NETIF_MSG_LINK, "req_speed = %d, req_flowctrl=%d\n",
++ params->req_line_speed, params->req_flow_ctrl);
++ vars->link_status = 0;
++ vars->phy_link_up = 0;
++ vars->link_up = 0;
++ vars->line_speed = 0;
++ vars->duplex = DUPLEX_FULL;
++ vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
++ vars->mac_type = MAC_TYPE_NONE;
++
++ if (params->switch_cfg == SWITCH_CFG_1G)
++ vars->phy_flags = PHY_SERDES_FLAG;
++ else
++ vars->phy_flags = PHY_XGXS_FLAG;
++
++
++ /* disable attentions */
++ bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4,
++ (NIG_MASK_XGXS0_LINK_STATUS |
++ NIG_MASK_XGXS0_LINK10G |
++ NIG_MASK_SERDES0_LINK_STATUS |
++ NIG_MASK_MI_INT));
++
++ bnx2x_emac_init(params, vars);
++
++ if (CHIP_REV_IS_FPGA(bp)) {
++ vars->link_up = 1;
++ vars->line_speed = SPEED_10000;
++ vars->duplex = DUPLEX_FULL;
++ vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
++ vars->link_status = (LINK_STATUS_LINK_UP | LINK_10GTFD);
++ /* enable on E1.5 FPGA */
++ if (CHIP_IS_E1H(bp)) {
++ vars->flow_ctrl |=
++ (BNX2X_FLOW_CTRL_TX | BNX2X_FLOW_CTRL_RX);
++ vars->link_status |=
++ (LINK_STATUS_TX_FLOW_CONTROL_ENABLED |
++ LINK_STATUS_RX_FLOW_CONTROL_ENABLED);
++ }
++
++ bnx2x_emac_enable(params, vars, 0);
++ bnx2x_pbf_update(params, vars->flow_ctrl, vars->line_speed);
++ /* disable drain */
++ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE
++ + params->port*4, 0);
++
++ /* update shared memory */
++ bnx2x_update_mng(params, vars->link_status);
++
++ return 0;
++
++ } else
++ if (CHIP_REV_IS_EMUL(bp)) {
++
++ vars->link_up = 1;
++ vars->line_speed = SPEED_10000;
++ vars->duplex = DUPLEX_FULL;
++ vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
++ vars->link_status = (LINK_STATUS_LINK_UP | LINK_10GTFD);
++
++ bnx2x_bmac_enable(params, vars, 0);
++
++ bnx2x_pbf_update(params, vars->flow_ctrl, vars->line_speed);
++ /* Disable drain */
++ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE
++ + params->port*4, 0);
++
++ /* update shared memory */
++ bnx2x_update_mng(params, vars->link_status);
++
++ return 0;
++
++ } else
++ if (params->loopback_mode == LOOPBACK_BMAC) {
++ vars->link_up = 1;
++ vars->line_speed = SPEED_10000;
++ vars->duplex = DUPLEX_FULL;
++ vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
++ vars->mac_type = MAC_TYPE_BMAC;
++
++ vars->phy_flags = PHY_XGXS_FLAG;
++
++ bnx2x_phy_deassert(params, vars->phy_flags);
++ /* set bmac loopback */
++ bnx2x_bmac_enable(params, vars, 1);
++
++ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE +
++ params->port*4, 0);
++ } else if (params->loopback_mode == LOOPBACK_EMAC) {
++ vars->link_up = 1;
++ vars->line_speed = SPEED_1000;
++ vars->duplex = DUPLEX_FULL;
++ vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
++ vars->mac_type = MAC_TYPE_EMAC;
++
++ vars->phy_flags = PHY_XGXS_FLAG;
++
++ bnx2x_phy_deassert(params, vars->phy_flags);
++ /* set bmac loopback */
++ bnx2x_emac_enable(params, vars, 1);
++ bnx2x_emac_program(params, vars->line_speed,
++ vars->duplex);
++ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE +
++ params->port*4, 0);
++ } else if ((params->loopback_mode == LOOPBACK_XGXS_10) ||
++ (params->loopback_mode == LOOPBACK_EXT_PHY)) {
++ vars->link_up = 1;
++ vars->line_speed = SPEED_10000;
++ vars->duplex = DUPLEX_FULL;
++ vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
++
++ vars->phy_flags = PHY_XGXS_FLAG;
++
++ val = REG_RD(bp,
++ NIG_REG_XGXS0_CTRL_PHY_ADDR+
++ params->port*0x18);
++ params->phy_addr = (u8)val;
++
++ bnx2x_phy_deassert(params, vars->phy_flags);
++ bnx2x_link_initialize(params, vars);
++
++ vars->mac_type = MAC_TYPE_BMAC;
++
++ bnx2x_bmac_enable(params, vars, 0);
++
++ if (params->loopback_mode == LOOPBACK_XGXS_10) {
++ /* set 10G XGXS loopback */
++ bnx2x_set_xgxs_loopback(params, vars, 1);
++ } else {
++ /* set external phy loopback */
++ bnx2x_ext_phy_loopback(params);
++ }
++ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE +
++ params->port*4, 0);
++ } else
++ /* No loopback */
++ {
++
++ bnx2x_phy_deassert(params, vars->phy_flags);
++ switch (params->switch_cfg) {
++ case SWITCH_CFG_1G:
++ vars->phy_flags |= PHY_SERDES_FLAG;
++ if ((params->ext_phy_config &
++ PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK) ==
++ PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482) {
++ vars->phy_flags |=
++ PHY_SGMII_FLAG;
++ }
++
++ val = REG_RD(bp,
++ NIG_REG_SERDES0_CTRL_PHY_ADDR+
++ params->port*0x10);
++
++ params->phy_addr = (u8)val;
++
++ break;
++ case SWITCH_CFG_10G:
++ vars->phy_flags |= PHY_XGXS_FLAG;
++ val = REG_RD(bp,
++ NIG_REG_XGXS0_CTRL_PHY_ADDR+
++ params->port*0x18);
++ params->phy_addr = (u8)val;
++
++ break;
++ default:
++ DP(NETIF_MSG_LINK, "Invalid switch_cfg\n");
++ return -EINVAL;
++ break;
++ }
++
++ bnx2x_link_initialize(params, vars);
++ msleep(30);
++ bnx2x_link_int_enable(params);
++ }
++ return 0;
++}
++
++u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars)
++{
++
++ struct bnx2x *bp = params->bp;
++ u32 ext_phy_config = params->ext_phy_config;
++ u16 hw_led_mode = params->hw_led_mode;
++ u32 chip_id = params->chip_id;
++ u8 port = params->port;
++ u32 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
++ /* disable attentions */
++
++ vars->link_status = 0;
++ bnx2x_update_mng(params, vars->link_status);
++ bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
++ (NIG_MASK_XGXS0_LINK_STATUS |
++ NIG_MASK_XGXS0_LINK10G |
++ NIG_MASK_SERDES0_LINK_STATUS |
++ NIG_MASK_MI_INT));
++
++ /* activate nig drain */
++ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
++
++ /* disable nig egress interface */
++ REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0);
++ REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0);
++
++ /* Stop BigMac rx */
++ bnx2x_bmac_rx_disable(bp, port);
++
++ /* disable emac */
++ REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
++
++ msleep(10);
++ /* The PHY reset is controlled by GPIO 1
++ * Hold it low
++ */
++ /* clear link led */
++ bnx2x_set_led(bp, port, LED_MODE_OFF, 0, hw_led_mode, chip_id);
++ if (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) {
++ if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) &&
++ (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073)) {
++ /* HW reset */
++
++ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
++ MISC_REGISTERS_GPIO_OUTPUT_LOW,
++ port);
++
++ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
++ MISC_REGISTERS_GPIO_OUTPUT_LOW,
++ port);
++
++ DP(NETIF_MSG_LINK, "reset external PHY\n");
++ } else if (ext_phy_type ==
++ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
++ DP(NETIF_MSG_LINK, "Setting 8073 port %d into "
++ "low power mode\n",
++ port);
++ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
++ MISC_REGISTERS_GPIO_OUTPUT_LOW,
++ port);
++ }
++ }
++ /* reset the SerDes/XGXS */
++ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR,
++ (0x1ff << (port*16)));
++
++ /* reset BigMac */
++ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
++ (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
++
++ /* disable nig ingress interface */
++ REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0);
++ REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0);
++ REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0);
++ REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0);
++ vars->link_up = 0;
++ return 0;
++}
++
++static u8 bnx2x_update_link_down(struct link_params *params,
++ struct link_vars *vars)
++{
++ struct bnx2x *bp = params->bp;
++ u8 port = params->port;
++ DP(NETIF_MSG_LINK, "Port %x: Link is down\n", port);
++ bnx2x_set_led(bp, port, LED_MODE_OFF,
++ 0, params->hw_led_mode,
++ params->chip_id);
++
++ /* indicate no mac active */
++ vars->mac_type = MAC_TYPE_NONE;
++
++ /* update shared memory */
++ vars->link_status = 0;
++ vars->line_speed = 0;
++ bnx2x_update_mng(params, vars->link_status);
++
++ /* activate nig drain */
++ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
++
++ /* disable emac */
++ REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
++
++ msleep(10);
++
++ /* reset BigMac */
++ bnx2x_bmac_rx_disable(bp, params->port);
++ REG_WR(bp, GRCBASE_MISC +
++ MISC_REGISTERS_RESET_REG_2_CLEAR,
++ (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
++ return 0;
++}
++
++static u8 bnx2x_update_link_up(struct link_params *params,
++ struct link_vars *vars,
++ u8 link_10g, u32 gp_status)
++{
++ struct bnx2x *bp = params->bp;
++ u8 port = params->port;
++ u8 rc = 0;
++ vars->link_status |= LINK_STATUS_LINK_UP;
++ if (link_10g) {
++ bnx2x_bmac_enable(params, vars, 0);
++ bnx2x_set_led(bp, port, LED_MODE_OPER,
++ SPEED_10000, params->hw_led_mode,
++ params->chip_id);
++
++ } else {
++ bnx2x_emac_enable(params, vars, 0);
++ rc = bnx2x_emac_program(params, vars->line_speed,
++ vars->duplex);
++
++ /* AN complete? */
++ if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) {
++ if (!(vars->phy_flags &
++ PHY_SGMII_FLAG))
++ bnx2x_set_sgmii_tx_driver(params);
++ }
++ }
++
++ /* PBF - link up */
++ rc |= bnx2x_pbf_update(params, vars->flow_ctrl,
++ vars->line_speed);
++
++ /* disable drain */
++ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0);
++
++ /* update shared memory */
++ bnx2x_update_mng(params, vars->link_status);
++ msleep(20);
++ return rc;
++}
++/* This function should be called upon link interrupt */
++/* In case vars->link_up, driver needs to
++ 1. Update the pbf
++ 2. Disable drain
++ 3. Update the shared memory
++ 4. Indicate link up
++ 5. Set LEDs
++ Otherwise,
++ 1. Update shared memory
++ 2. Reset BigMac
++ 3. Report link down
++ 4. Unset LEDs
++*/
++u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
++{
++ struct bnx2x *bp = params->bp;
++ u8 port = params->port;
++ u16 gp_status;
++ u8 link_10g;
++ u8 ext_phy_link_up, rc = 0;
++ u32 ext_phy_type;
++
++ DP(NETIF_MSG_LINK, "port %x, XGXS?%x, int_status 0x%x\n",
++ port,
++ (vars->phy_flags & PHY_XGXS_FLAG),
++ REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4));
++
++ DP(NETIF_MSG_LINK, "int_mask 0x%x MI_INT %x, SERDES_LINK %x\n",
++ REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4),
++ REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18),
++ REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c));
++
++ DP(NETIF_MSG_LINK, " 10G %x, XGXS_LINK %x\n",
++ REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
++ REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68));
++
++ /* disable emac */
++ REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
++
++ ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
++
++ /* Check external link change only for non-direct */
++ ext_phy_link_up = bnx2x_ext_phy_is_link_up(params, vars);
++
++ /* Read gp_status */
++ CL45_RD_OVER_CL22(bp, port, params->phy_addr,
++ MDIO_REG_BANK_GP_STATUS,
++ MDIO_GP_STATUS_TOP_AN_STATUS1,
++ &gp_status);
++
++ rc = bnx2x_link_settings_status(params, vars, gp_status);
++ if (rc != 0)
++ return rc;
++
++ /* anything 10 and over uses the bmac */
++ link_10g = ((vars->line_speed == SPEED_10000) ||
++ (vars->line_speed == SPEED_12000) ||
++ (vars->line_speed == SPEED_12500) ||
++ (vars->line_speed == SPEED_13000) ||
++ (vars->line_speed == SPEED_15000) ||
++ (vars->line_speed == SPEED_16000));
++
++ bnx2x_link_int_ack(params, vars, link_10g);
++
++ /* In case external phy link is up, and internal link is down
++ (not yet initialized, probably right after link initialization), it needs
++ to be initialized.
++ Note that after link down-up as result of cable plug,
++ the xgxs link would probably become up again without the need to
++ initialize it*/
++
++ if ((ext_phy_type != PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT) &&
++ (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) &&
++ (ext_phy_link_up && !vars->phy_link_up))
++ bnx2x_init_internal_phy(params, vars);
++
++ /* link is up only if both local phy and external phy are up */
++ vars->link_up = (ext_phy_link_up && vars->phy_link_up);
++
++ if (vars->link_up)
++ rc = bnx2x_update_link_up(params, vars, link_10g, gp_status);
++ else
++ rc = bnx2x_update_link_down(params, vars);
++
++ return rc;
++}
++
++static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, u32 shmem_base)
++{
++ u8 ext_phy_addr[PORT_MAX];
++ u16 val;
++ s8 port;
++
++ /* PART1 - Reset both phys */
++ for (port = PORT_MAX - 1; port >= PORT_0; port--) {
++ /* Extract the ext phy address for the port */
++ u32 ext_phy_config = REG_RD(bp, shmem_base +
++ offsetof(struct shmem_region,
++ dev_info.port_hw_config[port].external_phy_config));
++
++ /* disable attentions */
++ bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
++ (NIG_MASK_XGXS0_LINK_STATUS |
++ NIG_MASK_XGXS0_LINK10G |
++ NIG_MASK_SERDES0_LINK_STATUS |
++ NIG_MASK_MI_INT));
++
++ ext_phy_addr[port] =
++ ((ext_phy_config &
++ PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
++ PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
++
++ /* Need to take the phy out of low power mode in order
++ to access its registers */
++ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
++ MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
++
++ /* Reset the phy */
++ bnx2x_cl45_write(bp, port,
++ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
++ ext_phy_addr[port],
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_CTRL,
++ 1<<15);
++ }
++
++ /* Add delay of 150ms after reset */
++ msleep(150);
++
++ /* PART2 - Download firmware to both phys */
++ for (port = PORT_MAX - 1; port >= PORT_0; port--) {
++ u16 fw_ver1;
++
++ bnx2x_bcm8073_external_rom_boot(bp, port,
++ ext_phy_addr[port]);
++
++ bnx2x_cl45_read(bp, port, PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
++ ext_phy_addr[port],
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_ROM_VER1, &fw_ver1);
++ if (fw_ver1 == 0 || fw_ver1 == 0x4321) {
++ DP(NETIF_MSG_LINK,
++ "bnx2x_8073_common_init_phy port %x:"
++ "Download failed. fw version = 0x%x\n",
++ port, fw_ver1);
++ return -EINVAL;
++ }
++
++ /* Only set bit 10 = 1 (Tx power down) */
++ bnx2x_cl45_read(bp, port,
++ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
++ ext_phy_addr[port],
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_TX_POWER_DOWN, &val);
++
++ /* Phase1 of TX_POWER_DOWN reset */
++ bnx2x_cl45_write(bp, port,
++ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
++ ext_phy_addr[port],
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_TX_POWER_DOWN,
++ (val | 1<<10));
++ }
++
++ /* Toggle Transmitter: Power down and then up with 600ms
++ delay between */
++ msleep(600);
++
++ /* PART3 - complete TX_POWER_DOWN process, and set GPIO2 back to low */
++ for (port = PORT_MAX - 1; port >= PORT_0; port--) {
++ /* Phase2 of POWER_DOWN_RESET*/
++ /* Release bit 10 (Release Tx power down) */
++ bnx2x_cl45_read(bp, port,
++ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
++ ext_phy_addr[port],
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_TX_POWER_DOWN, &val);
++
++ bnx2x_cl45_write(bp, port,
++ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
++ ext_phy_addr[port],
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10))));
++ msleep(15);
++
++ /* Read modify write the SPI-ROM version select register */
++ bnx2x_cl45_read(bp, port,
++ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
++ ext_phy_addr[port],
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_EDC_FFE_MAIN, &val);
++ bnx2x_cl45_write(bp, port,
++ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
++ ext_phy_addr[port],
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_EDC_FFE_MAIN, (val | (1<<12)));
++
++ /* set GPIO2 back to LOW */
++ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
++ MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
++ }
++ return 0;
++
++}
++
++u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base)
++{
++ u8 rc = 0;
++ u32 ext_phy_type;
++
++ DP(NETIF_MSG_LINK, "bnx2x_common_init_phy\n");
++
++ /* Read the ext_phy_type for arbitrary port(0) */
++ ext_phy_type = XGXS_EXT_PHY_TYPE(
++ REG_RD(bp, shmem_base +
++ offsetof(struct shmem_region,
++ dev_info.port_hw_config[0].external_phy_config)));
++
++ switch (ext_phy_type) {
++ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
++ {
++ rc = bnx2x_8073_common_init_phy(bp, shmem_base);
++ break;
++ }
++ default:
++ DP(NETIF_MSG_LINK,
++ "bnx2x_common_init_phy: ext_phy 0x%x not required\n",
++ ext_phy_type);
++ break;
++ }
++
++ return rc;
++}
++
++
++
++static void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, u8 port, u8 phy_addr)
++{
++ u16 val, cnt;
++
++ bnx2x_cl45_read(bp, port,
++ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
++ phy_addr,
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_7101_RESET, &val);
++
++ for (cnt = 0; cnt < 10; cnt++) {
++ msleep(50);
++ /* Writes a self-clearing reset */
++ bnx2x_cl45_write(bp, port,
++ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
++ phy_addr,
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_7101_RESET,
++ (val | (1<<15)));
++ /* Wait for clear */
++ bnx2x_cl45_read(bp, port,
++ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
++ phy_addr,
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_7101_RESET, &val);
++
++ if ((val & (1<<15)) == 0)
++ break;
++ }
++}
++#define RESERVED_SIZE 256
++/* max application is 160K bytes - data at end of RAM */
++#define MAX_APP_SIZE (160*1024 - RESERVED_SIZE)
++
++/* Header is 14 bytes */
++#define HEADER_SIZE 14
++#define DATA_OFFSET HEADER_SIZE
++
++#define SPI_START_TRANSFER(bp, port, ext_phy_addr) \
++ bnx2x_cl45_write(bp, port, PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101, \
++ ext_phy_addr, \
++ MDIO_PCS_DEVAD, \
++ MDIO_PCS_REG_7101_SPI_CTRL_ADDR, 1)
++
++/* Programs an image to DSP's flash via the SPI port*/
++static u8 bnx2x_sfx7101_flash_download(struct bnx2x *bp, u8 port,
++ u8 ext_phy_addr,
++ char data[], u32 size)
++{
++ const u16 num_trans = size/4; /* 4 bytes can be sent at a time */
++ /* Doesn't include last trans!*/
++ const u16 last_trans_size = size%4; /* Num bytes on last trans */
++ u16 trans_cnt, byte_cnt;
++ u32 data_index;
++ u16 tmp;
++ u16 code_started = 0;
++ u16 image_revision1, image_revision2;
++ u16 cnt;
++
++ DP(NETIF_MSG_LINK, "bnx2x_sfx7101_flash_download file_size=%d\n", size);
++ /* Going to flash*/
++ if ((size-HEADER_SIZE) > MAX_APP_SIZE) {
++ /* This very often will be the case, because the image is built
++ with 160Kbytes size whereas the total image size must actually
++ be 160Kbytes-RESERVED_SIZE */
++ DP(NETIF_MSG_LINK, "Warning, file size was %d bytes "
++ "truncated to %d bytes\n", size, MAX_APP_SIZE);
++ size = MAX_APP_SIZE+HEADER_SIZE;
++ }
++ DP(NETIF_MSG_LINK, "File version is %c%c\n", data[0x14e], data[0x14f]);
++ DP(NETIF_MSG_LINK, " %c%c\n", data[0x150], data[0x151]);
++ /* Put the DSP in download mode by setting FLASH_CFG[2] to 1
++ and issuing a reset.*/
++
++ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
++ MISC_REGISTERS_GPIO_HIGH, port);
++
++ bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
++
++ /* wait 0.5 sec */
++ for (cnt = 0; cnt < 100; cnt++)
++ msleep(5);
++
++ /* Make sure we can access the DSP
++ And it's in the correct mode (waiting for download) */
++
++ bnx2x_cl45_read(bp, port,
++ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
++ ext_phy_addr,
++ MDIO_PCS_DEVAD,
++ MDIO_PCS_REG_7101_DSP_ACCESS, &tmp);
++
++ if (tmp != 0x000A) {
++ DP(NETIF_MSG_LINK, "DSP is not in waiting on download mode. "
++ "Expected 0x000A, read 0x%04X\n", tmp);
++ DP(NETIF_MSG_LINK, "Download failed\n");
++ return -EINVAL;
++ }
++
++ /* Mux the SPI interface away from the internal processor */
++ bnx2x_cl45_write(bp, port,
++ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
++ ext_phy_addr,
++ MDIO_PCS_DEVAD,
++ MDIO_PCS_REG_7101_SPI_MUX, 1);
++
++ /* Reset the SPI port */
++ bnx2x_cl45_write(bp, port,
++ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
++ ext_phy_addr,
++ MDIO_PCS_DEVAD,
++ MDIO_PCS_REG_7101_SPI_CTRL_ADDR, 0);
++ bnx2x_cl45_write(bp, port,
++ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
++ ext_phy_addr,
++ MDIO_PCS_DEVAD,
++ MDIO_PCS_REG_7101_SPI_CTRL_ADDR,
++ (1<<MDIO_PCS_REG_7101_SPI_RESET_BIT));
++ bnx2x_cl45_write(bp, port,
++ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
++ ext_phy_addr,
++ MDIO_PCS_DEVAD,
++ MDIO_PCS_REG_7101_SPI_CTRL_ADDR, 0);
++
++ /* Erase the flash */
++ bnx2x_cl45_write(bp, port,
++ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
++ ext_phy_addr,
++ MDIO_PCS_DEVAD,
++ MDIO_PCS_REG_7101_SPI_FIFO_ADDR,
++ MDIO_PCS_REG_7101_SPI_FIFO_ADDR_WRITE_ENABLE_CMD);
++
++ bnx2x_cl45_write(bp, port,
++ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
++ ext_phy_addr,
++ MDIO_PCS_DEVAD,
++ MDIO_PCS_REG_7101_SPI_BYTES_TO_TRANSFER_ADDR,
++ 1);
++
++ SPI_START_TRANSFER(bp, port, ext_phy_addr);
++ bnx2x_cl45_write(bp, port,
++ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
++ ext_phy_addr,
++ MDIO_PCS_DEVAD,
++ MDIO_PCS_REG_7101_SPI_FIFO_ADDR,
++ MDIO_PCS_REG_7101_SPI_FIFO_ADDR_BULK_ERASE_CMD);
++
++ bnx2x_cl45_write(bp, port,
++ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
++ ext_phy_addr,
++ MDIO_PCS_DEVAD,
++ MDIO_PCS_REG_7101_SPI_BYTES_TO_TRANSFER_ADDR,
++ 1);
++ SPI_START_TRANSFER(bp, port, ext_phy_addr);
++
++ /* Wait 10 seconds, the maximum time for the erase to complete */
++ DP(NETIF_MSG_LINK, "Erasing flash, this takes 10 seconds...\n");
++ for (cnt = 0; cnt < 1000; cnt++)
++ msleep(10);
++
++ DP(NETIF_MSG_LINK, "Downloading flash, please wait...\n");
++ data_index = 0;
++ for (trans_cnt = 0; trans_cnt < num_trans; trans_cnt++) {
++ bnx2x_cl45_write(bp, port,
++ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
++ ext_phy_addr,
++ MDIO_PCS_DEVAD,
++ MDIO_PCS_REG_7101_SPI_FIFO_ADDR,
++ MDIO_PCS_REG_7101_SPI_FIFO_ADDR_WRITE_ENABLE_CMD);
++
++ bnx2x_cl45_write(bp, port,
++ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
++ ext_phy_addr,
++ MDIO_PCS_DEVAD,
++ MDIO_PCS_REG_7101_SPI_BYTES_TO_TRANSFER_ADDR,
++ 1);
++ SPI_START_TRANSFER(bp, port, ext_phy_addr);
++
++ bnx2x_cl45_write(bp, port,
++ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
++ ext_phy_addr,
++ MDIO_PCS_DEVAD,
++ MDIO_PCS_REG_7101_SPI_FIFO_ADDR,
++ MDIO_PCS_REG_7101_SPI_FIFO_ADDR_PAGE_PROGRAM_CMD);
++
++ /* Bits 23-16 of address */
++ bnx2x_cl45_write(bp, port,
++ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
++ ext_phy_addr,
++ MDIO_PCS_DEVAD,
++ MDIO_PCS_REG_7101_SPI_FIFO_ADDR,
++ (data_index>>16));
++ /* Bits 15-8 of address */
++ bnx2x_cl45_write(bp, port,
++ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
++ ext_phy_addr,
++ MDIO_PCS_DEVAD,
++ MDIO_PCS_REG_7101_SPI_FIFO_ADDR,
++ (data_index>>8));
++
++ /* Bits 7-0 of address */
++ bnx2x_cl45_write(bp, port,
++ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
++ ext_phy_addr,
++ MDIO_PCS_DEVAD,
++ MDIO_PCS_REG_7101_SPI_FIFO_ADDR,
++ ((u16)data_index));
++
++ byte_cnt = 0;
++ while (byte_cnt < 4 && data_index < size) {
++ bnx2x_cl45_write(bp, port,
++ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
++ ext_phy_addr,
++ MDIO_PCS_DEVAD,
++ MDIO_PCS_REG_7101_SPI_FIFO_ADDR,
++ data[data_index++]);
++ byte_cnt++;
++ }
++
++ bnx2x_cl45_write(bp, port,
++ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
++ ext_phy_addr,
++ MDIO_PCS_DEVAD,
++ MDIO_PCS_REG_7101_SPI_BYTES_TO_TRANSFER_ADDR,
++ byte_cnt+4);
++
++ SPI_START_TRANSFER(bp, port, ext_phy_addr);
++ msleep(5); /* Wait 5 ms minimum between transfers */
++
++ /* Let the user know something's going on.*/
++ /* a pacifier every 4K */
++ if ((data_index % 1023) == 0)
++ DP(NETIF_MSG_LINK, "Download %d%%\n", data_index/size);
++ }
++
++ DP(NETIF_MSG_LINK, "\n");
++ /* Transfer the last block if there is data remaining */
++ if (last_trans_size) {
++ bnx2x_cl45_write(bp, port,
++ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
++ ext_phy_addr,
++ MDIO_PCS_DEVAD,
++ MDIO_PCS_REG_7101_SPI_FIFO_ADDR,
++ MDIO_PCS_REG_7101_SPI_FIFO_ADDR_WRITE_ENABLE_CMD);
++
++ bnx2x_cl45_write(bp, port,
++ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
++ ext_phy_addr,
++ MDIO_PCS_DEVAD,
++ MDIO_PCS_REG_7101_SPI_BYTES_TO_TRANSFER_ADDR,
++ 1);
++
++ SPI_START_TRANSFER(bp, port, ext_phy_addr);
++
++ bnx2x_cl45_write(bp, port,
++ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
++ ext_phy_addr,
++ MDIO_PCS_DEVAD,
++ MDIO_PCS_REG_7101_SPI_FIFO_ADDR,
++ MDIO_PCS_REG_7101_SPI_FIFO_ADDR_PAGE_PROGRAM_CMD);
++
++ /* Bits 23-16 of address */
++ bnx2x_cl45_write(bp, port,
++ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
++ ext_phy_addr,
++ MDIO_PCS_DEVAD,
++ MDIO_PCS_REG_7101_SPI_FIFO_ADDR,
++ (data_index>>16));
++ /* Bits 15-8 of address */
++ bnx2x_cl45_write(bp, port,
++ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
++ ext_phy_addr,
++ MDIO_PCS_DEVAD,
++ MDIO_PCS_REG_7101_SPI_FIFO_ADDR,
++ (data_index>>8));
++
++ /* Bits 7-0 of address */
++ bnx2x_cl45_write(bp, port,
++ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
++ ext_phy_addr,
++ MDIO_PCS_DEVAD,
++ MDIO_PCS_REG_7101_SPI_FIFO_ADDR,
++ ((u16)data_index));
++
++ byte_cnt = 0;
++ while (byte_cnt < last_trans_size && data_index < size) {
++ /* Bits 7-0 of address */
++ bnx2x_cl45_write(bp, port,
++ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
++ ext_phy_addr,
++ MDIO_PCS_DEVAD,
++ MDIO_PCS_REG_7101_SPI_FIFO_ADDR,
++ data[data_index++]);
++ byte_cnt++;
++ }
++
++ bnx2x_cl45_write(bp, port,
++ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
++ ext_phy_addr,
++ MDIO_PCS_DEVAD,
++ MDIO_PCS_REG_7101_SPI_BYTES_TO_TRANSFER_ADDR,
++ byte_cnt+4);
++
++ SPI_START_TRANSFER(bp, port, ext_phy_addr);
++ }
++
++ /* DSP Remove Download Mode */
++ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
++ MISC_REGISTERS_GPIO_LOW, port);
++
++ bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
++
++ /* wait 0.5 sec to allow it to run */
++ for (cnt = 0; cnt < 100; cnt++)
++ msleep(5);
++
++ bnx2x_hw_reset(bp, port);
++
++ for (cnt = 0; cnt < 100; cnt++)
++ msleep(5);
++
++ /* Check that the code is started. In case the download
++ checksum failed, the code won't be started. */
++ bnx2x_cl45_read(bp, port,
++ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
++ ext_phy_addr,
++ MDIO_PCS_DEVAD,
++ MDIO_PCS_REG_7101_DSP_ACCESS,
++ &tmp);
++
++ code_started = (tmp & (1<<4));
++ if (!code_started) {
++ DP(NETIF_MSG_LINK, "Download failed. Please check file.\n");
++ return -EINVAL;
++ }
++
++ /* Verify that the file revision is now equal to the image
++ revision within the DSP */
++ bnx2x_cl45_read(bp, port,
++ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
++ ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_7101_VER1,
++ &image_revision1);
++
++ bnx2x_cl45_read(bp, port,
++ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
++ ext_phy_addr,
++ MDIO_PMA_DEVAD,
++ MDIO_PMA_REG_7101_VER2,
++ &image_revision2);
++
++ if (data[0x14e] != (image_revision2&0xFF) ||
++ data[0x14f] != ((image_revision2&0xFF00)>>8) ||
++ data[0x150] != (image_revision1&0xFF) ||
++ data[0x151] != ((image_revision1&0xFF00)>>8)) {
++ DP(NETIF_MSG_LINK, "Download failed.\n");
++ return -EINVAL;
++ }
++ DP(NETIF_MSG_LINK, "Download %d%%\n", data_index/size);
++ return 0;
++}
++
++u8 bnx2x_flash_download(struct bnx2x *bp, u8 port, u32 ext_phy_config,
++ u8 driver_loaded, char data[], u32 size)
++{
++ u8 rc = 0;
++ u32 ext_phy_type;
++ u8 ext_phy_addr;
++ ext_phy_addr = ((ext_phy_config &
++ PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
++ PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
++
++ ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
++
++ switch (ext_phy_type) {
++ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
++ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
++ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
++ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
++ DP(NETIF_MSG_LINK,
++ "Flash download not supported for this ext phy\n");
++ rc = -EINVAL;
++ break;
++ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
++ /* Take ext phy out of reset */
++ if (!driver_loaded)
++ bnx2x_turn_on_ef(bp, port, ext_phy_addr, ext_phy_type);
++ rc = bnx2x_sfx7101_flash_download(bp, port, ext_phy_addr,
++ data, size);
++ if (!driver_loaded)
++ bnx2x_turn_off_sf(bp, port);
++ break;
++ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
++ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
++ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN:
++ default:
++ DP(NETIF_MSG_LINK, "Invalid ext phy type\n");
++ rc = -EINVAL;
++ break;
++ }
++ return rc;
++}
++
+diff --git a/drivers/net/bnx2x_link.h b/drivers/net/bnx2x_link.h
+new file mode 100644
+index 0000000..47cb585
+--- /dev/null
++++ b/drivers/net/bnx2x_link.h
+@@ -0,0 +1,173 @@
++/* Copyright 2008 Broadcom Corporation
++ *
++ * Unless you and Broadcom execute a separate written software license
++ * agreement governing use of this software, this software is licensed to you
++ * under the terms of the GNU General Public License version 2, available
++ * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
++ *
++ * Notwithstanding the above, under no circumstances may you combine this
++ * software in any way with any other Broadcom software provided under a
++ * license other than the GPL, without Broadcom's express prior written
++ * consent.
++ *
++ * Written by Yaniv Rosner
++ *
++ */
++
++#ifndef BNX2X_LINK_H
++#define BNX2X_LINK_H
++
++
++
++/***********************************************************/
++/* Defines */
++/***********************************************************/
++#define DEFAULT_PHY_DEV_ADDR 3
++
++
++
++#define BNX2X_FLOW_CTRL_AUTO PORT_FEATURE_FLOW_CONTROL_AUTO
++#define BNX2X_FLOW_CTRL_TX PORT_FEATURE_FLOW_CONTROL_TX
++#define BNX2X_FLOW_CTRL_RX PORT_FEATURE_FLOW_CONTROL_RX
++#define BNX2X_FLOW_CTRL_BOTH PORT_FEATURE_FLOW_CONTROL_BOTH
++#define BNX2X_FLOW_CTRL_NONE PORT_FEATURE_FLOW_CONTROL_NONE
++
++#define SPEED_AUTO_NEG 0
++#define SPEED_12000 12000
++#define SPEED_12500 12500
++#define SPEED_13000 13000
++#define SPEED_15000 15000
++#define SPEED_16000 16000
++
++
++/***********************************************************/
++/* Structs */
++/***********************************************************/
++/* Inputs parameters to the CLC */
++struct link_params {
++
++ u8 port;
++
++ /* Default / User Configuration */
++ u8 loopback_mode;
++#define LOOPBACK_NONE 0
++#define LOOPBACK_EMAC 1
++#define LOOPBACK_BMAC 2
++#define LOOPBACK_XGXS_10 3
++#define LOOPBACK_EXT_PHY 4
++#define LOOPBACK_EXT 5
++
++ u16 req_duplex;
++ u16 req_flow_ctrl;
++ u16 req_fc_auto_adv; /* Should be set to TX / BOTH when
++ req_flow_ctrl is set to AUTO */
++ u16 req_line_speed; /* Also determine AutoNeg */
++
++ /* Device parameters */
++ u8 mac_addr[6];
++
++
++
++ /* shmem parameters */
++ u32 shmem_base;
++ u32 speed_cap_mask;
++ u32 switch_cfg;
++#define SWITCH_CFG_1G PORT_FEATURE_CON_SWITCH_1G_SWITCH
++#define SWITCH_CFG_10G PORT_FEATURE_CON_SWITCH_10G_SWITCH
++#define SWITCH_CFG_AUTO_DETECT PORT_FEATURE_CON_SWITCH_AUTO_DETECT
++
++ u16 hw_led_mode; /* part of the hw_config read from the shmem */
++ u32 serdes_config;
++ u32 lane_config;
++ u32 ext_phy_config;
++#define XGXS_EXT_PHY_TYPE(ext_phy_config) (ext_phy_config & \
++ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK)
++#define SERDES_EXT_PHY_TYPE(ext_phy_config) (ext_phy_config & \
++ PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK)
++ /* Phy register parameter */
++ u32 chip_id;
++
++ /* phy_addr populated by the CLC */
++ u8 phy_addr;
++ /* Device pointer passed to all callback functions */
++ struct bnx2x *bp;
++};
++
++/* Output parameters */
++struct link_vars {
++ u8 phy_link_up; /* internal phy link indication */
++ u8 link_up;
++ u16 duplex;
++ u16 flow_ctrl;
++ u32 ieee_fc;
++ u8 mac_type;
++
++#define MAC_TYPE_NONE 0
++#define MAC_TYPE_EMAC 1
++#define MAC_TYPE_BMAC 2
++ u16 line_speed;
++ u32 autoneg;
++#define AUTO_NEG_DISABLED 0x0
++#define AUTO_NEG_ENABLED 0x1
++#define AUTO_NEG_COMPLETE 0x2
++#define AUTO_NEG_PARALLEL_DETECTION_USED 0x3
++
++ u8 phy_flags;
++
++ /* The same definitions as the shmem parameter */
++ u32 link_status;
++};
++
++/***********************************************************/
++/* Functions */
++/***********************************************************/
++
++/* Initialize the phy */
++u8 bnx2x_phy_init(struct link_params *input, struct link_vars *output);
++
++/* Reset the link. Should be called when driver or interface goes down */
++u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars);
++
++/* bnx2x_link_update should be called upon link interrupt */
++u8 bnx2x_link_update(struct link_params *input, struct link_vars *output);
++
++/* use the following cl45 functions to read/write from external_phy
++ In order to use it to read/write internal phy registers, use
++ DEFAULT_PHY_DEV_ADDR as devad, and (_bank + (_addr & 0xf)) as
++ Use ext_phy_type of 0 in case of cl22 over cl45
++ the register */
++u8 bnx2x_cl45_read(struct bnx2x *bp, u8 port, u32 ext_phy_type,
++ u8 phy_addr, u8 devad, u16 reg, u16 *ret_val);
++
++u8 bnx2x_cl45_write(struct bnx2x *bp, u8 port, u32 ext_phy_type,
++ u8 phy_addr, u8 devad, u16 reg, u16 val);
++
++/* Reads the link_status from the shmem,
++ and update the link vars accordingly */
++void bnx2x_link_status_update(struct link_params *input,
++ struct link_vars *output);
++/* returns string representing the fw_version of the external phy */
++u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
++ u8 *version, u16 len);
++
++/* Set/Unset the led
++ Basically, the CLC takes care of the led for the link, but in case one needs
++ to set/unset the led unnaturally, set the "mode" to LED_MODE_OPER to
++ blink the led, and LED_MODE_OFF to set the led off.*/
++u8 bnx2x_set_led(struct bnx2x *bp, u8 port, u8 mode, u32 speed,
++ u16 hw_led_mode, u32 chip_id);
++#define LED_MODE_OFF 0
++#define LED_MODE_OPER 2
++
++u8 bnx2x_override_led_value(struct bnx2x *bp, u8 port, u32 led_idx, u32 value);
++
++u8 bnx2x_flash_download(struct bnx2x *bp, u8 port, u32 ext_phy_config,
++ u8 driver_loaded, char data[], u32 size);
++/* Get the actual link status. In case it returns 0, link is up,
++ otherwise link is down*/
++u8 bnx2x_test_link(struct link_params *input, struct link_vars *vars);
++
++/* One-time initialization for external phy after power up */
++u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base);
++
++#endif /* BNX2X_LINK_H */
+diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
+new file mode 100644
+index 0000000..9cdd6a9
+--- /dev/null
++++ b/drivers/net/bnx2x_main.c
+@@ -0,0 +1,10661 @@
++/* bnx2x_main.c: Broadcom Everest network driver.
++ *
++ * Copyright (c) 2007-2009 Broadcom Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation.
++ *
++ * Maintained by: Eilon Greenstein <eilong at broadcom.com>
++ * Written by: Eliezer Tamir
++ * Based on code from Michael Chan's bnx2 driver
++ * UDP CSUM errata workaround by Arik Gendelman
++ * Slowpath rework by Vladislav Zolotarov
++ * Statistics and Link management by Yitchak Gertner
++ *
++ */
++
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++#include <linux/kernel.h>
++#include <linux/device.h> /* for dev_info() */
++#include <linux/timer.h>
++#include <linux/errno.h>
++#include <linux/ioport.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/interrupt.h>
++#include <linux/pci.h>
++#include <linux/init.h>
++#include <linux/netdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/skbuff.h>
++#include <linux/dma-mapping.h>
++#include <linux/bitops.h>
++#include <linux/irq.h>
++#include <linux/delay.h>
++#include <asm/byteorder.h>
++#include <linux/time.h>
++#include <linux/ethtool.h>
++#include <linux/mii.h>
++#include <linux/if_vlan.h>
++#include <net/ip.h>
++#include <net/tcp.h>
++#include <net/checksum.h>
++#include <linux/version.h>
++#include <net/ip6_checksum.h>
++#include <linux/workqueue.h>
++#include <linux/crc32.h>
++#include <linux/crc32c.h>
++#include <linux/prefetch.h>
++#include <linux/zlib.h>
++#include <linux/io.h>
++
++#include "bnx2x_reg.h"
++#include "bnx2x_fw_defs.h"
++#include "bnx2x_hsi.h"
++#include "bnx2x_link.h"
++#include "bnx2x.h"
++#include "bnx2x_init.h"
++
++#define DRV_MODULE_VERSION "1.45.26"
++#define DRV_MODULE_RELDATE "2009/01/26"
++#define BNX2X_BC_VER 0x040200
++
++/* Time in jiffies before concluding the transmitter is hung */
++#define TX_TIMEOUT (5*HZ)
++
++static char version[] __devinitdata =
++ "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
++ DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
++
++MODULE_AUTHOR("Eliezer Tamir");
++MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
++MODULE_LICENSE("GPL");
++MODULE_VERSION(DRV_MODULE_VERSION);
++
++static int disable_tpa;
++static int use_inta;
++static int poll;
++static int debug;
++static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
++static int use_multi;
++
++module_param(disable_tpa, int, 0);
++module_param(use_inta, int, 0);
++module_param(poll, int, 0);
++
++static int mrrs = -1;
++module_param(mrrs, int, 0);
++MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
++
++module_param(debug, int, 0);
++MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
++MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
++MODULE_PARM_DESC(poll, "use polling (for debug)");
++MODULE_PARM_DESC(debug, "default debug msglevel");
++
++#ifdef BNX2X_MULTI
++module_param(use_multi, int, 0);
++MODULE_PARM_DESC(use_multi, "use per-CPU queues");
++#endif
++static struct workqueue_struct *bnx2x_wq;
++
++enum bnx2x_board_type {
++ BCM57710 = 0,
++ BCM57711 = 1,
++ BCM57711E = 2,
++};
++
++/* indexed by board_type, above */
++static struct {
++ char *name;
++} board_info[] __devinitdata = {
++ { "Broadcom NetXtreme II BCM57710 XGb" },
++ { "Broadcom NetXtreme II BCM57711 XGb" },
++ { "Broadcom NetXtreme II BCM57711E XGb" }
++};
++
++
++static const struct pci_device_id bnx2x_pci_tbl[] = {
++ { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
++ { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
++ { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
++ { 0 }
++};
++
++MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
++
++/****************************************************************************
++* General service functions
++****************************************************************************/
++
++/* used only at init
++ * locking is done by mcp
++ */
++static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
++{
++ pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
++ pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
++ pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
++ PCICFG_VENDOR_ID_OFFSET);
++}
++
++static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
++{
++ u32 val;
++
++ pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
++ pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
++ pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
++ PCICFG_VENDOR_ID_OFFSET);
++
++ return val;
++}
++
++static const u32 dmae_reg_go_c[] = {
++ DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
++ DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
++ DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
++ DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
++};
++
++/* copy command into DMAE command memory and set DMAE command go */
++static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
++ int idx)
++{
++ u32 cmd_offset;
++ int i;
++
++ cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
++ for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
++ REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
++
++ DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
++ idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
++ }
++ REG_WR(bp, dmae_reg_go_c[idx], 1);
++}
++
++void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
++ u32 len32)
++{
++ struct dmae_command *dmae = &bp->init_dmae;
++ u32 *wb_comp = bnx2x_sp(bp, wb_comp);
++ int cnt = 200;
++
++ if (!bp->dmae_ready) {
++ u32 *data = bnx2x_sp(bp, wb_data[0]);
++
++ DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
++ " using indirect\n", dst_addr, len32);
++ bnx2x_init_ind_wr(bp, dst_addr, data, len32);
++ return;
++ }
++
++ mutex_lock(&bp->dmae_mutex);
++
++ memset(dmae, 0, sizeof(struct dmae_command));
++
++ dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
++ DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
++ DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
++#ifdef __BIG_ENDIAN
++ DMAE_CMD_ENDIANITY_B_DW_SWAP |
++#else
++ DMAE_CMD_ENDIANITY_DW_SWAP |
++#endif
++ (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
++ (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
++ dmae->src_addr_lo = U64_LO(dma_addr);
++ dmae->src_addr_hi = U64_HI(dma_addr);
++ dmae->dst_addr_lo = dst_addr >> 2;
++ dmae->dst_addr_hi = 0;
++ dmae->len = len32;
++ dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
++ dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
++ dmae->comp_val = DMAE_COMP_VAL;
++
++ DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
++ DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
++ "dst_addr [%x:%08x (%08x)]\n"
++ DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
++ dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
++ dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
++ dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
++ DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
++ bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
++ bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
++
++ *wb_comp = 0;
++
++ bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
++
++ udelay(5);
++
++ while (*wb_comp != DMAE_COMP_VAL) {
++ DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
++
++ if (!cnt) {
++ BNX2X_ERR("dmae timeout!\n");
++ break;
++ }
++ cnt--;
++ /* adjust delay for emulation/FPGA */
++ if (CHIP_REV_IS_SLOW(bp))
++ msleep(100);
++ else
++ udelay(5);
++ }
++
++ mutex_unlock(&bp->dmae_mutex);
++}
++
++void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
++{
++ struct dmae_command *dmae = &bp->init_dmae;
++ u32 *wb_comp = bnx2x_sp(bp, wb_comp);
++ int cnt = 200;
++
++ if (!bp->dmae_ready) {
++ u32 *data = bnx2x_sp(bp, wb_data[0]);
++ int i;
++
++ DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
++ " using indirect\n", src_addr, len32);
++ for (i = 0; i < len32; i++)
++ data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
++ return;
++ }
++
++ mutex_lock(&bp->dmae_mutex);
++
++ memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
++ memset(dmae, 0, sizeof(struct dmae_command));
++
++ dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
++ DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
++ DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
++#ifdef __BIG_ENDIAN
++ DMAE_CMD_ENDIANITY_B_DW_SWAP |
++#else
++ DMAE_CMD_ENDIANITY_DW_SWAP |
++#endif
++ (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
++ (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
++ dmae->src_addr_lo = src_addr >> 2;
++ dmae->src_addr_hi = 0;
++ dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
++ dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
++ dmae->len = len32;
++ dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
++ dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
++ dmae->comp_val = DMAE_COMP_VAL;
++
++ DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
++ DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
++ "dst_addr [%x:%08x (%08x)]\n"
++ DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
++ dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
++ dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
++ dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
++
++ *wb_comp = 0;
++
++ bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
++
++ udelay(5);
++
++ while (*wb_comp != DMAE_COMP_VAL) {
++
++ if (!cnt) {
++ BNX2X_ERR("dmae timeout!\n");
++ break;
++ }
++ cnt--;
++ /* adjust delay for emulation/FPGA */
++ if (CHIP_REV_IS_SLOW(bp))
++ msleep(100);
++ else
++ udelay(5);
++ }
++ DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
++ bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
++ bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
++
++ mutex_unlock(&bp->dmae_mutex);
++}
++
++/* used only for slowpath so not inlined */
++static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
++{
++ u32 wb_write[2];
++
++ wb_write[0] = val_hi;
++ wb_write[1] = val_lo;
++ REG_WR_DMAE(bp, reg, wb_write, 2);
++}
++
++#ifdef USE_WB_RD
++static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
++{
++ u32 wb_data[2];
++
++ REG_RD_DMAE(bp, reg, wb_data, 2);
++
++ return HILO_U64(wb_data[0], wb_data[1]);
++}
++#endif
++
++static int bnx2x_mc_assert(struct bnx2x *bp)
++{
++ char last_idx;
++ int i, rc = 0;
++ u32 row0, row1, row2, row3;
++
++ /* XSTORM */
++ last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
++ XSTORM_ASSERT_LIST_INDEX_OFFSET);
++ if (last_idx)
++ BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
++
++ /* print the asserts */
++ for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
++
++ row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
++ XSTORM_ASSERT_LIST_OFFSET(i));
++ row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
++ XSTORM_ASSERT_LIST_OFFSET(i) + 4);
++ row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
++ XSTORM_ASSERT_LIST_OFFSET(i) + 8);
++ row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
++ XSTORM_ASSERT_LIST_OFFSET(i) + 12);
++
++ if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
++ BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
++ " 0x%08x 0x%08x 0x%08x\n",
++ i, row3, row2, row1, row0);
++ rc++;
++ } else {
++ break;
++ }
++ }
++
++ /* TSTORM */
++ last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
++ TSTORM_ASSERT_LIST_INDEX_OFFSET);
++ if (last_idx)
++ BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
++
++ /* print the asserts */
++ for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
++
++ row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
++ TSTORM_ASSERT_LIST_OFFSET(i));
++ row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
++ TSTORM_ASSERT_LIST_OFFSET(i) + 4);
++ row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
++ TSTORM_ASSERT_LIST_OFFSET(i) + 8);
++ row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
++ TSTORM_ASSERT_LIST_OFFSET(i) + 12);
++
++ if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
++ BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
++ " 0x%08x 0x%08x 0x%08x\n",
++ i, row3, row2, row1, row0);
++ rc++;
++ } else {
++ break;
++ }
++ }
++
++ /* CSTORM */
++ last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
++ CSTORM_ASSERT_LIST_INDEX_OFFSET);
++ if (last_idx)
++ BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
++
++ /* print the asserts */
++ for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
++
++ row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
++ CSTORM_ASSERT_LIST_OFFSET(i));
++ row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
++ CSTORM_ASSERT_LIST_OFFSET(i) + 4);
++ row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
++ CSTORM_ASSERT_LIST_OFFSET(i) + 8);
++ row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
++ CSTORM_ASSERT_LIST_OFFSET(i) + 12);
++
++ if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
++ BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
++ " 0x%08x 0x%08x 0x%08x\n",
++ i, row3, row2, row1, row0);
++ rc++;
++ } else {
++ break;
++ }
++ }
++
++ /* USTORM */
++ last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
++ USTORM_ASSERT_LIST_INDEX_OFFSET);
++ if (last_idx)
++ BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
++
++ /* print the asserts */
++ for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
++
++ row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
++ USTORM_ASSERT_LIST_OFFSET(i));
++ row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
++ USTORM_ASSERT_LIST_OFFSET(i) + 4);
++ row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
++ USTORM_ASSERT_LIST_OFFSET(i) + 8);
++ row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
++ USTORM_ASSERT_LIST_OFFSET(i) + 12);
++
++ if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
++ BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
++ " 0x%08x 0x%08x 0x%08x\n",
++ i, row3, row2, row1, row0);
++ rc++;
++ } else {
++ break;
++ }
++ }
++
++ return rc;
++}
++
++static void bnx2x_fw_dump(struct bnx2x *bp)
++{
++ u32 mark, offset;
++ u32 data[9];
++ int word;
++
++ mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
++ mark = ((mark + 0x3) & ~0x3);
++ printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);
++
++ for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
++ for (word = 0; word < 8; word++)
++ data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
++ offset + 4*word));
++ data[8] = 0x0;
++ printk(KERN_CONT "%s", (char *)data);
++ }
++ for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
++ for (word = 0; word < 8; word++)
++ data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
++ offset + 4*word));
++ data[8] = 0x0;
++ printk(KERN_CONT "%s", (char *)data);
++ }
++ printk("\n" KERN_ERR PFX "end of fw dump\n");
++}
++
++static void bnx2x_panic_dump(struct bnx2x *bp)
++{
++ int i;
++ u16 j, start, end;
++
++ bp->stats_state = STATS_STATE_DISABLED;
++ DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
++
++ BNX2X_ERR("begin crash dump -----------------\n");
++
++ for_each_queue(bp, i) {
++ struct bnx2x_fastpath *fp = &bp->fp[i];
++ struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;
++
++ BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
++ " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
++ i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
++ fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
++ BNX2X_ERR(" rx_bd_prod(%x) rx_bd_cons(%x)"
++ " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
++ " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
++ fp->rx_bd_prod, fp->rx_bd_cons,
++ le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
++ fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
++ BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
++ " fp_c_idx(%x) *sb_c_idx(%x) fp_u_idx(%x)"
++ " *sb_u_idx(%x) bd data(%x,%x)\n",
++ fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
++ fp->status_blk->c_status_block.status_block_index,
++ fp->fp_u_idx,
++ fp->status_blk->u_status_block.status_block_index,
++ hw_prods->packets_prod, hw_prods->bds_prod);
++
++ start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
++ end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
++ for (j = start; j < end; j++) {
++ struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
++
++ BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
++ sw_bd->skb, sw_bd->first_bd);
++ }
++
++ start = TX_BD(fp->tx_bd_cons - 10);
++ end = TX_BD(fp->tx_bd_cons + 254);
++ for (j = start; j < end; j++) {
++ u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
++
++ BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
++ j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
++ }
++
++ start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
++ end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
++ for (j = start; j < end; j++) {
++ u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
++ struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
++
++ BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
++ j, rx_bd[1], rx_bd[0], sw_bd->skb);
++ }
++
++ start = RX_SGE(fp->rx_sge_prod);
++ end = RX_SGE(fp->last_max_sge);
++ for (j = start; j < end; j++) {
++ u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
++ struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
++
++ BNX2X_ERR("rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
++ j, rx_sge[1], rx_sge[0], sw_page->page);
++ }
++
++ start = RCQ_BD(fp->rx_comp_cons - 10);
++ end = RCQ_BD(fp->rx_comp_cons + 503);
++ for (j = start; j < end; j++) {
++ u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
++
++ BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
++ j, cqe[0], cqe[1], cqe[2], cqe[3]);
++ }
++ }
++
++ BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
++ " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
++ " spq_prod_idx(%u)\n",
++ bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
++ bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
++
++ bnx2x_fw_dump(bp);
++ bnx2x_mc_assert(bp);
++ BNX2X_ERR("end crash dump -----------------\n");
++}
++
++static void bnx2x_int_enable(struct bnx2x *bp)
++{
++ int port = BP_PORT(bp);
++ u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
++ u32 val = REG_RD(bp, addr);
++ int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
++
++ if (msix) {
++ val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
++ val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
++ HC_CONFIG_0_REG_ATTN_BIT_EN_0);
++ } else {
++ val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
++ HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
++ HC_CONFIG_0_REG_INT_LINE_EN_0 |
++ HC_CONFIG_0_REG_ATTN_BIT_EN_0);
++
++ DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
++ val, port, addr, msix);
++
++ REG_WR(bp, addr, val);
++
++ val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
++ }
++
++ DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
++ val, port, addr, msix);
++
++ REG_WR(bp, addr, val);
++
++ if (CHIP_IS_E1H(bp)) {
++ /* init leading/trailing edge */
++ if (IS_E1HMF(bp)) {
++ val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
++ if (bp->port.pmf)
++ /* enable nig attention */
++ val |= 0x0100;
++ } else
++ val = 0xffff;
++
++ REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
++ REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
++ }
++}
++
++static void bnx2x_int_disable(struct bnx2x *bp)
++{
++ int port = BP_PORT(bp);
++ u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
++ u32 val = REG_RD(bp, addr);
++
++ val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
++ HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
++ HC_CONFIG_0_REG_INT_LINE_EN_0 |
++ HC_CONFIG_0_REG_ATTN_BIT_EN_0);
++
++ DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
++ val, port, addr);
++
++ REG_WR(bp, addr, val);
++ if (REG_RD(bp, addr) != val)
++ BNX2X_ERR("BUG! proper val not read from IGU!\n");
++}
++
++static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
++{
++ int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
++ int i;
++
++ /* disable interrupt handling */
++ atomic_inc(&bp->intr_sem);
++ if (disable_hw)
++ /* prevent the HW from sending interrupts */
++ bnx2x_int_disable(bp);
++
++ /* make sure all ISRs are done */
++ if (msix) {
++ for_each_queue(bp, i)
++ synchronize_irq(bp->msix_table[i].vector);
++
++ /* one more for the Slow Path IRQ */
++ synchronize_irq(bp->msix_table[i].vector);
++ } else
++ synchronize_irq(bp->pdev->irq);
++
++ /* make sure sp_task is not running */
++ cancel_delayed_work(&bp->sp_task);
++ flush_workqueue(bnx2x_wq);
++}
++
++/* fast path */
++
++/*
++ * General service functions
++ */
++
++static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
++ u8 storm, u16 index, u8 op, u8 update)
++{
++ u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
++ COMMAND_REG_INT_ACK);
++ struct igu_ack_register igu_ack;
++
++ igu_ack.status_block_index = index;
++ igu_ack.sb_id_and_flags =
++ ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
++ (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
++ (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
++ (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
++
++ DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
++ (*(u32 *)&igu_ack), hc_addr);
++ REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
++}
++
++static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
++{
++ struct host_status_block *fpsb = fp->status_blk;
++ u16 rc = 0;
++
++ barrier(); /* status block is written to by the chip */
++ if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
++ fp->fp_c_idx = fpsb->c_status_block.status_block_index;
++ rc |= 1;
++ }
++ if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
++ fp->fp_u_idx = fpsb->u_status_block.status_block_index;
++ rc |= 2;
++ }
++ return rc;
++}
++
++static u16 bnx2x_ack_int(struct bnx2x *bp)
++{
++ u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
++ COMMAND_REG_SIMD_MASK);
++ u32 result = REG_RD(bp, hc_addr);
++
++ DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
++ result, hc_addr);
++
++ return result;
++}
++
++
++/*
++ * fast path service functions
++ */
++
++static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
++{
++ u16 tx_cons_sb;
++
++ /* Tell compiler that status block fields can change */
++ barrier();
++ tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
++ return (fp->tx_pkt_cons != tx_cons_sb);
++}
++
++static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
++{
++ /* Tell compiler that consumer and producer can change */
++ barrier();
++ return (fp->tx_pkt_prod != fp->tx_pkt_cons);
++
++}
++
++/* free skb in the packet ring at pos idx
++ * return idx of last bd freed
++ */
++static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
++ u16 idx)
++{
++ struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
++ struct eth_tx_bd *tx_bd;
++ struct sk_buff *skb = tx_buf->skb;
++ u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
++ int nbd;
++
++ DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
++ idx, tx_buf, skb);
++
++ /* unmap first bd */
++ DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
++ tx_bd = &fp->tx_desc_ring[bd_idx];
++ pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
++ BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
++
++ nbd = le16_to_cpu(tx_bd->nbd) - 1;
++ new_cons = nbd + tx_buf->first_bd;
++#ifdef BNX2X_STOP_ON_ERROR
++ if (nbd > (MAX_SKB_FRAGS + 2)) {
++ BNX2X_ERR("BAD nbd!\n");
++ bnx2x_panic();
++ }
++#endif
++
++ /* Skip a parse bd and the TSO split header bd
++ since they have no mapping */
++ if (nbd)
++ bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
++
++ if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
++ ETH_TX_BD_FLAGS_TCP_CSUM |
++ ETH_TX_BD_FLAGS_SW_LSO)) {
++ if (--nbd)
++ bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
++ tx_bd = &fp->tx_desc_ring[bd_idx];
++ /* is this a TSO split header bd? */
++ if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
++ if (--nbd)
++ bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
++ }
++ }
++
++ /* now free frags */
++ while (nbd > 0) {
++
++ DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
++ tx_bd = &fp->tx_desc_ring[bd_idx];
++ pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
++ BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
++ if (--nbd)
++ bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
++ }
++
++ /* release skb */
++ WARN_ON(!skb);
++ dev_kfree_skb(skb);
++ tx_buf->first_bd = 0;
++ tx_buf->skb = NULL;
++
++ return new_cons;
++}
++
++static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
++{
++ s16 used;
++ u16 prod;
++ u16 cons;
++
++ barrier(); /* Tell compiler that prod and cons can change */
++ prod = fp->tx_bd_prod;
++ cons = fp->tx_bd_cons;
++
++ /* NUM_TX_RINGS = number of "next-page" entries
++ It will be used as a threshold */
++ used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
++
++#ifdef BNX2X_STOP_ON_ERROR
++ WARN_ON(used < 0);
++ WARN_ON(used > fp->bp->tx_ring_size);
++ WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
++#endif
++
++ return (s16)(fp->bp->tx_ring_size) - used;
++}
++
++static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
++{
++ struct bnx2x *bp = fp->bp;
++ u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
++ int done = 0;
++
++#ifdef BNX2X_STOP_ON_ERROR
++ if (unlikely(bp->panic))
++ return;
++#endif
++
++ hw_cons = le16_to_cpu(*fp->tx_cons_sb);
++ sw_cons = fp->tx_pkt_cons;
++
++ while (sw_cons != hw_cons) {
++ u16 pkt_cons;
++
++ pkt_cons = TX_BD(sw_cons);
++
++ /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
++
++ DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
++ hw_cons, sw_cons, pkt_cons);
++
++/* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
++ rmb();
++ prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
++ }
++*/
++ bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
++ sw_cons++;
++ done++;
++
++ if (done == work)
++ break;
++ }
++
++ fp->tx_pkt_cons = sw_cons;
++ fp->tx_bd_cons = bd_cons;
++
++ /* Need to make the tx_cons update visible to start_xmit()
++ * before checking for netif_queue_stopped(). Without the
++ * memory barrier, there is a small possibility that start_xmit()
++ * will miss it and cause the queue to be stopped forever.
++ */
++ smp_mb();
++
++ /* TBD need a thresh? */
++ if (unlikely(netif_queue_stopped(bp->dev))) {
++
++ netif_tx_lock(bp->dev);
++
++ if (netif_queue_stopped(bp->dev) &&
++ (bp->state == BNX2X_STATE_OPEN) &&
++ (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
++ netif_wake_queue(bp->dev);
++
++ netif_tx_unlock(bp->dev);
++ }
++}
++
++
++static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
++ union eth_rx_cqe *rr_cqe)
++{
++ struct bnx2x *bp = fp->bp;
++ int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
++ int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
++
++ DP(BNX2X_MSG_SP,
++ "fp %d cid %d got ramrod #%d state is %x type is %d\n",
++ FP_IDX(fp), cid, command, bp->state,
++ rr_cqe->ramrod_cqe.ramrod_type);
++
++ bp->spq_left++;
++
++ if (FP_IDX(fp)) {
++ switch (command | fp->state) {
++ case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
++ BNX2X_FP_STATE_OPENING):
++ DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
++ cid);
++ fp->state = BNX2X_FP_STATE_OPEN;
++ break;
++
++ case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
++ DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
++ cid);
++ fp->state = BNX2X_FP_STATE_HALTED;
++ break;
++
++ default:
++ BNX2X_ERR("unexpected MC reply (%d) "
++ "fp->state is %x\n", command, fp->state);
++ break;
++ }
++ mb(); /* force bnx2x_wait_ramrod() to see the change */
++ return;
++ }
++
++ switch (command | bp->state) {
++ case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
++ DP(NETIF_MSG_IFUP, "got setup ramrod\n");
++ bp->state = BNX2X_STATE_OPEN;
++ break;
++
++ case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
++ DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
++ bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
++ fp->state = BNX2X_FP_STATE_HALTED;
++ break;
++
++ case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
++ DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
++ bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
++ break;
++
++
++ case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
++ case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
++ DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
++ bp->set_mac_pending = 0;
++ break;
++
++ case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
++ DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
++ break;
++
++ default:
++ BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
++ command, bp->state);
++ break;
++ }
++ mb(); /* force bnx2x_wait_ramrod() to see the change */
++}
++
++static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
++ struct bnx2x_fastpath *fp, u16 index)
++{
++ struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
++ struct page *page = sw_buf->page;
++ struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
++
++ /* Skip "next page" elements */
++ if (!page)
++ return;
++
++ pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
++ SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
++ __free_pages(page, PAGES_PER_SGE_SHIFT);
++
++ sw_buf->page = NULL;
++ sge->addr_hi = 0;
++ sge->addr_lo = 0;
++}
++
++static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
++ struct bnx2x_fastpath *fp, int last)
++{
++ int i;
++
++ for (i = 0; i < last; i++)
++ bnx2x_free_rx_sge(bp, fp, i);
++}
++
++static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
++ struct bnx2x_fastpath *fp, u16 index)
++{
++ struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
++ struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
++ struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
++ dma_addr_t mapping;
++
++ if (unlikely(page == NULL))
++ return -ENOMEM;
++
++ mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
++ PCI_DMA_FROMDEVICE);
++ if (unlikely(dma_mapping_error(mapping))) {
++ __free_pages(page, PAGES_PER_SGE_SHIFT);
++ return -ENOMEM;
++ }
++
++ sw_buf->page = page;
++ pci_unmap_addr_set(sw_buf, mapping, mapping);
++
++ sge->addr_hi = cpu_to_le32(U64_HI(mapping));
++ sge->addr_lo = cpu_to_le32(U64_LO(mapping));
++
++ return 0;
++}
++
++static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
++ struct bnx2x_fastpath *fp, u16 index)
++{
++ struct sk_buff *skb;
++ struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
++ struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
++ dma_addr_t mapping;
++
++ skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
++ if (unlikely(skb == NULL))
++ return -ENOMEM;
++
++ mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
++ PCI_DMA_FROMDEVICE);
++ if (unlikely(dma_mapping_error(mapping))) {
++ dev_kfree_skb(skb);
++ return -ENOMEM;
++ }
++
++ rx_buf->skb = skb;
++ pci_unmap_addr_set(rx_buf, mapping, mapping);
++
++ rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
++ rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
++
++ return 0;
++}
++
++/* note that we are not allocating a new skb,
++ * we are just moving one from cons to prod
++ * we are not creating a new mapping,
++ * so there is no need to check for dma_mapping_error().
++ */
++static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
++ struct sk_buff *skb, u16 cons, u16 prod)
++{
++ struct bnx2x *bp = fp->bp;
++ struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
++ struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
++ struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
++ struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
++
++ pci_dma_sync_single_for_device(bp->pdev,
++ pci_unmap_addr(cons_rx_buf, mapping),
++ bp->rx_offset + RX_COPY_THRESH,
++ PCI_DMA_FROMDEVICE);
++
++ prod_rx_buf->skb = cons_rx_buf->skb;
++ pci_unmap_addr_set(prod_rx_buf, mapping,
++ pci_unmap_addr(cons_rx_buf, mapping));
++ *prod_bd = *cons_bd;
++}
++
++static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
++ u16 idx)
++{
++ u16 last_max = fp->last_max_sge;
++
++ if (SUB_S16(idx, last_max) > 0)
++ fp->last_max_sge = idx;
++}
++
++static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
++{
++ int i, j;
++
++ for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
++ int idx = RX_SGE_CNT * i - 1;
++
++ for (j = 0; j < 2; j++) {
++ SGE_MASK_CLEAR_BIT(fp, idx);
++ idx--;
++ }
++ }
++}
++
++static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
++ struct eth_fast_path_rx_cqe *fp_cqe)
++{
++ struct bnx2x *bp = fp->bp;
++ u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
++ le16_to_cpu(fp_cqe->len_on_bd)) >>
++ SGE_PAGE_SHIFT;
++ u16 last_max, last_elem, first_elem;
++ u16 delta = 0;
++ u16 i;
++
++ if (!sge_len)
++ return;
++
++ /* First mark all used pages */
++ for (i = 0; i < sge_len; i++)
++ SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
++
++ DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
++ sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
++
++ /* Here we assume that the last SGE index is the biggest */
++ prefetch((void *)(fp->sge_mask));
++ bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
++
++ last_max = RX_SGE(fp->last_max_sge);
++ last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
++ first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
++
++ /* If ring is not full */
++ if (last_elem + 1 != first_elem)
++ last_elem++;
++
++ /* Now update the prod */
++ for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
++ if (likely(fp->sge_mask[i]))
++ break;
++
++ fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
++ delta += RX_SGE_MASK_ELEM_SZ;
++ }
++
++ if (delta > 0) {
++ fp->rx_sge_prod += delta;
++ /* clear page-end entries */
++ bnx2x_clear_sge_mask_next_elems(fp);
++ }
++
++ DP(NETIF_MSG_RX_STATUS,
++ "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
++ fp->last_max_sge, fp->rx_sge_prod);
++}
++
++static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
++{
++ /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
++ memset(fp->sge_mask, 0xff,
++ (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
++
++ /* Clear the two last indices in the page to 1:
++ these are the indices that correspond to the "next" element,
++ hence will never be indicated and should be removed from
++ the calculations. */
++ bnx2x_clear_sge_mask_next_elems(fp);
++}
++
++static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
++ struct sk_buff *skb, u16 cons, u16 prod)
++{
++ struct bnx2x *bp = fp->bp;
++ struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
++ struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
++ struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
++ dma_addr_t mapping;
++
++ /* move empty skb from pool to prod and map it */
++ prod_rx_buf->skb = fp->tpa_pool[queue].skb;
++ mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
++ bp->rx_buf_size, PCI_DMA_FROMDEVICE);
++ pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
++
++ /* move partial skb from cons to pool (don't unmap yet) */
++ fp->tpa_pool[queue] = *cons_rx_buf;
++
++ /* mark bin state as start - print error if current state != stop */
++ if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
++ BNX2X_ERR("start of bin not in stop [%d]\n", queue);
++
++ fp->tpa_state[queue] = BNX2X_TPA_START;
++
++ /* point prod_bd to new skb */
++ prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
++ prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
++
++#ifdef BNX2X_STOP_ON_ERROR
++ fp->tpa_queue_used |= (1 << queue);
++#ifdef __powerpc64__
++ DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
++#else
++ DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
++#endif
++ fp->tpa_queue_used);
++#endif
++}
++
++static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
++ struct sk_buff *skb,
++ struct eth_fast_path_rx_cqe *fp_cqe,
++ u16 cqe_idx)
++{
++ struct sw_rx_page *rx_pg, old_rx_pg;
++ u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
++ u32 i, frag_len, frag_size, pages;
++ int err;
++ int j;
++
++ frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
++ pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
++
++ /* This is needed in order to enable forwarding support */
++ if (frag_size)
++ skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
++ max(frag_size, (u32)len_on_bd));
++
++#ifdef BNX2X_STOP_ON_ERROR
++ if (pages >
++ min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
++ BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
++ pages, cqe_idx);
++ BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
++ fp_cqe->pkt_len, len_on_bd);
++ bnx2x_panic();
++ return -EINVAL;
++ }
++#endif
++
++ /* Run through the SGL and compose the fragmented skb */
++ for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
++ u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
++
++ /* FW gives the indices of the SGE as if the ring is an array
++ (meaning that "next" element will consume 2 indices) */
++ frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
++ rx_pg = &fp->rx_page_ring[sge_idx];
++ old_rx_pg = *rx_pg;
++
++ /* If we fail to allocate a substitute page, we simply stop
++ where we are and drop the whole packet */
++ err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
++ if (unlikely(err)) {
++ bp->eth_stats.rx_skb_alloc_failed++;
++ return err;
++ }
++
++ /* Unmap the page as we are going to pass it to the stack */
++ pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
++ SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
++
++ /* Add one frag and update the appropriate fields in the skb */
++ skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
++
++ skb->data_len += frag_len;
++ skb->truesize += frag_len;
++ skb->len += frag_len;
++
++ frag_size -= frag_len;
++ }
++
++ return 0;
++}
++
++static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
++ u16 queue, int pad, int len, union eth_rx_cqe *cqe,
++ u16 cqe_idx)
++{
++ struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
++ struct sk_buff *skb = rx_buf->skb;
++ /* alloc new skb */
++ struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
++
++ /* Unmap skb in the pool anyway, as we are going to change
++ pool entry status to BNX2X_TPA_STOP even if new skb allocation
++ fails. */
++ pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
++ bp->rx_buf_size, PCI_DMA_FROMDEVICE);
++
++ if (likely(new_skb)) {
++ /* fix ip xsum and give it to the stack */
++ /* (no need to map the new skb) */
++#ifdef BCM_VLAN
++ int is_vlan_cqe =
++ (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
++ PARSING_FLAGS_VLAN);
++ int is_not_hwaccel_vlan_cqe =
++ (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
++#endif
++
++ prefetch(skb);
++ prefetch(((char *)(skb)) + 128);
++
++#ifdef BNX2X_STOP_ON_ERROR
++ if (pad + len > bp->rx_buf_size) {
++ BNX2X_ERR("skb_put is about to fail... "
++ "pad %d len %d rx_buf_size %d\n",
++ pad, len, bp->rx_buf_size);
++ bnx2x_panic();
++ return;
++ }
++#endif
++
++ skb_reserve(skb, pad);
++ skb_put(skb, len);
++
++ skb->protocol = eth_type_trans(skb, bp->dev);
++ skb->ip_summed = CHECKSUM_UNNECESSARY;
++
++ {
++ struct iphdr *iph;
++
++ iph = (struct iphdr *)skb->data;
++#ifdef BCM_VLAN
++ /* If there is no Rx VLAN offloading -
++ take the VLAN tag into account */
++ if (unlikely(is_not_hwaccel_vlan_cqe))
++ iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
++#endif
++ iph->check = 0;
++ iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
++ }
++
++ if (!bnx2x_fill_frag_skb(bp, fp, skb,
++ &cqe->fast_path_cqe, cqe_idx)) {
++#ifdef BCM_VLAN
++ if ((bp->vlgrp != NULL) && is_vlan_cqe &&
++ (!is_not_hwaccel_vlan_cqe))
++ vlan_hwaccel_receive_skb(skb, bp->vlgrp,
++ le16_to_cpu(cqe->fast_path_cqe.
++ vlan_tag));
++ else
++#endif
++ netif_receive_skb(skb);
++ } else {
++ DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
++ " - dropping packet!\n");
++ dev_kfree_skb(skb);
++ }
++
++ bp->dev->last_rx = jiffies;
++
++ /* put new skb in bin */
++ fp->tpa_pool[queue].skb = new_skb;
++
++ } else {
++ /* else drop the packet and keep the buffer in the bin */
++ DP(NETIF_MSG_RX_STATUS,
++ "Failed to allocate new skb - dropping packet!\n");
++ bp->eth_stats.rx_skb_alloc_failed++;
++ }
++
++ fp->tpa_state[queue] = BNX2X_TPA_STOP;
++}
++
++static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
++ struct bnx2x_fastpath *fp,
++ u16 bd_prod, u16 rx_comp_prod,
++ u16 rx_sge_prod)
++{
++ struct ustorm_eth_rx_producers rx_prods = {0};
++ int i;
++
++ /* Update producers */
++ rx_prods.bd_prod = bd_prod;
++ rx_prods.cqe_prod = rx_comp_prod;
++ rx_prods.sge_prod = rx_sge_prod;
++
++ /*
++ * Make sure that the BD and SGE data is updated before updating the
++ * producers since FW might read the BD/SGE right after the producer
++ * is updated.
++ * This is only applicable for weak-ordered memory model archs such
++ * as IA-64. The following barrier is also mandatory since FW will
++ * assume BDs must have buffers.
++ */
++ wmb();
++
++ for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
++ REG_WR(bp, BAR_USTRORM_INTMEM +
++ USTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
++ ((u32 *)&rx_prods)[i]);
++
++ mmiowb(); /* keep prod updates ordered */
++
++ DP(NETIF_MSG_RX_STATUS,
++ "Wrote: bd_prod %u cqe_prod %u sge_prod %u\n",
++ bd_prod, rx_comp_prod, rx_sge_prod);
++}
++
++static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
++{
++ struct bnx2x *bp = fp->bp;
++ u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
++ u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
++ int rx_pkt = 0;
++
++#ifdef BNX2X_STOP_ON_ERROR
++ if (unlikely(bp->panic))
++ return 0;
++#endif
++
++ /* CQ "next element" is of the size of the regular element,
++ that's why it's ok here */
++ hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
++ if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
++ hw_comp_cons++;
++
++ bd_cons = fp->rx_bd_cons;
++ bd_prod = fp->rx_bd_prod;
++ bd_prod_fw = bd_prod;
++ sw_comp_cons = fp->rx_comp_cons;
++ sw_comp_prod = fp->rx_comp_prod;
++
++ /* Memory barrier necessary as speculative reads of the rx
++ * buffer can be ahead of the index in the status block
++ */
++ rmb();
++
++ DP(NETIF_MSG_RX_STATUS,
++ "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
++ FP_IDX(fp), hw_comp_cons, sw_comp_cons);
++
++ while (sw_comp_cons != hw_comp_cons) {
++ struct sw_rx_bd *rx_buf = NULL;
++ struct sk_buff *skb;
++ union eth_rx_cqe *cqe;
++ u8 cqe_fp_flags;
++ u16 len, pad;
++
++ comp_ring_cons = RCQ_BD(sw_comp_cons);
++ bd_prod = RX_BD(bd_prod);
++ bd_cons = RX_BD(bd_cons);
++
++ cqe = &fp->rx_comp_ring[comp_ring_cons];
++ cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
++
++ DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
++ " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
++ cqe_fp_flags, cqe->fast_path_cqe.status_flags,
++ le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
++ le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
++ le16_to_cpu(cqe->fast_path_cqe.pkt_len));
++
++ /* is this a slowpath msg? */
++ if (unlikely(CQE_TYPE(cqe_fp_flags))) {
++ bnx2x_sp_event(fp, cqe);
++ goto next_cqe;
++
++ /* this is an rx packet */
++ } else {
++ rx_buf = &fp->rx_buf_ring[bd_cons];
++ skb = rx_buf->skb;
++ len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
++ pad = cqe->fast_path_cqe.placement_offset;
++
++ /* If CQE is marked both TPA_START and TPA_END
++ it is a non-TPA CQE */
++ if ((!fp->disable_tpa) &&
++ (TPA_TYPE(cqe_fp_flags) !=
++ (TPA_TYPE_START | TPA_TYPE_END))) {
++ u16 queue = cqe->fast_path_cqe.queue_index;
++
++ if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
++ DP(NETIF_MSG_RX_STATUS,
++ "calling tpa_start on queue %d\n",
++ queue);
++
++ bnx2x_tpa_start(fp, queue, skb,
++ bd_cons, bd_prod);
++ goto next_rx;
++ }
++
++ if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
++ DP(NETIF_MSG_RX_STATUS,
++ "calling tpa_stop on queue %d\n",
++ queue);
++
++ if (!BNX2X_RX_SUM_FIX(cqe))
++ BNX2X_ERR("STOP on none TCP "
++ "data\n");
++
++ /* This is a size of the linear data
++ on this skb */
++ len = le16_to_cpu(cqe->fast_path_cqe.
++ len_on_bd);
++ bnx2x_tpa_stop(bp, fp, queue, pad,
++ len, cqe, comp_ring_cons);
++#ifdef BNX2X_STOP_ON_ERROR
++ if (bp->panic)
++ return -EINVAL;
++#endif
++
++ bnx2x_update_sge_prod(fp,
++ &cqe->fast_path_cqe);
++ goto next_cqe;
++ }
++ }
++
++ pci_dma_sync_single_for_device(bp->pdev,
++ pci_unmap_addr(rx_buf, mapping),
++ pad + RX_COPY_THRESH,
++ PCI_DMA_FROMDEVICE);
++ prefetch(skb);
++ prefetch(((char *)(skb)) + 128);
++
++ /* is this an error packet? */
++ if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
++ DP(NETIF_MSG_RX_ERR,
++ "ERROR flags %x rx packet %u\n",
++ cqe_fp_flags, sw_comp_cons);
++ bp->eth_stats.rx_err_discard_pkt++;
++ goto reuse_rx;
++ }
++
++ /* Since we don't have a jumbo ring
++ * copy small packets if mtu > 1500
++ */
++ if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
++ (len <= RX_COPY_THRESH)) {
++ struct sk_buff *new_skb;
++
++ new_skb = netdev_alloc_skb(bp->dev,
++ len + pad);
++ if (new_skb == NULL) {
++ DP(NETIF_MSG_RX_ERR,
++ "ERROR packet dropped "
++ "because of alloc failure\n");
++ bp->eth_stats.rx_skb_alloc_failed++;
++ goto reuse_rx;
++ }
++
++ /* aligned copy */
++ skb_copy_from_linear_data_offset(skb, pad,
++ new_skb->data + pad, len);
++ skb_reserve(new_skb, pad);
++ skb_put(new_skb, len);
++
++ bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
++
++ skb = new_skb;
++
++ } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
++ pci_unmap_single(bp->pdev,
++ pci_unmap_addr(rx_buf, mapping),
++ bp->rx_buf_size,
++ PCI_DMA_FROMDEVICE);
++ skb_reserve(skb, pad);
++ skb_put(skb, len);
++
++ } else {
++ DP(NETIF_MSG_RX_ERR,
++ "ERROR packet dropped because "
++ "of alloc failure\n");
++ bp->eth_stats.rx_skb_alloc_failed++;
++reuse_rx:
++ bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
++ goto next_rx;
++ }
++
++ skb->protocol = eth_type_trans(skb, bp->dev);
++
++ skb->ip_summed = CHECKSUM_NONE;
++ if (bp->rx_csum) {
++ if (likely(BNX2X_RX_CSUM_OK(cqe)))
++ skb->ip_summed = CHECKSUM_UNNECESSARY;
++ else
++ bp->eth_stats.hw_csum_err++;
++ }
++ }
++
++#ifdef BCM_VLAN
++ if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
++ (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
++ PARSING_FLAGS_VLAN))
++ vlan_hwaccel_receive_skb(skb, bp->vlgrp,
++ le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
++ else
++#endif
++ netif_receive_skb(skb);
++
++ bp->dev->last_rx = jiffies;
++
++next_rx:
++ rx_buf->skb = NULL;
++
++ bd_cons = NEXT_RX_IDX(bd_cons);
++ bd_prod = NEXT_RX_IDX(bd_prod);
++ bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
++ rx_pkt++;
++next_cqe:
++ sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
++ sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
++
++ if (rx_pkt == budget)
++ break;
++ } /* while */
++
++ fp->rx_bd_cons = bd_cons;
++ fp->rx_bd_prod = bd_prod_fw;
++ fp->rx_comp_cons = sw_comp_cons;
++ fp->rx_comp_prod = sw_comp_prod;
++
++ /* Update producers */
++ bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
++ fp->rx_sge_prod);
++
++ fp->rx_pkt += rx_pkt;
++ fp->rx_calls++;
++
++ return rx_pkt;
++}
++
++static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
++{
++ struct bnx2x_fastpath *fp = fp_cookie;
++ struct bnx2x *bp = fp->bp;
++ struct net_device *dev = bp->dev;
++ int index = FP_IDX(fp);
++
++ /* Return here if interrupt is disabled */
++ if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
++ DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
++ return IRQ_HANDLED;
++ }
++
++ DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
++ index, FP_SB_ID(fp));
++ bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);
++
++#ifdef BNX2X_STOP_ON_ERROR
++ if (unlikely(bp->panic))
++ return IRQ_HANDLED;
++#endif
++
++ prefetch(fp->rx_cons_sb);
++ prefetch(fp->tx_cons_sb);
++ prefetch(&fp->status_blk->c_status_block.status_block_index);
++ prefetch(&fp->status_blk->u_status_block.status_block_index);
++
++ netif_rx_schedule(dev, &bnx2x_fp(bp, index, napi));
++
++ return IRQ_HANDLED;
++}
++
++static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
++{
++ struct net_device *dev = dev_instance;
++ struct bnx2x *bp = netdev_priv(dev);
++ u16 status = bnx2x_ack_int(bp);
++ u16 mask;
++
++ /* Return here if interrupt is shared and it's not for us */
++ if (unlikely(status == 0)) {
++ DP(NETIF_MSG_INTR, "not our interrupt!\n");
++ return IRQ_NONE;
++ }
++ DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status);
++
++ /* Return here if interrupt is disabled */
++ if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
++ DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
++ return IRQ_HANDLED;
++ }
++
++#ifdef BNX2X_STOP_ON_ERROR
++ if (unlikely(bp->panic))
++ return IRQ_HANDLED;
++#endif
++
++ mask = 0x2 << bp->fp[0].sb_id;
++ if (status & mask) {
++ struct bnx2x_fastpath *fp = &bp->fp[0];
++
++ prefetch(fp->rx_cons_sb);
++ prefetch(fp->tx_cons_sb);
++ prefetch(&fp->status_blk->c_status_block.status_block_index);
++ prefetch(&fp->status_blk->u_status_block.status_block_index);
++
++ netif_rx_schedule(dev, &bnx2x_fp(bp, 0, napi));
++
++ status &= ~mask;
++ }
++
++
++ if (unlikely(status & 0x1)) {
++ queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
++
++ status &= ~0x1;
++ if (!status)
++ return IRQ_HANDLED;
++ }
++
++ if (status)
++ DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
++ status);
++
++ return IRQ_HANDLED;
++}
++
++/* end of fast path */
++
++static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
++
++/* Link */
++
++/*
++ * General service functions
++ */
++
++static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
++{
++ u32 lock_status;
++ u32 resource_bit = (1 << resource);
++ int func = BP_FUNC(bp);
++ u32 hw_lock_control_reg;
++ int cnt;
++
++ /* Validating that the resource is within range */
++ if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
++ DP(NETIF_MSG_HW,
++ "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
++ resource, HW_LOCK_MAX_RESOURCE_VALUE);
++ return -EINVAL;
++ }
++
++ if (func <= 5) {
++ hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
++ } else {
++ hw_lock_control_reg =
++ (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
++ }
++
++ /* Validating that the resource is not already taken */
++ lock_status = REG_RD(bp, hw_lock_control_reg);
++ if (lock_status & resource_bit) {
++ DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
++ lock_status, resource_bit);
++ return -EEXIST;
++ }
++
++ /* Try for 5 seconds every 5ms */
++ for (cnt = 0; cnt < 1000; cnt++) {
++ /* Try to acquire the lock */
++ REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
++ lock_status = REG_RD(bp, hw_lock_control_reg);
++ if (lock_status & resource_bit)
++ return 0;
++
++ msleep(5);
++ }
++ DP(NETIF_MSG_HW, "Timeout\n");
++ return -EAGAIN;
++}
++
++static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
++{
++ u32 lock_status;
++ u32 resource_bit = (1 << resource);
++ int func = BP_FUNC(bp);
++ u32 hw_lock_control_reg;
++
++ /* Validating that the resource is within range */
++ if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
++ DP(NETIF_MSG_HW,
++ "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
++ resource, HW_LOCK_MAX_RESOURCE_VALUE);
++ return -EINVAL;
++ }
++
++ if (func <= 5) {
++ hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
++ } else {
++ hw_lock_control_reg =
++ (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
++ }
++
++ /* Validating that the resource is currently taken */
++ lock_status = REG_RD(bp, hw_lock_control_reg);
++ if (!(lock_status & resource_bit)) {
++ DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
++ lock_status, resource_bit);
++ return -EFAULT;
++ }
++
++ REG_WR(bp, hw_lock_control_reg, resource_bit);
++ return 0;
++}
++
++/* HW Lock for shared dual port PHYs */
++static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
++{
++ u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
++
++ mutex_lock(&bp->port.phy_mutex);
++
++ if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
++ (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
++ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
++}
++
++static void bnx2x_release_phy_lock(struct bnx2x *bp)
++{
++ u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
++
++ if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
++ (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
++ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
++
++ mutex_unlock(&bp->port.phy_mutex);
++}
++
++int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
++{
++ /* The GPIO should be swapped if swap register is set and active */
++ int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
++ REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
++ int gpio_shift = gpio_num +
++ (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
++ u32 gpio_mask = (1 << gpio_shift);
++ u32 gpio_reg;
++
++ if (gpio_num > MISC_REGISTERS_GPIO_3) {
++ BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
++ return -EINVAL;
++ }
++
++ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
++ /* read GPIO and mask except the float bits */
++ gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
++
++ switch (mode) {
++ case MISC_REGISTERS_GPIO_OUTPUT_LOW:
++ DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
++ gpio_num, gpio_shift);
++ /* clear FLOAT and set CLR */
++ gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
++ gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
++ break;
++
++ case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
++ DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
++ gpio_num, gpio_shift);
++ /* clear FLOAT and set SET */
++ gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
++ gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
++ break;
++
++ case MISC_REGISTERS_GPIO_INPUT_HI_Z:
++ DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
++ gpio_num, gpio_shift);
++ /* set FLOAT */
++ gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
++ break;
++
++ default:
++ break;
++ }
++
++ REG_WR(bp, MISC_REG_GPIO, gpio_reg);
++ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
++
++ return 0;
++}
++
++static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
++{
++ u32 spio_mask = (1 << spio_num);
++ u32 spio_reg;
++
++ if ((spio_num < MISC_REGISTERS_SPIO_4) ||
++ (spio_num > MISC_REGISTERS_SPIO_7)) {
++ BNX2X_ERR("Invalid SPIO %d\n", spio_num);
++ return -EINVAL;
++ }
++
++ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
++ /* read SPIO and mask except the float bits */
++ spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
++
++ switch (mode) {
++ case MISC_REGISTERS_SPIO_OUTPUT_LOW:
++ DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
++ /* clear FLOAT and set CLR */
++ spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
++ spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
++ break;
++
++ case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
++ DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
++ /* clear FLOAT and set SET */
++ spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
++ spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
++ break;
++
++ case MISC_REGISTERS_SPIO_INPUT_HI_Z:
++ DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
++ /* set FLOAT */
++ spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
++ break;
++
++ default:
++ break;
++ }
++
++ REG_WR(bp, MISC_REG_SPIO, spio_reg);
++ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
++
++ return 0;
++}
++
++static void bnx2x_calc_fc_adv(struct bnx2x *bp)
++{
++ switch (bp->link_vars.ieee_fc &
++ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
++ case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
++ bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
++ ADVERTISED_Pause);
++ break;
++ case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
++ bp->port.advertising |= (ADVERTISED_Asym_Pause |
++ ADVERTISED_Pause);
++ break;
++ case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
++ bp->port.advertising |= ADVERTISED_Asym_Pause;
++ break;
++ default:
++ bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
++ ADVERTISED_Pause);
++ break;
++ }
++}
++
++static void bnx2x_link_report(struct bnx2x *bp)
++{
++ if (bp->link_vars.link_up) {
++ if (bp->state == BNX2X_STATE_OPEN)
++ netif_carrier_on(bp->dev);
++ printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
++
++ printk("%d Mbps ", bp->link_vars.line_speed);
++
++ if (bp->link_vars.duplex == DUPLEX_FULL)
++ printk("full duplex");
++ else
++ printk("half duplex");
++
++ if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
++ if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
++ printk(", receive ");
++ if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
++ printk("& transmit ");
++ } else {
++ printk(", transmit ");
++ }
++ printk("flow control ON");
++ }
++ printk("\n");
++
++ } else { /* link_down */
++ netif_carrier_off(bp->dev);
++ printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
++ }
++}
++
++static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
++{
++ if (!BP_NOMCP(bp)) {
++ u8 rc;
++
++ /* Initialize link parameters structure variables */
++ /* It is recommended to turn off RX FC for jumbo frames
++ for better performance */
++ if (IS_E1HMF(bp))
++ bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
++ else if (bp->dev->mtu > 5000)
++ bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
++ else
++ bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
++
++ bnx2x_acquire_phy_lock(bp);
++ rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
++ bnx2x_release_phy_lock(bp);
++
++ bnx2x_calc_fc_adv(bp);
++
++ if (bp->link_vars.link_up)
++ bnx2x_link_report(bp);
++
++
++ return rc;
++ }
++ BNX2X_ERR("Bootcode is missing -not initializing link\n");
++ return -EINVAL;
++}
++
++static void bnx2x_link_set(struct bnx2x *bp)
++{
++ if (!BP_NOMCP(bp)) {
++ bnx2x_acquire_phy_lock(bp);
++ bnx2x_phy_init(&bp->link_params, &bp->link_vars);
++ bnx2x_release_phy_lock(bp);
++
++ bnx2x_calc_fc_adv(bp);
++ } else
++ BNX2X_ERR("Bootcode is missing -not setting link\n");
++}
++
++static void bnx2x__link_reset(struct bnx2x *bp)
++{
++ if (!BP_NOMCP(bp)) {
++ bnx2x_acquire_phy_lock(bp);
++ bnx2x_link_reset(&bp->link_params, &bp->link_vars);
++ bnx2x_release_phy_lock(bp);
++ } else
++ BNX2X_ERR("Bootcode is missing -not resetting link\n");
++}
++
++static u8 bnx2x_link_test(struct bnx2x *bp)
++{
++ u8 rc;
++
++ bnx2x_acquire_phy_lock(bp);
++ rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
++ bnx2x_release_phy_lock(bp);
++
++ return rc;
++}
++
++/* Calculates the sum of vn_min_rates.
++ It's needed for further normalizing of the min_rates.
++
++ Returns:
++ sum of vn_min_rates
++ or
++ 0 - if all the min_rates are 0.
++ In the latter case the fairness algorithm should be deactivated.
++ If not all min_rates are zero then those that are zeroes will
++ be set to 1.
++ */
++static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
++{
++ int i, port = BP_PORT(bp);
++ u32 wsum = 0;
++ int all_zero = 1;
++
++ for (i = 0; i < E1HVN_MAX; i++) {
++ u32 vn_cfg =
++ SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
++ u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
++ FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
++ if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
++ /* If min rate is zero - set it to 1 */
++ if (!vn_min_rate)
++ vn_min_rate = DEF_MIN_RATE;
++ else
++ all_zero = 0;
++
++ wsum += vn_min_rate;
++ }
++ }
++
++ /* ... only if all min rates are zeros - disable FAIRNESS */
++ if (all_zero)
++ return 0;
++
++ return wsum;
++}
++
++static void bnx2x_init_port_minmax(struct bnx2x *bp,
++ int en_fness,
++ u16 port_rate,
++ struct cmng_struct_per_port *m_cmng_port)
++{
++ u32 r_param = port_rate / 8;
++ int port = BP_PORT(bp);
++ int i;
++
++ memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));
++
++ /* Enable minmax only if we are in e1hmf mode */
++ if (IS_E1HMF(bp)) {
++ u32 fair_periodic_timeout_usec;
++ u32 t_fair;
++
++ /* Enable rate shaping and fairness */
++ m_cmng_port->flags.cmng_vn_enable = 1;
++ m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
++ m_cmng_port->flags.rate_shaping_enable = 1;
++
++ if (!en_fness)
++ DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
++ " fairness will be disabled\n");
++
++ /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
++ m_cmng_port->rs_vars.rs_periodic_timeout =
++ RS_PERIODIC_TIMEOUT_USEC / 4;
++
++ /* this is the threshold below which no timer arming will occur
++ 1.25 coefficient is for the threshold to be a little bigger
++ than the real time, to compensate for timer inaccuracy */
++ m_cmng_port->rs_vars.rs_threshold =
++ (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
++
++ /* resolution of fairness timer */
++ fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
++ /* for 10G it is 1000usec. for 1G it is 10000usec. */
++ t_fair = T_FAIR_COEF / port_rate;
++
++ /* this is the threshold below which we won't arm
++ the timer anymore */
++ m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;
++
++ /* we multiply by 1e3/8 to get bytes/msec.
++ We don't want the credits to pass a credit
++ of the T_FAIR*FAIR_MEM (algorithm resolution) */
++ m_cmng_port->fair_vars.upper_bound =
++ r_param * t_fair * FAIR_MEM;
++ /* since each tick is 4 usec */
++ m_cmng_port->fair_vars.fairness_timeout =
++ fair_periodic_timeout_usec / 4;
++
++ } else {
++ /* Disable rate shaping and fairness */
++ m_cmng_port->flags.cmng_vn_enable = 0;
++ m_cmng_port->flags.fairness_enable = 0;
++ m_cmng_port->flags.rate_shaping_enable = 0;
++
++ DP(NETIF_MSG_IFUP,
++ "Single function mode minmax will be disabled\n");
++ }
++
++ /* Store it to internal memory */
++ for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
++ REG_WR(bp, BAR_XSTRORM_INTMEM +
++ XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
++ ((u32 *)(m_cmng_port))[i]);
++}
++
++static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
++ u32 wsum, u16 port_rate,
++ struct cmng_struct_per_port *m_cmng_port)
++{
++ struct rate_shaping_vars_per_vn m_rs_vn;
++ struct fairness_vars_per_vn m_fair_vn;
++ u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
++ u16 vn_min_rate, vn_max_rate;
++ int i;
++
++ /* If function is hidden - set min and max to zeroes */
++ if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
++ vn_min_rate = 0;
++ vn_max_rate = 0;
++
++ } else {
++ vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
++ FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
++ /* If FAIRNESS is enabled (not all min rates are zeroes) and
++ if current min rate is zero - set it to 1.
++ This is a requirement of the algorithm. */
++ if ((vn_min_rate == 0) && wsum)
++ vn_min_rate = DEF_MIN_RATE;
++ vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
++ FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
++ }
++
++ DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d vn_max_rate=%d "
++ "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);
++
++ memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
++ memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
++
++ /* global vn counter - maximal Mbps for this vn */
++ m_rs_vn.vn_counter.rate = vn_max_rate;
++
++ /* quota - number of bytes transmitted in this period */
++ m_rs_vn.vn_counter.quota =
++ (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
++
++#ifdef BNX2X_PER_PROT_QOS
++ /* per protocol counter */
++ for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
++ /* maximal Mbps for this protocol */
++ m_rs_vn.protocol_counters[protocol].rate =
++ protocol_max_rate[protocol];
++ /* the quota in each timer period -
++ number of bytes transmitted in this period */
++ m_rs_vn.protocol_counters[protocol].quota =
++ (u32)(rs_periodic_timeout_usec *
++ ((double)m_rs_vn.
++ protocol_counters[protocol].rate/8));
++ }
++#endif
++
++ if (wsum) {
++ /* credit for each period of the fairness algorithm:
++ number of bytes in T_FAIR (the vn share the port rate).
++ wsum should not be larger than 10000, thus
++ T_FAIR_COEF / (8 * wsum) will always be grater than zero */
++ m_fair_vn.vn_credit_delta =
++ max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
++ (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
++ DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
++ m_fair_vn.vn_credit_delta);
++ }
++
++#ifdef BNX2X_PER_PROT_QOS
++ do {
++ u32 protocolWeightSum = 0;
++
++ for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
++ protocolWeightSum +=
++ drvInit.protocol_min_rate[protocol];
++ /* per protocol counter -
++ NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
++ if (protocolWeightSum > 0) {
++ for (protocol = 0;
++ protocol < NUM_OF_PROTOCOLS; protocol++)
++ /* credit for each period of the
++ fairness algorithm - number of bytes in
++ T_FAIR (the protocol share the vn rate) */
++ m_fair_vn.protocol_credit_delta[protocol] =
++ (u32)((vn_min_rate / 8) * t_fair *
++ protocol_min_rate / protocolWeightSum);
++ }
++ } while (0);
++#endif
++
++ /* Store it to internal memory */
++ for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
++ REG_WR(bp, BAR_XSTRORM_INTMEM +
++ XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
++ ((u32 *)(&m_rs_vn))[i]);
++
++ for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
++ REG_WR(bp, BAR_XSTRORM_INTMEM +
++ XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
++ ((u32 *)(&m_fair_vn))[i]);
++}
++
++/* This function is called upon link interrupt */
++static void bnx2x_link_attn(struct bnx2x *bp)
++{
++ int vn;
++
++ /* Make sure that we are synced with the current statistics */
++ bnx2x_stats_handle(bp, STATS_EVENT_STOP);
++
++ bnx2x_link_update(&bp->link_params, &bp->link_vars);
++
++ if (bp->link_vars.link_up) {
++
++ if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
++ struct host_port_stats *pstats;
++
++ pstats = bnx2x_sp(bp, port_stats);
++ /* reset old bmac stats */
++ memset(&(pstats->mac_stx[0]), 0,
++ sizeof(struct mac_stx));
++ }
++ if ((bp->state == BNX2X_STATE_OPEN) ||
++ (bp->state == BNX2X_STATE_DISABLED))
++ bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
++ }
++
++ /* indicate link status */
++ bnx2x_link_report(bp);
++
++ if (IS_E1HMF(bp)) {
++ int func;
++
++ for (vn = VN_0; vn < E1HVN_MAX; vn++) {
++ if (vn == BP_E1HVN(bp))
++ continue;
++
++ func = ((vn << 1) | BP_PORT(bp));
++
++ /* Set the attention towards other drivers
++ on the same port */
++ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
++ (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
++ }
++ }
++
++ if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
++ struct cmng_struct_per_port m_cmng_port;
++ u32 wsum;
++ int port = BP_PORT(bp);
++
++ /* Init RATE SHAPING and FAIRNESS contexts */
++ wsum = bnx2x_calc_vn_wsum(bp);
++ bnx2x_init_port_minmax(bp, (int)wsum,
++ bp->link_vars.line_speed,
++ &m_cmng_port);
++ if (IS_E1HMF(bp))
++ for (vn = VN_0; vn < E1HVN_MAX; vn++)
++ bnx2x_init_vn_minmax(bp, 2*vn + port,
++ wsum, bp->link_vars.line_speed,
++ &m_cmng_port);
++ }
++}
++
++static void bnx2x__link_status_update(struct bnx2x *bp)
++{
++ if (bp->state != BNX2X_STATE_OPEN)
++ return;
++
++ bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
++
++ if (bp->link_vars.link_up)
++ bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
++ else
++ bnx2x_stats_handle(bp, STATS_EVENT_STOP);
++
++ /* indicate link status */
++ bnx2x_link_report(bp);
++}
++
++static void bnx2x_pmf_update(struct bnx2x *bp)
++{
++ int port = BP_PORT(bp);
++ u32 val;
++
++ bp->port.pmf = 1;
++ DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
++
++ /* enable nig attention */
++ val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
++ REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
++ REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
++
++ bnx2x_stats_handle(bp, STATS_EVENT_PMF);
++}
++
++/* end of Link */
++
++/* slow path */
++
++/*
++ * General service functions
++ */
++
++/* the slow path queue is odd since completions arrive on the fastpath ring */
++static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
++ u32 data_hi, u32 data_lo, int common)
++{
++ int func = BP_FUNC(bp);
++
++ DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
++ "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
++ (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
++ (void *)bp->spq_prod_bd - (void *)bp->spq), command,
++ HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
++
++#ifdef BNX2X_STOP_ON_ERROR
++ if (unlikely(bp->panic))
++ return -EIO;
++#endif
++
++ spin_lock_bh(&bp->spq_lock);
++
++ if (!bp->spq_left) {
++ BNX2X_ERR("BUG! SPQ ring full!\n");
++ spin_unlock_bh(&bp->spq_lock);
++ bnx2x_panic();
++ return -EBUSY;
++ }
++
++ /* CID needs port number to be encoded int it */
++ bp->spq_prod_bd->hdr.conn_and_cmd_data =
++ cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
++ HW_CID(bp, cid)));
++ bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
++ if (common)
++ bp->spq_prod_bd->hdr.type |=
++ cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
++
++ bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
++ bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
++
++ bp->spq_left--;
++
++ if (bp->spq_prod_bd == bp->spq_last_bd) {
++ bp->spq_prod_bd = bp->spq;
++ bp->spq_prod_idx = 0;
++ DP(NETIF_MSG_TIMER, "end of spq\n");
++
++ } else {
++ bp->spq_prod_bd++;
++ bp->spq_prod_idx++;
++ }
++
++ REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
++ bp->spq_prod_idx);
++
++ spin_unlock_bh(&bp->spq_lock);
++ return 0;
++}
++
++/* acquire split MCP access lock register */
++static int bnx2x_acquire_alr(struct bnx2x *bp)
++{
++ u32 i, j, val;
++ int rc = 0;
++
++ might_sleep();
++ i = 100;
++ for (j = 0; j < i*10; j++) {
++ val = (1UL << 31);
++ REG_WR(bp, GRCBASE_MCP + 0x9c, val);
++ val = REG_RD(bp, GRCBASE_MCP + 0x9c);
++ if (val & (1L << 31))
++ break;
++
++ msleep(5);
++ }
++ if (!(val & (1L << 31))) {
++ BNX2X_ERR("Cannot acquire MCP access lock register\n");
++ rc = -EBUSY;
++ }
++
++ return rc;
++}
++
++/* release split MCP access lock register */
++static void bnx2x_release_alr(struct bnx2x *bp)
++{
++ u32 val = 0;
++
++ REG_WR(bp, GRCBASE_MCP + 0x9c, val);
++}
++
++static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
++{
++ struct host_def_status_block *def_sb = bp->def_status_blk;
++ u16 rc = 0;
++
++ barrier(); /* status block is written to by the chip */
++ if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
++ bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
++ rc |= 1;
++ }
++ if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
++ bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
++ rc |= 2;
++ }
++ if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
++ bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
++ rc |= 4;
++ }
++ if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
++ bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
++ rc |= 8;
++ }
++ if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
++ bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
++ rc |= 16;
++ }
++ return rc;
++}
++
++/*
++ * slow path service functions
++ */
++
++static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
++{
++ int port = BP_PORT(bp);
++ u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
++ COMMAND_REG_ATTN_BITS_SET);
++ u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
++ MISC_REG_AEU_MASK_ATTN_FUNC_0;
++ u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
++ NIG_REG_MASK_INTERRUPT_PORT0;
++ u32 aeu_mask;
++
++ if (bp->attn_state & asserted)
++ BNX2X_ERR("IGU ERROR\n");
++
++ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
++ aeu_mask = REG_RD(bp, aeu_addr);
++
++ DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
++ aeu_mask, asserted);
++ aeu_mask &= ~(asserted & 0xff);
++ DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
++
++ REG_WR(bp, aeu_addr, aeu_mask);
++ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
++
++ DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
++ bp->attn_state |= asserted;
++ DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
++
++ if (asserted & ATTN_HARD_WIRED_MASK) {
++ if (asserted & ATTN_NIG_FOR_FUNC) {
++
++ bnx2x_acquire_phy_lock(bp);
++
++ /* save nig interrupt mask */
++ bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
++ REG_WR(bp, nig_int_mask_addr, 0);
++
++ bnx2x_link_attn(bp);
++
++ /* handle unicore attn? */
++ }
++ if (asserted & ATTN_SW_TIMER_4_FUNC)
++ DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
++
++ if (asserted & GPIO_2_FUNC)
++ DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
++
++ if (asserted & GPIO_3_FUNC)
++ DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
++
++ if (asserted & GPIO_4_FUNC)
++ DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
++
++ if (port == 0) {
++ if (asserted & ATTN_GENERAL_ATTN_1) {
++ DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
++ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
++ }
++ if (asserted & ATTN_GENERAL_ATTN_2) {
++ DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
++ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
++ }
++ if (asserted & ATTN_GENERAL_ATTN_3) {
++ DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
++ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
++ }
++ } else {
++ if (asserted & ATTN_GENERAL_ATTN_4) {
++ DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
++ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
++ }
++ if (asserted & ATTN_GENERAL_ATTN_5) {
++ DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
++ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
++ }
++ if (asserted & ATTN_GENERAL_ATTN_6) {
++ DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
++ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
++ }
++ }
++
++ } /* if hardwired */
++
++ DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
++ asserted, hc_addr);
++ REG_WR(bp, hc_addr, asserted);
++
++ /* now set back the mask */
++ if (asserted & ATTN_NIG_FOR_FUNC) {
++ REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
++ bnx2x_release_phy_lock(bp);
++ }
++}
++
++static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
++{
++ int port = BP_PORT(bp);
++ int reg_offset;
++ u32 val;
++
++ reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
++ MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
++
++ if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
++
++ val = REG_RD(bp, reg_offset);
++ val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
++ REG_WR(bp, reg_offset, val);
++
++ BNX2X_ERR("SPIO5 hw attention\n");
++
++ switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
++ case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
++ case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
++ /* Fan failure attention */
++
++ /* The PHY reset is controlled by GPIO 1 */
++ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
++ MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
++ /* Low power mode is controlled by GPIO 2 */
++ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
++ MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
++ /* mark the failure */
++ bp->link_params.ext_phy_config &=
++ ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
++ bp->link_params.ext_phy_config |=
++ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
++ SHMEM_WR(bp,
++ dev_info.port_hw_config[port].
++ external_phy_config,
++ bp->link_params.ext_phy_config);
++ /* log the failure */
++ printk(KERN_ERR PFX "Fan Failure on Network"
++ " Controller %s has caused the driver to"
++ " shutdown the card to prevent permanent"
++ " damage. Please contact Dell Support for"
++ " assistance\n", bp->dev->name);
++ break;
++
++ default:
++ break;
++ }
++ }
++
++ if (attn & HW_INTERRUT_ASSERT_SET_0) {
++
++ val = REG_RD(bp, reg_offset);
++ val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
++ REG_WR(bp, reg_offset, val);
++
++ BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
++ (attn & HW_INTERRUT_ASSERT_SET_0));
++ bnx2x_panic();
++ }
++}
++
++static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
++{
++ u32 val;
++
++ if (attn & BNX2X_DOORQ_ASSERT) {
++
++ val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
++ BNX2X_ERR("DB hw attention 0x%x\n", val);
++ /* DORQ discard attention */
++ if (val & 0x2)
++ BNX2X_ERR("FATAL error from DORQ\n");
++ }
++
++ if (attn & HW_INTERRUT_ASSERT_SET_1) {
++
++ int port = BP_PORT(bp);
++ int reg_offset;
++
++ reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
++ MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
++
++ val = REG_RD(bp, reg_offset);
++ val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
++ REG_WR(bp, reg_offset, val);
++
++ BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
++ (attn & HW_INTERRUT_ASSERT_SET_1));
++ bnx2x_panic();
++ }
++}
++
++static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
++{
++ u32 val;
++
++ if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
++
++ val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
++ BNX2X_ERR("CFC hw attention 0x%x\n", val);
++ /* CFC error attention */
++ if (val & 0x2)
++ BNX2X_ERR("FATAL error from CFC\n");
++ }
++
++ if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
++
++ val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
++ BNX2X_ERR("PXP hw attention 0x%x\n", val);
++ /* RQ_USDMDP_FIFO_OVERFLOW */
++ if (val & 0x18000)
++ BNX2X_ERR("FATAL error from PXP\n");
++ }
++
++ if (attn & HW_INTERRUT_ASSERT_SET_2) {
++
++ int port = BP_PORT(bp);
++ int reg_offset;
++
++ reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
++ MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
++
++ val = REG_RD(bp, reg_offset);
++ val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
++ REG_WR(bp, reg_offset, val);
++
++ BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
++ (attn & HW_INTERRUT_ASSERT_SET_2));
++ bnx2x_panic();
++ }
++}
++
++static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
++{
++ u32 val;
++
++ if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
++
++ if (attn & BNX2X_PMF_LINK_ASSERT) {
++ int func = BP_FUNC(bp);
++
++ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
++ bnx2x__link_status_update(bp);
++ if (SHMEM_RD(bp, func_mb[func].drv_status) &
++ DRV_STATUS_PMF)
++ bnx2x_pmf_update(bp);
++
++ } else if (attn & BNX2X_MC_ASSERT_BITS) {
++
++ BNX2X_ERR("MC assert!\n");
++ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
++ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
++ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
++ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
++ bnx2x_panic();
++
++ } else if (attn & BNX2X_MCP_ASSERT) {
++
++ BNX2X_ERR("MCP assert!\n");
++ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
++ bnx2x_fw_dump(bp);
++
++ } else
++ BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
++ }
++
++ if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
++ BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
++ if (attn & BNX2X_GRC_TIMEOUT) {
++ val = CHIP_IS_E1H(bp) ?
++ REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
++ BNX2X_ERR("GRC time-out 0x%08x\n", val);
++ }
++ if (attn & BNX2X_GRC_RSV) {
++ val = CHIP_IS_E1H(bp) ?
++ REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
++ BNX2X_ERR("GRC reserved 0x%08x\n", val);
++ }
++ REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
++ }
++}
++
++static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
++{
++ struct attn_route attn;
++ struct attn_route group_mask;
++ int port = BP_PORT(bp);
++ int index;
++ u32 reg_addr;
++ u32 val;
++ u32 aeu_mask;
++
++ /* need to take HW lock because MCP or other port might also
++ try to handle this event */
++ bnx2x_acquire_alr(bp);
++
++ attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
++ attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
++ attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
++ attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
++ DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
++ attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
++
++ for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
++ if (deasserted & (1 << index)) {
++ group_mask = bp->attn_group[index];
++
++ DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
++ index, group_mask.sig[0], group_mask.sig[1],
++ group_mask.sig[2], group_mask.sig[3]);
++
++ bnx2x_attn_int_deasserted3(bp,
++ attn.sig[3] & group_mask.sig[3]);
++ bnx2x_attn_int_deasserted1(bp,
++ attn.sig[1] & group_mask.sig[1]);
++ bnx2x_attn_int_deasserted2(bp,
++ attn.sig[2] & group_mask.sig[2]);
++ bnx2x_attn_int_deasserted0(bp,
++ attn.sig[0] & group_mask.sig[0]);
++
++ if ((attn.sig[0] & group_mask.sig[0] &
++ HW_PRTY_ASSERT_SET_0) ||
++ (attn.sig[1] & group_mask.sig[1] &
++ HW_PRTY_ASSERT_SET_1) ||
++ (attn.sig[2] & group_mask.sig[2] &
++ HW_PRTY_ASSERT_SET_2))
++ BNX2X_ERR("FATAL HW block parity attention\n");
++ }
++ }
++
++ bnx2x_release_alr(bp);
++
++ reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
++
++ val = ~deasserted;
++ DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
++ val, reg_addr);
++ REG_WR(bp, reg_addr, val);
++
++ if (~bp->attn_state & deasserted)
++ BNX2X_ERR("IGU ERROR\n");
++
++ reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
++ MISC_REG_AEU_MASK_ATTN_FUNC_0;
++
++ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
++ aeu_mask = REG_RD(bp, reg_addr);
++
++ DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
++ aeu_mask, deasserted);
++ aeu_mask |= (deasserted & 0xff);
++ DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
++
++ REG_WR(bp, reg_addr, aeu_mask);
++ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
++
++ DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
++ bp->attn_state &= ~deasserted;
++ DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
++}
++
++static void bnx2x_attn_int(struct bnx2x *bp)
++{
++ /* read local copy of bits */
++ u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
++ attn_bits);
++ u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
++ attn_bits_ack);
++ u32 attn_state = bp->attn_state;
++
++ /* look for changed bits */
++ u32 asserted = attn_bits & ~attn_ack & ~attn_state;
++ u32 deasserted = ~attn_bits & attn_ack & attn_state;
++
++ DP(NETIF_MSG_HW,
++ "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
++ attn_bits, attn_ack, asserted, deasserted);
++
++ if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
++ BNX2X_ERR("BAD attention state\n");
++
++ /* handle bits that were raised */
++ if (asserted)
++ bnx2x_attn_int_asserted(bp, asserted);
++
++ if (deasserted)
++ bnx2x_attn_int_deasserted(bp, deasserted);
++}
++
++static void bnx2x_sp_task(struct work_struct *work)
++{
++ struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
++ u16 status;
++
++
++ /* Return here if interrupt is disabled */
++ if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
++ DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
++ return;
++ }
++
++ status = bnx2x_update_dsb_idx(bp);
++/* if (status == 0) */
++/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
++
++ DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
++
++ /* HW attentions */
++ if (status & 0x1)
++ bnx2x_attn_int(bp);
++
++ /* CStorm events: query_stats, port delete ramrod */
++ if (status & 0x2)
++ bp->stats_pending = 0;
++
++ bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
++ IGU_INT_NOP, 1);
++ bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
++ IGU_INT_NOP, 1);
++ bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
++ IGU_INT_NOP, 1);
++ bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
++ IGU_INT_NOP, 1);
++ bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
++ IGU_INT_ENABLE, 1);
++
++}
++
++static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
++{
++ struct net_device *dev = dev_instance;
++ struct bnx2x *bp = netdev_priv(dev);
++
++ /* Return here if interrupt is disabled */
++ if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
++ DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
++ return IRQ_HANDLED;
++ }
++
++ bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
++
++#ifdef BNX2X_STOP_ON_ERROR
++ if (unlikely(bp->panic))
++ return IRQ_HANDLED;
++#endif
++
++ queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
++
++ return IRQ_HANDLED;
++}
++
++/* end of slow path */
++
++/* Statistics */
++
++/****************************************************************************
++* Macros
++****************************************************************************/
++
++/* sum[hi:lo] += add[hi:lo] */
++#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
++ do { \
++ s_lo += a_lo; \
++ s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
++ } while (0)
++
++/* difference = minuend - subtrahend */
++#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
++ do { \
++ if (m_lo < s_lo) { \
++ /* underflow */ \
++ d_hi = m_hi - s_hi; \
++ if (d_hi > 0) { \
++ /* we can 'loan' 1 */ \
++ d_hi--; \
++ d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
++ } else { \
++ /* m_hi <= s_hi */ \
++ d_hi = 0; \
++ d_lo = 0; \
++ } \
++ } else { \
++ /* m_lo >= s_lo */ \
++ if (m_hi < s_hi) { \
++ d_hi = 0; \
++ d_lo = 0; \
++ } else { \
++ /* m_hi >= s_hi */ \
++ d_hi = m_hi - s_hi; \
++ d_lo = m_lo - s_lo; \
++ } \
++ } \
++ } while (0)
++
++#define UPDATE_STAT64(s, t) \
++ do { \
++ DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
++ diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
++ pstats->mac_stx[0].t##_hi = new->s##_hi; \
++ pstats->mac_stx[0].t##_lo = new->s##_lo; \
++ ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
++ pstats->mac_stx[1].t##_lo, diff.lo); \
++ } while (0)
++
++#define UPDATE_STAT64_NIG(s, t) \
++ do { \
++ DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
++ diff.lo, new->s##_lo, old->s##_lo); \
++ ADD_64(estats->t##_hi, diff.hi, \
++ estats->t##_lo, diff.lo); \
++ } while (0)
++
++/* sum[hi:lo] += add */
++#define ADD_EXTEND_64(s_hi, s_lo, a) \
++ do { \
++ s_lo += a; \
++ s_hi += (s_lo < a) ? 1 : 0; \
++ } while (0)
++
++#define UPDATE_EXTEND_STAT(s) \
++ do { \
++ ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
++ pstats->mac_stx[1].s##_lo, \
++ new->s); \
++ } while (0)
++
++#define UPDATE_EXTEND_TSTAT(s, t) \
++ do { \
++ diff = le32_to_cpu(tclient->s) - old_tclient->s; \
++ old_tclient->s = le32_to_cpu(tclient->s); \
++ ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
++ } while (0)
++
++#define UPDATE_EXTEND_XSTAT(s, t) \
++ do { \
++ diff = le32_to_cpu(xclient->s) - old_xclient->s; \
++ old_xclient->s = le32_to_cpu(xclient->s); \
++ ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
++ } while (0)
++
++/*
++ * General service functions
++ */
++
++static inline long bnx2x_hilo(u32 *hiref)
++{
++ u32 lo = *(hiref + 1);
++#if (BITS_PER_LONG == 64)
++ u32 hi = *hiref;
++
++ return HILO_U64(hi, lo);
++#else
++ return lo;
++#endif
++}
++
++/*
++ * Init service functions
++ */
++
++static void bnx2x_storm_stats_post(struct bnx2x *bp)
++{
++ if (!bp->stats_pending) {
++ struct eth_query_ramrod_data ramrod_data = {0};
++ int rc;
++
++ ramrod_data.drv_counter = bp->stats_counter++;
++ ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
++ ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));
++
++ rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
++ ((u32 *)&ramrod_data)[1],
++ ((u32 *)&ramrod_data)[0], 0);
++ if (rc == 0) {
++ /* stats ramrod has it's own slot on the spq */
++ bp->spq_left++;
++ bp->stats_pending = 1;
++ }
++ }
++}
++
++static void bnx2x_stats_init(struct bnx2x *bp)
++{
++ int port = BP_PORT(bp);
++
++ bp->executer_idx = 0;
++ bp->stats_counter = 0;
++
++ /* port stats */
++ if (!BP_NOMCP(bp))
++ bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
++ else
++ bp->port.port_stx = 0;
++ DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
++
++ memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
++ bp->port.old_nig_stats.brb_discard =
++ REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
++ bp->port.old_nig_stats.brb_truncate =
++ REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
++ REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
++ &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
++ REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
++ &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
++
++ /* function stats */
++ memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
++ memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
++ memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
++ memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
++
++ bp->stats_state = STATS_STATE_DISABLED;
++ if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
++ bnx2x_stats_handle(bp, STATS_EVENT_PMF);
++}
++
++static void bnx2x_hw_stats_post(struct bnx2x *bp)
++{
++ struct dmae_command *dmae = &bp->stats_dmae;
++ u32 *stats_comp = bnx2x_sp(bp, stats_comp);
++
++ *stats_comp = DMAE_COMP_VAL;
++
++ /* loader */
++ if (bp->executer_idx) {
++ int loader_idx = PMF_DMAE_C(bp);
++
++ memset(dmae, 0, sizeof(struct dmae_command));
++
++ dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
++ DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
++ DMAE_CMD_DST_RESET |
++#ifdef __BIG_ENDIAN
++ DMAE_CMD_ENDIANITY_B_DW_SWAP |
++#else
++ DMAE_CMD_ENDIANITY_DW_SWAP |
++#endif
++ (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
++ DMAE_CMD_PORT_0) |
++ (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
++ dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
++ dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
++ dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
++ sizeof(struct dmae_command) *
++ (loader_idx + 1)) >> 2;
++ dmae->dst_addr_hi = 0;
++ dmae->len = sizeof(struct dmae_command) >> 2;
++ if (CHIP_IS_E1(bp))
++ dmae->len--;
++ dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
++ dmae->comp_addr_hi = 0;
++ dmae->comp_val = 1;
++
++ *stats_comp = 0;
++ bnx2x_post_dmae(bp, dmae, loader_idx);
++
++ } else if (bp->func_stx) {
++ *stats_comp = 0;
++ bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
++ }
++}
++
++static int bnx2x_stats_comp(struct bnx2x *bp)
++{
++ u32 *stats_comp = bnx2x_sp(bp, stats_comp);
++ int cnt = 10;
++
++ might_sleep();
++ while (*stats_comp != DMAE_COMP_VAL) {
++ if (!cnt) {
++ BNX2X_ERR("timeout waiting for stats finished\n");
++ break;
++ }
++ cnt--;
++ msleep(1);
++ }
++ return 1;
++}
++
++/*
++ * Statistics service functions
++ */
++
++static void bnx2x_stats_pmf_update(struct bnx2x *bp)
++{
++ struct dmae_command *dmae;
++ u32 opcode;
++ int loader_idx = PMF_DMAE_C(bp);
++ u32 *stats_comp = bnx2x_sp(bp, stats_comp);
++
++ /* sanity */
++ if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
++ BNX2X_ERR("BUG!\n");
++ return;
++ }
++
++ bp->executer_idx = 0;
++
++ opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
++ DMAE_CMD_C_ENABLE |
++ DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
++#ifdef __BIG_ENDIAN
++ DMAE_CMD_ENDIANITY_B_DW_SWAP |
++#else
++ DMAE_CMD_ENDIANITY_DW_SWAP |
++#endif
++ (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
++ (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
++
++ dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
++ dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
++ dmae->src_addr_lo = bp->port.port_stx >> 2;
++ dmae->src_addr_hi = 0;
++ dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
++ dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
++ dmae->len = DMAE_LEN32_RD_MAX;
++ dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
++ dmae->comp_addr_hi = 0;
++ dmae->comp_val = 1;
++
++ dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
++ dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
++ dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
++ dmae->src_addr_hi = 0;
++ dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
++ DMAE_LEN32_RD_MAX * 4);
++ dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
++ DMAE_LEN32_RD_MAX * 4);
++ dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
++ dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
++ dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
++ dmae->comp_val = DMAE_COMP_VAL;
++
++ *stats_comp = 0;
++ bnx2x_hw_stats_post(bp);
++ bnx2x_stats_comp(bp);
++}
++
++static void bnx2x_port_stats_init(struct bnx2x *bp)
++{
++ struct dmae_command *dmae;
++ int port = BP_PORT(bp);
++ int vn = BP_E1HVN(bp);
++ u32 opcode;
++ int loader_idx = PMF_DMAE_C(bp);
++ u32 mac_addr;
++ u32 *stats_comp = bnx2x_sp(bp, stats_comp);
++
++ /* sanity */
++ if (!bp->link_vars.link_up || !bp->port.pmf) {
++ BNX2X_ERR("BUG!\n");
++ return;
++ }
++
++ bp->executer_idx = 0;
++
++ /* MCP */
++ opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
++ DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
++ DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
++#ifdef __BIG_ENDIAN
++ DMAE_CMD_ENDIANITY_B_DW_SWAP |
++#else
++ DMAE_CMD_ENDIANITY_DW_SWAP |
++#endif
++ (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
++ (vn << DMAE_CMD_E1HVN_SHIFT));
++
++ if (bp->port.port_stx) {
++
++ dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
++ dmae->opcode = opcode;
++ dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
++ dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
++ dmae->dst_addr_lo = bp->port.port_stx >> 2;
++ dmae->dst_addr_hi = 0;
++ dmae->len = sizeof(struct host_port_stats) >> 2;
++ dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
++ dmae->comp_addr_hi = 0;
++ dmae->comp_val = 1;
++ }
++
++ if (bp->func_stx) {
++
++ dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
++ dmae->opcode = opcode;
++ dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
++ dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
++ dmae->dst_addr_lo = bp->func_stx >> 2;
++ dmae->dst_addr_hi = 0;
++ dmae->len = sizeof(struct host_func_stats) >> 2;
++ dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
++ dmae->comp_addr_hi = 0;
++ dmae->comp_val = 1;
++ }
++
++ /* MAC */
++ opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
++ DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
++ DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
++#ifdef __BIG_ENDIAN
++ DMAE_CMD_ENDIANITY_B_DW_SWAP |
++#else
++ DMAE_CMD_ENDIANITY_DW_SWAP |
++#endif
++ (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
++ (vn << DMAE_CMD_E1HVN_SHIFT));
++
++ if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
++
++ mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
++ NIG_REG_INGRESS_BMAC0_MEM);
++
++ /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
++ BIGMAC_REGISTER_TX_STAT_GTBYT */
++ dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
++ dmae->opcode = opcode;
++ dmae->src_addr_lo = (mac_addr +
++ BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
++ dmae->src_addr_hi = 0;
++ dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
++ dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
++ dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
++ BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
++ dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
++ dmae->comp_addr_hi = 0;
++ dmae->comp_val = 1;
++
++ /* BIGMAC_REGISTER_RX_STAT_GR64 ..
++ BIGMAC_REGISTER_RX_STAT_GRIPJ */
++ dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
++ dmae->opcode = opcode;
++ dmae->src_addr_lo = (mac_addr +
++ BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
++ dmae->src_addr_hi = 0;
++ dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
++ offsetof(struct bmac_stats, rx_stat_gr64_lo));
++ dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
++ offsetof(struct bmac_stats, rx_stat_gr64_lo));
++ dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
++ BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
++ dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
++ dmae->comp_addr_hi = 0;
++ dmae->comp_val = 1;
++
++ } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
++
++ mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
++
++ /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
++ dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
++ dmae->opcode = opcode;
++ dmae->src_addr_lo = (mac_addr +
++ EMAC_REG_EMAC_RX_STAT_AC) >> 2;
++ dmae->src_addr_hi = 0;
++ dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
++ dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
++ dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
++ dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
++ dmae->comp_addr_hi = 0;
++ dmae->comp_val = 1;
++
++ /* EMAC_REG_EMAC_RX_STAT_AC_28 */
++ dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
++ dmae->opcode = opcode;
++ dmae->src_addr_lo = (mac_addr +
++ EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
++ dmae->src_addr_hi = 0;
++ dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
++ offsetof(struct emac_stats, rx_stat_falsecarriererrors));
++ dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
++ offsetof(struct emac_stats, rx_stat_falsecarriererrors));
++ dmae->len = 1;
++ dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
++ dmae->comp_addr_hi = 0;
++ dmae->comp_val = 1;
++
++ /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
++ dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
++ dmae->opcode = opcode;
++ dmae->src_addr_lo = (mac_addr +
++ EMAC_REG_EMAC_TX_STAT_AC) >> 2;
++ dmae->src_addr_hi = 0;
++ dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
++ offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
++ dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
++ offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
++ dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
++ dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
++ dmae->comp_addr_hi = 0;
++ dmae->comp_val = 1;
++ }
++
++ /* NIG */
++ dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
++ dmae->opcode = opcode;
++ dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
++ NIG_REG_STAT0_BRB_DISCARD) >> 2;
++ dmae->src_addr_hi = 0;
++ dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
++ dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
++ dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
++ dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
++ dmae->comp_addr_hi = 0;
++ dmae->comp_val = 1;
++
++ dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
++ dmae->opcode = opcode;
++ dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
++ NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
++ dmae->src_addr_hi = 0;
++ dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
++ offsetof(struct nig_stats, egress_mac_pkt0_lo));
++ dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
++ offsetof(struct nig_stats, egress_mac_pkt0_lo));
++ dmae->len = (2*sizeof(u32)) >> 2;
++ dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
++ dmae->comp_addr_hi = 0;
++ dmae->comp_val = 1;
++
++ dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
++ dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
++ DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
++ DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
++#ifdef __BIG_ENDIAN
++ DMAE_CMD_ENDIANITY_B_DW_SWAP |
++#else
++ DMAE_CMD_ENDIANITY_DW_SWAP |
++#endif
++ (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
++ (vn << DMAE_CMD_E1HVN_SHIFT));
++ dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
++ NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
++ dmae->src_addr_hi = 0;
++ dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
++ offsetof(struct nig_stats, egress_mac_pkt1_lo));
++ dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
++ offsetof(struct nig_stats, egress_mac_pkt1_lo));
++ dmae->len = (2*sizeof(u32)) >> 2;
++ dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
++ dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
++ dmae->comp_val = DMAE_COMP_VAL;
++
++ *stats_comp = 0;
++}
++
++/* Build the single DMAE command that copies the per-function host
++ * statistics block (func_stats, in host PCI memory) out to the
++ * function statistics area in device GRC space at bp->func_stx.
++ * Called from bnx2x_stats_start() for non-PMF functions that still
++ * own a function-stats mailbox; completion is reported into
++ * *stats_comp (reset to 0 here, set to DMAE_COMP_VAL by the DMAE).
++ */
++static void bnx2x_func_stats_init(struct bnx2x *bp)
++{
++ struct dmae_command *dmae = &bp->stats_dmae;
++ u32 *stats_comp = bnx2x_sp(bp, stats_comp);
++
++ /* sanity */
++ if (!bp->func_stx) {
++ BNX2X_ERR("BUG!\n");
++ return;
++ }
++
++ bp->executer_idx = 0;
++ memset(dmae, 0, sizeof(struct dmae_command));
++
++ /* PCI -> GRC copy; endianness swap mode is chosen at compile time */
++ dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
++ DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
++ DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
++#ifdef __BIG_ENDIAN
++ DMAE_CMD_ENDIANITY_B_DW_SWAP |
++#else
++ DMAE_CMD_ENDIANITY_DW_SWAP |
++#endif
++ (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
++ (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
++ dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
++ dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
++ /* GRC addresses are expressed in dwords, hence the >> 2 */
++ dmae->dst_addr_lo = bp->func_stx >> 2;
++ dmae->dst_addr_hi = 0;
++ dmae->len = sizeof(struct host_func_stats) >> 2;
++ dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
++ dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
++ dmae->comp_val = DMAE_COMP_VAL;
++
++ *stats_comp = 0;
++}
++
++/* Start statistics collection: program the DMAE commands (full port
++ * stats when this function is the PMF, function stats otherwise) and
++ * then kick off the first hardware and storm statistics queries.
++ */
++static void bnx2x_stats_start(struct bnx2x *bp)
++{
++ if (bp->port.pmf)
++ bnx2x_port_stats_init(bp);
++
++ else if (bp->func_stx)
++ bnx2x_func_stats_init(bp);
++
++ bnx2x_hw_stats_post(bp);
++ bnx2x_storm_stats_post(bp);
++}
++
++/* PMF handover while enabled: wait for any outstanding DMAE
++ * completion, pull the base statistics, then restart collection.
++ */
++static void bnx2x_stats_pmf_start(struct bnx2x *bp)
++{
++ bnx2x_stats_comp(bp);
++ bnx2x_stats_pmf_update(bp);
++ bnx2x_stats_start(bp);
++}
++
++/* Link-up while already enabled: drain pending completion and
++ * restart statistics collection from scratch.
++ */
++static void bnx2x_stats_restart(struct bnx2x *bp)
++{
++ bnx2x_stats_comp(bp);
++ bnx2x_stats_start(bp);
++}
++
++/* Fold the freshly DMAed BigMAC counters (mac_stats.bmac_stats) into
++ * the accumulated host port statistics.  The UPDATE_STAT64 macro
++ * relies on the local 'new', 'pstats' and 'diff' names declared here.
++ */
++static void bnx2x_bmac_stats_update(struct bnx2x *bp)
++{
++ struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
++ struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
++ struct regpair diff;
++
++ UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
++ UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
++ UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
++ UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
++ UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
++ UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
++ UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
++ UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
++ /* grxpf feeds two host counters: xoff-state and xoff-pause-frames */
++ UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
++ UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
++ UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
++ UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
++ UPDATE_STAT64(tx_stat_gt127,
++ tx_stat_etherstatspkts65octetsto127octets);
++ UPDATE_STAT64(tx_stat_gt255,
++ tx_stat_etherstatspkts128octetsto255octets);
++ UPDATE_STAT64(tx_stat_gt511,
++ tx_stat_etherstatspkts256octetsto511octets);
++ UPDATE_STAT64(tx_stat_gt1023,
++ tx_stat_etherstatspkts512octetsto1023octets);
++ UPDATE_STAT64(tx_stat_gt1518,
++ tx_stat_etherstatspkts1024octetsto1522octets);
++ UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
++ UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
++ UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
++ UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
++ UPDATE_STAT64(tx_stat_gterr,
++ tx_stat_dot3statsinternalmactransmiterrors);
++ UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
++}
++
++/* Fold the freshly DMAed EMAC counters (mac_stats.emac_stats) into
++ * the accumulated host port statistics.  UPDATE_EXTEND_STAT uses the
++ * local 'new' and 'pstats' names declared here.
++ */
++static void bnx2x_emac_stats_update(struct bnx2x *bp)
++{
++ struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
++ struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
++
++ UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
++ UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
++ UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
++ UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
++ UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
++ UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
++ UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
++ UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
++ UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
++ UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
++ UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
++ UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
++ UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
++ UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
++ UPDATE_EXTEND_STAT(tx_stat_outxonsent);
++ UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
++ UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
++ UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
++ UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
++ UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
++ UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
++ UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
++ UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
++ UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
++ UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
++ UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
++ UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
++ UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
++ UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
++ UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
++ UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
++}
++
++/* Process the hardware statistics gathered by DMAE: dispatch to the
++ * active MAC's update routine, then fold the NIG counters (BRB
++ * discard/truncate, egress packet counters) into port/eth stats and
++ * snapshot the NIG block for the next delta.  Returns 0 on success,
++ * -1 if no MAC is active (should not happen once stats are running).
++ */
++static int bnx2x_hw_stats_update(struct bnx2x *bp)
++{
++ struct nig_stats *new = bnx2x_sp(bp, nig_stats);
++ struct nig_stats *old = &(bp->port.old_nig_stats);
++ struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
++ struct bnx2x_eth_stats *estats = &bp->eth_stats;
++ struct regpair diff;
++
++ if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
++ bnx2x_bmac_stats_update(bp);
++
++ else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
++ bnx2x_emac_stats_update(bp);
++
++ else { /* unreached */
++ BNX2X_ERR("stats updated by dmae but no MAC active\n");
++ return -1;
++ }
++
++ /* NIG counters are deltas against the previous snapshot */
++ ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
++ new->brb_discard - old->brb_discard);
++ ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
++ new->brb_truncate - old->brb_truncate);
++
++ UPDATE_STAT64_NIG(egress_mac_pkt0,
++ etherstatspkts1024octetsto1522octets);
++ UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
++
++ memcpy(old, new, sizeof(struct nig_stats));
++
++ /* mirror the MAC stx block and BRB drops into the eth stats */
++ memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
++ sizeof(struct mac_stx));
++ estats->brb_drop_hi = pstats->brb_drop_hi;
++ estats->brb_drop_lo = pstats->brb_drop_lo;
++
++ /* mark the port-stats record as consistently updated */
++ pstats->host_port_stats_start = ++pstats->host_port_stats_end;
++
++ return 0;
++}
++
++/* Pull the firmware (storm) statistics from the fw_stats query area
++ * into the host function/eth statistics.  The tstorm (rx-side) and
++ * xstorm (tx-side) per-client records are only consumed if their
++ * stats_counter shows they belong to the query we posted last
++ * (counter + 1 == bp->stats_counter); otherwise -1/-2 is returned and
++ * the caller treats the update as "not ready yet".  Also snapshots
++ * the per-client records into old_tclient/old_xclient for the
++ * UPDATE_EXTEND_* delta macros of the next round.
++ */
++static int bnx2x_storm_stats_update(struct bnx2x *bp)
++{
++ struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
++ int cl_id = BP_CL_ID(bp);
++ struct tstorm_per_port_stats *tport =
++ &stats->tstorm_common.port_statistics;
++ struct tstorm_per_client_stats *tclient =
++ &stats->tstorm_common.client_statistics[cl_id];
++ struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
++ struct xstorm_per_client_stats *xclient =
++ &stats->xstorm_common.client_statistics[cl_id];
++ struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
++ struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
++ struct bnx2x_eth_stats *estats = &bp->eth_stats;
++ u32 diff;
++
++ /* are storm stats valid? */
++ if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
++ bp->stats_counter) {
++ DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
++ " tstorm counter (%d) != stats_counter (%d)\n",
++ tclient->stats_counter, bp->stats_counter);
++ return -1;
++ }
++ if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
++ bp->stats_counter) {
++ DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
++ " xstorm counter (%d) != stats_counter (%d)\n",
++ xclient->stats_counter, bp->stats_counter);
++ return -2;
++ }
++
++ /* rx byte counts: total includes error bytes, valid does not */
++ fstats->total_bytes_received_hi =
++ fstats->valid_bytes_received_hi =
++ le32_to_cpu(tclient->total_rcv_bytes.hi);
++ fstats->total_bytes_received_lo =
++ fstats->valid_bytes_received_lo =
++ le32_to_cpu(tclient->total_rcv_bytes.lo);
++
++ estats->error_bytes_received_hi =
++ le32_to_cpu(tclient->rcv_error_bytes.hi);
++ estats->error_bytes_received_lo =
++ le32_to_cpu(tclient->rcv_error_bytes.lo);
++ ADD_64(estats->error_bytes_received_hi,
++ estats->rx_stat_ifhcinbadoctets_hi,
++ estats->error_bytes_received_lo,
++ estats->rx_stat_ifhcinbadoctets_lo);
++
++ ADD_64(fstats->total_bytes_received_hi,
++ estats->error_bytes_received_hi,
++ fstats->total_bytes_received_lo,
++ estats->error_bytes_received_lo);
++
++ UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
++ UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
++ total_multicast_packets_received);
++ UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
++ total_broadcast_packets_received);
++
++ fstats->total_bytes_transmitted_hi =
++ le32_to_cpu(xclient->total_sent_bytes.hi);
++ fstats->total_bytes_transmitted_lo =
++ le32_to_cpu(xclient->total_sent_bytes.lo);
++
++ UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
++ total_unicast_packets_transmitted);
++ UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
++ total_multicast_packets_transmitted);
++ UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
++ total_broadcast_packets_transmitted);
++
++ /* eth_stats mirrors func_stats minus its 2-dword start/end marker */
++ memcpy(estats, &(fstats->total_bytes_received_hi),
++ sizeof(struct host_func_stats) - 2*sizeof(u32));
++
++ estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
++ estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
++ estats->brb_truncate_discard =
++ le32_to_cpu(tport->brb_truncate_discard);
++ estats->mac_discard = le32_to_cpu(tport->mac_discard);
++
++ /* snapshot tstorm client counters (CPU-endian) for the next delta */
++ old_tclient->rcv_unicast_bytes.hi =
++ le32_to_cpu(tclient->rcv_unicast_bytes.hi);
++ old_tclient->rcv_unicast_bytes.lo =
++ le32_to_cpu(tclient->rcv_unicast_bytes.lo);
++ old_tclient->rcv_broadcast_bytes.hi =
++ le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
++ old_tclient->rcv_broadcast_bytes.lo =
++ le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
++ old_tclient->rcv_multicast_bytes.hi =
++ le32_to_cpu(tclient->rcv_multicast_bytes.hi);
++ old_tclient->rcv_multicast_bytes.lo =
++ le32_to_cpu(tclient->rcv_multicast_bytes.lo);
++ old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);
++
++ old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
++ old_tclient->packets_too_big_discard =
++ le32_to_cpu(tclient->packets_too_big_discard);
++ estats->no_buff_discard =
++ old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
++ old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
++
++ /* snapshot xstorm client counters as well */
++ old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
++ old_xclient->unicast_bytes_sent.hi =
++ le32_to_cpu(xclient->unicast_bytes_sent.hi);
++ old_xclient->unicast_bytes_sent.lo =
++ le32_to_cpu(xclient->unicast_bytes_sent.lo);
++ old_xclient->multicast_bytes_sent.hi =
++ le32_to_cpu(xclient->multicast_bytes_sent.hi);
++ old_xclient->multicast_bytes_sent.lo =
++ le32_to_cpu(xclient->multicast_bytes_sent.lo);
++ old_xclient->broadcast_bytes_sent.hi =
++ le32_to_cpu(xclient->broadcast_bytes_sent.hi);
++ old_xclient->broadcast_bytes_sent.lo =
++ le32_to_cpu(xclient->broadcast_bytes_sent.lo);
++
++ /* mark the function-stats record as consistently updated */
++ fstats->host_func_stats_start = ++fstats->host_func_stats_end;
++
++ return 0;
++}
++
++/* Translate the driver's accumulated eth/tstorm statistics into the
++ * standard struct net_device_stats fields exposed through the net
++ * stack.  bnx2x_hilo() collapses the driver's hi/lo 64-bit counter
++ * pairs; several nstats fields aggregate multiple driver counters.
++ */
++static void bnx2x_net_stats_update(struct bnx2x *bp)
++{
++ struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
++ struct bnx2x_eth_stats *estats = &bp->eth_stats;
++ struct net_device_stats *nstats = &bp->dev->stats;
++
++ nstats->rx_packets =
++ bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
++ bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
++ bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
++
++ nstats->tx_packets =
++ bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
++ bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
++ bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
++
++ nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);
++
++ nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
++
++ nstats->rx_dropped = old_tclient->checksum_discard +
++ estats->mac_discard;
++ nstats->tx_dropped = 0;
++
++ nstats->multicast =
++ bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
++
++ /* collision-type counters are summed from their low 32 bits only */
++ nstats->collisions =
++ estats->tx_stat_dot3statssinglecollisionframes_lo +
++ estats->tx_stat_dot3statsmultiplecollisionframes_lo +
++ estats->tx_stat_dot3statslatecollisions_lo +
++ estats->tx_stat_dot3statsexcessivecollisions_lo;
++
++ estats->jabber_packets_received =
++ old_tclient->packets_too_big_discard +
++ estats->rx_stat_dot3statsframestoolong_lo;
++
++ nstats->rx_length_errors =
++ estats->rx_stat_etherstatsundersizepkts_lo +
++ estats->jabber_packets_received;
++ nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
++ nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
++ nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
++ nstats->rx_fifo_errors = old_tclient->no_buff_discard;
++ nstats->rx_missed_errors = estats->xxoverflow_discard;
++
++ nstats->rx_errors = nstats->rx_length_errors +
++ nstats->rx_over_errors +
++ nstats->rx_crc_errors +
++ nstats->rx_frame_errors +
++ nstats->rx_fifo_errors +
++ nstats->rx_missed_errors;
++
++ nstats->tx_aborted_errors =
++ estats->tx_stat_dot3statslatecollisions_lo +
++ estats->tx_stat_dot3statsexcessivecollisions_lo;
++ nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
++ nstats->tx_fifo_errors = 0;
++ nstats->tx_heartbeat_errors = 0;
++ nstats->tx_window_errors = 0;
++
++ nstats->tx_errors = nstats->tx_aborted_errors +
++ nstats->tx_carrier_errors;
++}
++
++/* Periodic UPDATE event handler (ENABLED state): once the previous
++ * DMAE pass has completed (stats_comp), consume hardware stats (PMF
++ * only) and storm stats, refresh net_device_stats when anything new
++ * arrived, and panic if the storms fail to deliver three times in a
++ * row.  Finally posts the next hardware/storm statistics round.
++ * With NETIF_MSG_TIMER set, dumps a per-queue debug summary.
++ */
++static void bnx2x_stats_update(struct bnx2x *bp)
++{
++ u32 *stats_comp = bnx2x_sp(bp, stats_comp);
++ int update = 0;
++
++ if (*stats_comp != DMAE_COMP_VAL)
++ return;
++
++ if (bp->port.pmf)
++ update = (bnx2x_hw_stats_update(bp) == 0);
++
++ update |= (bnx2x_storm_stats_update(bp) == 0);
++
++ if (update)
++ bnx2x_net_stats_update(bp);
++
++ else {
++ if (bp->stats_pending) {
++ bp->stats_pending++;
++ if (bp->stats_pending == 3) {
++ BNX2X_ERR("stats not updated for 3 times\n");
++ bnx2x_panic();
++ return;
++ }
++ }
++ }
++
++ if (bp->msglevel & NETIF_MSG_TIMER) {
++ struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
++ struct bnx2x_eth_stats *estats = &bp->eth_stats;
++ struct net_device_stats *nstats = &bp->dev->stats;
++ int i;
++
++ printk(KERN_DEBUG "%s:\n", bp->dev->name);
++ printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
++ " tx pkt (%lx)\n",
++ bnx2x_tx_avail(bp->fp),
++ le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
++ printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
++ " rx pkt (%lx)\n",
++ (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
++ bp->fp->rx_comp_cons),
++ le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
++ printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n",
++ netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
++ estats->driver_xoff, estats->brb_drop_lo);
++ printk(KERN_DEBUG "tstats: checksum_discard %u "
++ "packets_too_big_discard %u no_buff_discard %u "
++ "mac_discard %u mac_filter_discard %u "
++ "xxovrflow_discard %u brb_truncate_discard %u "
++ "ttl0_discard %u\n",
++ old_tclient->checksum_discard,
++ old_tclient->packets_too_big_discard,
++ old_tclient->no_buff_discard, estats->mac_discard,
++ estats->mac_filter_discard, estats->xxoverflow_discard,
++ estats->brb_truncate_discard,
++ old_tclient->ttl0_discard);
++
++ for_each_queue(bp, i) {
++ printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
++ bnx2x_fp(bp, i, tx_pkt),
++ bnx2x_fp(bp, i, rx_pkt),
++ bnx2x_fp(bp, i, rx_calls));
++ }
++ }
++
++ bnx2x_hw_stats_post(bp);
++ bnx2x_storm_stats_post(bp);
++}
++
++/* Build the final DMAE commands that flush the accumulated port and
++ * function statistics back out to device GRC memory before stats
++ * collection stops.  When both port_stx and func_stx exist, the port
++ * copy chains to the function copy via the DMAE loader (C_DST_GRC),
++ * and only the last command signals stats_comp.
++ */
++static void bnx2x_port_stats_stop(struct bnx2x *bp)
++{
++ struct dmae_command *dmae;
++ u32 opcode;
++ int loader_idx = PMF_DMAE_C(bp);
++ u32 *stats_comp = bnx2x_sp(bp, stats_comp);
++
++ bp->executer_idx = 0;
++
++ opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
++ DMAE_CMD_C_ENABLE |
++ DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
++#ifdef __BIG_ENDIAN
++ DMAE_CMD_ENDIANITY_B_DW_SWAP |
++#else
++ DMAE_CMD_ENDIANITY_DW_SWAP |
++#endif
++ (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
++ (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
++
++ if (bp->port.port_stx) {
++
++ dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
++ /* chain to the function-stats copy if one follows */
++ if (bp->func_stx)
++ dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
++ else
++ dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
++ dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
++ dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
++ dmae->dst_addr_lo = bp->port.port_stx >> 2;
++ dmae->dst_addr_hi = 0;
++ dmae->len = sizeof(struct host_port_stats) >> 2;
++ if (bp->func_stx) {
++ dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
++ dmae->comp_addr_hi = 0;
++ dmae->comp_val = 1;
++ } else {
++ dmae->comp_addr_lo =
++ U64_LO(bnx2x_sp_mapping(bp, stats_comp));
++ dmae->comp_addr_hi =
++ U64_HI(bnx2x_sp_mapping(bp, stats_comp));
++ dmae->comp_val = DMAE_COMP_VAL;
++
++ *stats_comp = 0;
++ }
++ }
++
++ if (bp->func_stx) {
++
++ dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
++ dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
++ dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
++ dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
++ dmae->dst_addr_lo = bp->func_stx >> 2;
++ dmae->dst_addr_hi = 0;
++ dmae->len = sizeof(struct host_func_stats) >> 2;
++ dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
++ dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
++ dmae->comp_val = DMAE_COMP_VAL;
++
++ *stats_comp = 0;
++ }
++}
++
++/* STOP event handler: wait for the in-flight DMAE pass, take one
++ * last hardware/storm statistics update, and if anything new arrived
++ * push the final numbers to the net stack and (for the PMF) flush
++ * the port/function stats back to the device before going quiet.
++ */
++static void bnx2x_stats_stop(struct bnx2x *bp)
++{
++ int update = 0;
++
++ bnx2x_stats_comp(bp);
++
++ if (bp->port.pmf)
++ update = (bnx2x_hw_stats_update(bp) == 0);
++
++ update |= (bnx2x_storm_stats_update(bp) == 0);
++
++ if (update) {
++ bnx2x_net_stats_update(bp);
++
++ if (bp->port.pmf)
++ bnx2x_port_stats_stop(bp);
++
++ bnx2x_hw_stats_post(bp);
++ bnx2x_stats_comp(bp);
++ }
++}
++
++/* No-op action for state-machine transitions that require no work */
++static void bnx2x_stats_do_nothing(struct bnx2x *bp)
++{
++}
++
++/* Statistics state machine: indexed by [current state][event], each
++ * entry names the action to run and the state to move to.  Rows are
++ * STATS_STATE_DISABLED and STATS_STATE_ENABLED; columns are the
++ * PMF / LINK_UP / UPDATE / STOP events (see bnx2x_stats_handle).
++ */
++static const struct {
++ void (*action)(struct bnx2x *bp);
++ enum bnx2x_stats_state next_state;
++} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
++/* state event */
++{
++/* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
++/* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
++/* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
++/* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
++},
++{
++/* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
++/* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
++/* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
++/* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
++}
++};
++
++/* Drive the statistics state machine: run the action for the
++ * (state, event) pair and commit the resulting state.  Transitions
++ * are logged except for the high-frequency UPDATE event, which is
++ * only logged when NETIF_MSG_TIMER debugging is enabled.
++ */
++static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
++{
++ enum bnx2x_stats_state state = bp->stats_state;
++
++ bnx2x_stats_stm[state][event].action(bp);
++ bp->stats_state = bnx2x_stats_stm[state][event].next_state;
++
++ if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
++ DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
++ state, event, bp->stats_state);
++}
++
++/* Periodic driver timer.  Skips all work while interrupts are gated
++ * (intr_sem held) but still re-arms itself.  Services the rx/tx
++ * rings directly in poll mode, exchanges the driver/MCP heartbeat
++ * pulse (complaining if the sequence numbers drift by more than 1),
++ * and feeds an UPDATE event into the statistics state machine when
++ * the device is in an operational state.
++ */
++static void bnx2x_timer(unsigned long data)
++{
++ struct bnx2x *bp = (struct bnx2x *) data;
++
++ if (!netif_running(bp->dev))
++ return;
++
++ if (atomic_read(&bp->intr_sem) != 0)
++ goto timer_restart;
++
++ if (poll) {
++ struct bnx2x_fastpath *fp = &bp->fp[0];
++ int rc;
++
++ bnx2x_tx_int(fp, 1000);
++ rc = bnx2x_rx_int(fp, 1000);
++ }
++
++ if (!BP_NOMCP(bp)) {
++ int func = BP_FUNC(bp);
++ u32 drv_pulse;
++ u32 mcp_pulse;
++
++ ++bp->fw_drv_pulse_wr_seq;
++ bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
++ /* TBD - add SYSTEM_TIME */
++ drv_pulse = bp->fw_drv_pulse_wr_seq;
++ SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
++
++ mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
++ MCP_PULSE_SEQ_MASK);
++ /* The delta between driver pulse and mcp response
++ * should be 1 (before mcp response) or 0 (after mcp response)
++ */
++ if ((drv_pulse != mcp_pulse) &&
++ (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
++ /* someone lost a heartbeat... */
++ BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
++ drv_pulse, mcp_pulse);
++ }
++ }
++
++ if ((bp->state == BNX2X_STATE_OPEN) ||
++ (bp->state == BNX2X_STATE_DISABLED))
++ bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
++
++timer_restart:
++ mod_timer(&bp->timer, jiffies + bp->current_interval);
++}
++
++/* end of Statistics */
++
++/* nic init */
++
++/*
++ * nic init service functions
++ */
++
++/* Zero the USTORM and CSTORM host status block areas for the given
++ * status-block id on this port (sizes are written in dwords).
++ */
++static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
++{
++ int port = BP_PORT(bp);
++
++ bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
++ USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
++ sizeof(struct ustorm_status_block)/4);
++ bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
++ CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
++ sizeof(struct cstorm_status_block)/4);
++}
++
++/* Initialize a per-queue host status block: publish the DMA address
++ * and owning function of its USTORM and CSTORM sections to device
++ * internal memory, disable host coalescing on every index, and ack
++ * the block with interrupts enabled.
++ */
++static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
++ dma_addr_t mapping, int sb_id)
++{
++ int port = BP_PORT(bp);
++ int func = BP_FUNC(bp);
++ int index;
++ u64 section;
++
++ /* USTORM */
++ section = ((u64)mapping) + offsetof(struct host_status_block,
++ u_status_block);
++ sb->u_status_block.status_block_id = sb_id;
++
++ REG_WR(bp, BAR_USTRORM_INTMEM +
++ USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
++ REG_WR(bp, BAR_USTRORM_INTMEM +
++ ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
++ U64_HI(section));
++ REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
++ USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
++
++ /* start with host coalescing disabled on all indices */
++ for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
++ REG_WR16(bp, BAR_USTRORM_INTMEM +
++ USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
++
++ /* CSTORM */
++ section = ((u64)mapping) + offsetof(struct host_status_block,
++ c_status_block);
++ sb->c_status_block.status_block_id = sb_id;
++
++ REG_WR(bp, BAR_CSTRORM_INTMEM +
++ CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
++ REG_WR(bp, BAR_CSTRORM_INTMEM +
++ ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
++ U64_HI(section));
++ REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
++ CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
++
++ for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
++ REG_WR16(bp, BAR_CSTRORM_INTMEM +
++ CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
++
++ bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
++}
++
++/* Zero all four storm sections (U/C/X/T) of this function's default
++ * status block in device internal memory (sizes given in dwords).
++ */
++static void bnx2x_zero_def_sb(struct bnx2x *bp)
++{
++ int func = BP_FUNC(bp);
++
++ bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
++ USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
++ sizeof(struct ustorm_def_status_block)/4);
++ bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
++ CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
++ sizeof(struct cstorm_def_status_block)/4);
++ bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
++ XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
++ sizeof(struct xstorm_def_status_block)/4);
++ bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
++ TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
++ sizeof(struct tstorm_def_status_block)/4);
++}
++
++/* Initialize this function's default status block: set up the
++ * attention section (capture the four AEU signal groups per dynamic
++ * attention group, publish the attention message address and number
++ * to the HC), then publish the DMA address and owner of each storm
++ * section (U/C/T/X) and disable host coalescing on all their
++ * indices.  Finishes by clearing the pending-stats/set-mac flags and
++ * acking the block with interrupts enabled.
++ */
++static void bnx2x_init_def_sb(struct bnx2x *bp,
++ struct host_def_status_block *def_sb,
++ dma_addr_t mapping, int sb_id)
++{
++ int port = BP_PORT(bp);
++ int func = BP_FUNC(bp);
++ int index, val, reg_offset;
++ u64 section;
++
++ /* ATTN */
++ section = ((u64)mapping) + offsetof(struct host_def_status_block,
++ atten_status_block);
++ def_sb->atten_status_block.status_block_id = sb_id;
++
++ bp->attn_state = 0;
++
++ reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
++ MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
++
++ /* each attention group occupies 4 consecutive 32-bit registers */
++ for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
++ bp->attn_group[index].sig[0] = REG_RD(bp,
++ reg_offset + 0x10*index);
++ bp->attn_group[index].sig[1] = REG_RD(bp,
++ reg_offset + 0x4 + 0x10*index);
++ bp->attn_group[index].sig[2] = REG_RD(bp,
++ reg_offset + 0x8 + 0x10*index);
++ bp->attn_group[index].sig[3] = REG_RD(bp,
++ reg_offset + 0xc + 0x10*index);
++ }
++
++ reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
++ HC_REG_ATTN_MSG0_ADDR_L);
++
++ REG_WR(bp, reg_offset, U64_LO(section));
++ REG_WR(bp, reg_offset + 4, U64_HI(section));
++
++ reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
++
++ val = REG_RD(bp, reg_offset);
++ val |= sb_id;
++ REG_WR(bp, reg_offset, val);
++
++ /* USTORM */
++ section = ((u64)mapping) + offsetof(struct host_def_status_block,
++ u_def_status_block);
++ def_sb->u_def_status_block.status_block_id = sb_id;
++
++ REG_WR(bp, BAR_USTRORM_INTMEM +
++ USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
++ REG_WR(bp, BAR_USTRORM_INTMEM +
++ ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
++ U64_HI(section));
++ REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
++ USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
++
++ for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
++ REG_WR16(bp, BAR_USTRORM_INTMEM +
++ USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
++
++ /* CSTORM */
++ section = ((u64)mapping) + offsetof(struct host_def_status_block,
++ c_def_status_block);
++ def_sb->c_def_status_block.status_block_id = sb_id;
++
++ REG_WR(bp, BAR_CSTRORM_INTMEM +
++ CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
++ REG_WR(bp, BAR_CSTRORM_INTMEM +
++ ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
++ U64_HI(section));
++ REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
++ CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
++
++ for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
++ REG_WR16(bp, BAR_CSTRORM_INTMEM +
++ CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
++
++ /* TSTORM */
++ section = ((u64)mapping) + offsetof(struct host_def_status_block,
++ t_def_status_block);
++ def_sb->t_def_status_block.status_block_id = sb_id;
++
++ REG_WR(bp, BAR_TSTRORM_INTMEM +
++ TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
++ REG_WR(bp, BAR_TSTRORM_INTMEM +
++ ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
++ U64_HI(section));
++ REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
++ TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
++
++ for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
++ REG_WR16(bp, BAR_TSTRORM_INTMEM +
++ TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
++
++ /* XSTORM */
++ section = ((u64)mapping) + offsetof(struct host_def_status_block,
++ x_def_status_block);
++ def_sb->x_def_status_block.status_block_id = sb_id;
++
++ REG_WR(bp, BAR_XSTRORM_INTMEM +
++ XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
++ REG_WR(bp, BAR_XSTRORM_INTMEM +
++ ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
++ U64_HI(section));
++ REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
++ XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
++
++ for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
++ REG_WR16(bp, BAR_XSTRORM_INTMEM +
++ XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
++
++ bp->stats_pending = 0;
++ bp->set_mac_pending = 0;
++
++ bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
++}
++
++/* Program interrupt coalescing for every queue: write the rx/tx
++ * timeout values (ticks/12) into the USTORM/CSTORM status-block
++ * timeout registers, and disable host coalescing for a direction
++ * when its tick value is zero.
++ */
++static void bnx2x_update_coalesce(struct bnx2x *bp)
++{
++ int port = BP_PORT(bp);
++ int i;
++
++ for_each_queue(bp, i) {
++ int sb_id = bp->fp[i].sb_id;
++
++ /* HC_INDEX_U_ETH_RX_CQ_CONS */
++ REG_WR8(bp, BAR_USTRORM_INTMEM +
++ USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
++ U_SB_ETH_RX_CQ_INDEX),
++ bp->rx_ticks/12);
++ REG_WR16(bp, BAR_USTRORM_INTMEM +
++ USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
++ U_SB_ETH_RX_CQ_INDEX),
++ bp->rx_ticks ? 0 : 1);
++
++ /* HC_INDEX_C_ETH_TX_CQ_CONS */
++ REG_WR8(bp, BAR_CSTRORM_INTMEM +
++ CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
++ C_SB_ETH_TX_CQ_INDEX),
++ bp->tx_ticks/12);
++ REG_WR16(bp, BAR_CSTRORM_INTMEM +
++ CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
++ C_SB_ETH_TX_CQ_INDEX),
++ bp->tx_ticks ? 0 : 1);
++ }
++}
++
++/* Release the first 'last' entries of a fastpath's TPA skb pool.
++ * Bins whose aggregation is still in BNX2X_TPA_START state have a
++ * live PCI mapping that must be unmapped before the skb is freed;
++ * empty bins are logged and skipped.
++ */
++static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
++ struct bnx2x_fastpath *fp, int last)
++{
++ int i;
++
++ for (i = 0; i < last; i++) {
++ struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
++ struct sk_buff *skb = rx_buf->skb;
++
++ if (skb == NULL) {
++ DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
++ continue;
++ }
++
++ if (fp->tpa_state[i] == BNX2X_TPA_START)
++ pci_unmap_single(bp->pdev,
++ pci_unmap_addr(rx_buf, mapping),
++ bp->rx_buf_size,
++ PCI_DMA_FROMDEVICE);
++
++ dev_kfree_skb(skb);
++ rx_buf->skb = NULL;
++ }
++}
++
++static void bnx2x_init_rx_rings(struct bnx2x *bp)
++{
++ int func = BP_FUNC(bp);
++ int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
++ ETH_MAX_AGGREGATION_QUEUES_E1H;
++ u16 ring_prod, cqe_ring_prod;
++ int i, j;
++
++ bp->rx_buf_size = bp->dev->mtu;
++ bp->rx_buf_size += bp->rx_offset + ETH_OVREHEAD +
++ BCM_RX_ETH_PAYLOAD_ALIGN;
++
++ if (bp->flags & TPA_ENABLE_FLAG) {
++ DP(NETIF_MSG_IFUP,
++ "rx_buf_size %d effective_mtu %d\n",
++ bp->rx_buf_size, bp->dev->mtu + ETH_OVREHEAD);
++
++ for_each_queue(bp, j) {
++ struct bnx2x_fastpath *fp = &bp->fp[j];
++
++ for (i = 0; i < max_agg_queues; i++) {
++ fp->tpa_pool[i].skb =
++ netdev_alloc_skb(bp->dev, bp->rx_buf_size);
++ if (!fp->tpa_pool[i].skb) {
++ BNX2X_ERR("Failed to allocate TPA "
++ "skb pool for queue[%d] - "
++ "disabling TPA on this "
++ "queue!\n", j);
++ bnx2x_free_tpa_pool(bp, fp, i);
++ fp->disable_tpa = 1;
++ break;
++ }
++ pci_unmap_addr_set((struct sw_rx_bd *)
++ &bp->fp->tpa_pool[i],
++ mapping, 0);
++ fp->tpa_state[i] = BNX2X_TPA_STOP;
++ }
++ }
++ }
++
++ for_each_queue(bp, j) {
++ struct bnx2x_fastpath *fp = &bp->fp[j];
++
++ fp->rx_bd_cons = 0;
++ fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
++ fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
++
++ /* "next page" elements initialization */
++ /* SGE ring */
++ for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
++ struct eth_rx_sge *sge;
++
++ sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
++ sge->addr_hi =
++ cpu_to_le32(U64_HI(fp->rx_sge_mapping +
++ BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
++ sge->addr_lo =
++ cpu_to_le32(U64_LO(fp->rx_sge_mapping +
++ BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
++ }
++
++ bnx2x_init_sge_ring_bit_mask(fp);
++
++ /* RX BD ring */
++ for (i = 1; i <= NUM_RX_RINGS; i++) {
++ struct eth_rx_bd *rx_bd;
++
++ rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
++ rx_bd->addr_hi =
++ cpu_to_le32(U64_HI(fp->rx_desc_mapping +
++ BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
++ rx_bd->addr_lo =
++ cpu_to_le32(U64_LO(fp->rx_desc_mapping +
++ BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
++ }
++
++ /* CQ ring */
++ for (i = 1; i <= NUM_RCQ_RINGS; i++) {
++ struct eth_rx_cqe_next_page *nextpg;
++
++ nextpg = (struct eth_rx_cqe_next_page *)
++ &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
++ nextpg->addr_hi =
++ cpu_to_le32(U64_HI(fp->rx_comp_mapping +
++ BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
++ nextpg->addr_lo =
++ cpu_to_le32(U64_LO(fp->rx_comp_mapping +
++ BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
++ }
++
++ /* Allocate SGEs and initialize the ring elements */
++ for (i = 0, ring_prod = 0;
++ i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
++
++ if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
++ BNX2X_ERR("was only able to allocate "
++ "%d rx sges\n", i);
++ BNX2X_ERR("disabling TPA for queue[%d]\n", j);
++ /* Cleanup already allocated elements */
++ bnx2x_free_rx_sge_range(bp, fp, ring_prod);
++ bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
++ fp->disable_tpa = 1;
++ ring_prod = 0;
++ break;
++ }
++ ring_prod = NEXT_SGE_IDX(ring_prod);
++ }
++ fp->rx_sge_prod = ring_prod;
++
++ /* Allocate BDs and initialize BD ring */
++ fp->rx_comp_cons = 0;
++ cqe_ring_prod = ring_prod = 0;
++ for (i = 0; i < bp->rx_ring_size; i++) {
++ if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
++ BNX2X_ERR("was only able to allocate "
++ "%d rx skbs\n", i);
++ bp->eth_stats.rx_skb_alloc_failed++;
++ break;
++ }
++ ring_prod = NEXT_RX_IDX(ring_prod);
++ cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
++ WARN_ON(ring_prod <= i);
++ }
++
++ fp->rx_bd_prod = ring_prod;
++ /* must not have more available CQEs than BDs */
++ fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
++ cqe_ring_prod);
++ fp->rx_pkt = fp->rx_calls = 0;
++
++ /* Warning!
++ * this will generate an interrupt (to the TSTORM)
++ * must only be done after chip is initialized
++ */
++ bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
++ fp->rx_sge_prod);
++ if (j != 0)
++ continue;
++
++ REG_WR(bp, BAR_USTRORM_INTMEM +
++ USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
++ U64_LO(fp->rx_comp_mapping));
++ REG_WR(bp, BAR_USTRORM_INTMEM +
++ USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
++ U64_HI(fp->rx_comp_mapping));
++ }
++}
++
++static void bnx2x_init_tx_ring(struct bnx2x *bp)
++{
++ int i, j;
++
++ for_each_queue(bp, j) {
++ struct bnx2x_fastpath *fp = &bp->fp[j];
++
++ for (i = 1; i <= NUM_TX_RINGS; i++) {
++ struct eth_tx_bd *tx_bd =
++ &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
++
++ tx_bd->addr_hi =
++ cpu_to_le32(U64_HI(fp->tx_desc_mapping +
++ BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
++ tx_bd->addr_lo =
++ cpu_to_le32(U64_LO(fp->tx_desc_mapping +
++ BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
++ }
++
++ fp->tx_pkt_prod = 0;
++ fp->tx_pkt_cons = 0;
++ fp->tx_bd_prod = 0;
++ fp->tx_bd_cons = 0;
++ fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
++ fp->tx_pkt = 0;
++ }
++}
++
++static void bnx2x_init_sp_ring(struct bnx2x *bp)
++{
++ int func = BP_FUNC(bp);
++
++ spin_lock_init(&bp->spq_lock);
++
++ bp->spq_left = MAX_SPQ_PENDING;
++ bp->spq_prod_idx = 0;
++ bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
++ bp->spq_prod_bd = bp->spq;
++ bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
++
++ REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
++ U64_LO(bp->spq_mapping));
++ REG_WR(bp,
++ XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
++ U64_HI(bp->spq_mapping));
++
++ REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
++ bp->spq_prod_idx);
++}
++
++static void bnx2x_init_context(struct bnx2x *bp)
++{
++ int i;
++
++ for_each_queue(bp, i) {
++ struct eth_context *context = bnx2x_sp(bp, context[i].eth);
++ struct bnx2x_fastpath *fp = &bp->fp[i];
++ u8 sb_id = FP_SB_ID(fp);
++
++ context->ustorm_st_context.common.sb_index_numbers =
++ BNX2X_RX_SB_INDEX_NUM;
++ context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
++ context->ustorm_st_context.common.status_block_id = sb_id;
++ context->ustorm_st_context.common.flags =
++ USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
++ context->ustorm_st_context.common.mc_alignment_log_size =
++ 6 /*BCM_RX_ETH_PAYLOAD_ALIGN*/;
++ context->ustorm_st_context.common.bd_buff_size =
++ bp->rx_buf_size;
++ context->ustorm_st_context.common.bd_page_base_hi =
++ U64_HI(fp->rx_desc_mapping);
++ context->ustorm_st_context.common.bd_page_base_lo =
++ U64_LO(fp->rx_desc_mapping);
++ if (!fp->disable_tpa) {
++ context->ustorm_st_context.common.flags |=
++ (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
++ USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
++ context->ustorm_st_context.common.sge_buff_size =
++ (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
++ (u32)0xffff);
++ context->ustorm_st_context.common.sge_page_base_hi =
++ U64_HI(fp->rx_sge_mapping);
++ context->ustorm_st_context.common.sge_page_base_lo =
++ U64_LO(fp->rx_sge_mapping);
++ }
++
++ context->ustorm_ag_context.cdu_usage =
++ CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
++ CDU_REGION_NUMBER_UCM_AG,
++ ETH_CONNECTION_TYPE);
++
++ context->xstorm_st_context.tx_bd_page_base_hi =
++ U64_HI(fp->tx_desc_mapping);
++ context->xstorm_st_context.tx_bd_page_base_lo =
++ U64_LO(fp->tx_desc_mapping);
++ context->xstorm_st_context.db_data_addr_hi =
++ U64_HI(fp->tx_prods_mapping);
++ context->xstorm_st_context.db_data_addr_lo =
++ U64_LO(fp->tx_prods_mapping);
++ context->xstorm_st_context.statistics_data = (fp->cl_id |
++ XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
++ context->cstorm_st_context.sb_index_number =
++ C_SB_ETH_TX_CQ_INDEX;
++ context->cstorm_st_context.status_block_id = sb_id;
++
++ context->xstorm_ag_context.cdu_reserved =
++ CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
++ CDU_REGION_NUMBER_XCM_AG,
++ ETH_CONNECTION_TYPE);
++ }
++}
++
++static void bnx2x_init_ind_table(struct bnx2x *bp)
++{
++ int func = BP_FUNC(bp);
++ int i;
++
++ if (!is_multi(bp))
++ return;
++
++ DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
++ for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
++ REG_WR8(bp, BAR_TSTRORM_INTMEM +
++ TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
++ BP_CL_ID(bp) + (i % bp->num_queues));
++}
++
++static void bnx2x_set_client_config(struct bnx2x *bp)
++{
++ struct tstorm_eth_client_config tstorm_client = {0};
++ int port = BP_PORT(bp);
++ int i;
++
++ tstorm_client.mtu = bp->dev->mtu;
++ tstorm_client.statistics_counter_id = BP_CL_ID(bp);
++ tstorm_client.config_flags =
++ TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
++#ifdef BCM_VLAN
++ if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
++ tstorm_client.config_flags |=
++ TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
++ DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
++ }
++#endif
++
++ if (bp->flags & TPA_ENABLE_FLAG) {
++ tstorm_client.max_sges_for_packet =
++ SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
++ tstorm_client.max_sges_for_packet =
++ ((tstorm_client.max_sges_for_packet +
++ PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
++ PAGES_PER_SGE_SHIFT;
++
++ tstorm_client.config_flags |=
++ TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
++ }
++
++ for_each_queue(bp, i) {
++ REG_WR(bp, BAR_TSTRORM_INTMEM +
++ TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
++ ((u32 *)&tstorm_client)[0]);
++ REG_WR(bp, BAR_TSTRORM_INTMEM +
++ TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
++ ((u32 *)&tstorm_client)[1]);
++ }
++
++ DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
++ ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
++}
++
++static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
++{
++ struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
++ int mode = bp->rx_mode;
++ int mask = (1 << BP_L_ID(bp));
++ int func = BP_FUNC(bp);
++ int i;
++
++ DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
++
++ switch (mode) {
++ case BNX2X_RX_MODE_NONE: /* no Rx */
++ tstorm_mac_filter.ucast_drop_all = mask;
++ tstorm_mac_filter.mcast_drop_all = mask;
++ tstorm_mac_filter.bcast_drop_all = mask;
++ break;
++ case BNX2X_RX_MODE_NORMAL:
++ tstorm_mac_filter.bcast_accept_all = mask;
++ break;
++ case BNX2X_RX_MODE_ALLMULTI:
++ tstorm_mac_filter.mcast_accept_all = mask;
++ tstorm_mac_filter.bcast_accept_all = mask;
++ break;
++ case BNX2X_RX_MODE_PROMISC:
++ tstorm_mac_filter.ucast_accept_all = mask;
++ tstorm_mac_filter.mcast_accept_all = mask;
++ tstorm_mac_filter.bcast_accept_all = mask;
++ break;
++ default:
++ BNX2X_ERR("BAD rx mode (%d)\n", mode);
++ break;
++ }
++
++ for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
++ REG_WR(bp, BAR_TSTRORM_INTMEM +
++ TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
++ ((u32 *)&tstorm_mac_filter)[i]);
++
++/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
++ ((u32 *)&tstorm_mac_filter)[i]); */
++ }
++
++ if (mode != BNX2X_RX_MODE_NONE)
++ bnx2x_set_client_config(bp);
++}
++
++static void bnx2x_init_internal_common(struct bnx2x *bp)
++{
++ int i;
++
++ if (bp->flags & TPA_ENABLE_FLAG) {
++ struct tstorm_eth_tpa_exist tpa = {0};
++
++ tpa.tpa_exist = 1;
++
++ REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
++ ((u32 *)&tpa)[0]);
++ REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
++ ((u32 *)&tpa)[1]);
++ }
++
++ /* Zero this manually as its initialization is
++ currently missing in the initTool */
++ for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
++ REG_WR(bp, BAR_USTRORM_INTMEM +
++ USTORM_AGG_DATA_OFFSET + i * 4, 0);
++}
++
++static void bnx2x_init_internal_port(struct bnx2x *bp)
++{
++ int port = BP_PORT(bp);
++
++ REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
++ REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
++ REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
++ REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
++}
++
++static void bnx2x_init_internal_func(struct bnx2x *bp)
++{
++ struct tstorm_eth_function_common_config tstorm_config = {0};
++ struct stats_indication_flags stats_flags = {0};
++ int port = BP_PORT(bp);
++ int func = BP_FUNC(bp);
++ int i;
++ u16 max_agg_size;
++
++ if (is_multi(bp)) {
++ tstorm_config.config_flags = MULTI_FLAGS;
++ tstorm_config.rss_result_mask = MULTI_MASK;
++ }
++ if (IS_E1HMF(bp))
++ tstorm_config.config_flags |=
++ TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
++
++ tstorm_config.leading_client_id = BP_L_ID(bp);
++
++ REG_WR(bp, BAR_TSTRORM_INTMEM +
++ TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
++ (*(u32 *)&tstorm_config));
++
++ bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
++ bnx2x_set_storm_rx_mode(bp);
++
++ /* reset xstorm per client statistics */
++ for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
++ REG_WR(bp, BAR_XSTRORM_INTMEM +
++ XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
++ i*4, 0);
++ }
++ /* reset tstorm per client statistics */
++ for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
++ REG_WR(bp, BAR_TSTRORM_INTMEM +
++ TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
++ i*4, 0);
++ }
++
++ /* Init statistics related context */
++ stats_flags.collect_eth = 1;
++
++ REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
++ ((u32 *)&stats_flags)[0]);
++ REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
++ ((u32 *)&stats_flags)[1]);
++
++ REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
++ ((u32 *)&stats_flags)[0]);
++ REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
++ ((u32 *)&stats_flags)[1]);
++
++ REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
++ ((u32 *)&stats_flags)[0]);
++ REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
++ ((u32 *)&stats_flags)[1]);
++
++ REG_WR(bp, BAR_XSTRORM_INTMEM +
++ XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
++ U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
++ REG_WR(bp, BAR_XSTRORM_INTMEM +
++ XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
++ U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
++
++ REG_WR(bp, BAR_TSTRORM_INTMEM +
++ TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
++ U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
++ REG_WR(bp, BAR_TSTRORM_INTMEM +
++ TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
++ U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
++
++ if (CHIP_IS_E1H(bp)) {
++ REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
++ IS_E1HMF(bp));
++ REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
++ IS_E1HMF(bp));
++ REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
++ IS_E1HMF(bp));
++ REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
++ IS_E1HMF(bp));
++
++ REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
++ bp->e1hov);
++ }
++
++ /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
++ max_agg_size =
++ min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
++ SGE_PAGE_SIZE * PAGES_PER_SGE),
++ (u32)0xffff);
++ for_each_queue(bp, i) {
++ struct bnx2x_fastpath *fp = &bp->fp[i];
++
++ REG_WR(bp, BAR_USTRORM_INTMEM +
++ USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
++ U64_LO(fp->rx_comp_mapping));
++ REG_WR(bp, BAR_USTRORM_INTMEM +
++ USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
++ U64_HI(fp->rx_comp_mapping));
++
++ REG_WR16(bp, BAR_USTRORM_INTMEM +
++ USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
++ max_agg_size);
++ }
++}
++
++static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
++{
++ switch (load_code) {
++ case FW_MSG_CODE_DRV_LOAD_COMMON:
++ bnx2x_init_internal_common(bp);
++ /* no break */
++
++ case FW_MSG_CODE_DRV_LOAD_PORT:
++ bnx2x_init_internal_port(bp);
++ /* no break */
++
++ case FW_MSG_CODE_DRV_LOAD_FUNCTION:
++ bnx2x_init_internal_func(bp);
++ break;
++
++ default:
++ BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
++ break;
++ }
++}
++
++static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
++{
++ int i;
++
++ for_each_queue(bp, i) {
++ struct bnx2x_fastpath *fp = &bp->fp[i];
++
++ fp->bp = bp;
++ fp->state = BNX2X_FP_STATE_CLOSED;
++ fp->index = i;
++ fp->cl_id = BP_L_ID(bp) + i;
++ fp->sb_id = fp->cl_id;
++ DP(NETIF_MSG_IFUP,
++ "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n",
++ bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
++ bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
++ FP_SB_ID(fp));
++ bnx2x_update_fpsb_idx(fp);
++ }
++
++ bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
++ DEF_SB_ID);
++ bnx2x_update_dsb_idx(bp);
++ bnx2x_update_coalesce(bp);
++ bnx2x_init_rx_rings(bp);
++ bnx2x_init_tx_ring(bp);
++ bnx2x_init_sp_ring(bp);
++ bnx2x_init_context(bp);
++ bnx2x_init_internal(bp, load_code);
++ bnx2x_init_ind_table(bp);
++ bnx2x_stats_init(bp);
++
++ /* At this point, we are ready for interrupts */
++ atomic_set(&bp->intr_sem, 0);
++
++ /* flush all before enabling interrupts */
++ mb();
++ mmiowb();
++
++ bnx2x_int_enable(bp);
++}
++
++/* end of nic init */
++
++/*
++ * gzip service functions
++ */
++
++static int bnx2x_gunzip_init(struct bnx2x *bp)
++{
++ bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
++ &bp->gunzip_mapping);
++ if (bp->gunzip_buf == NULL)
++ goto gunzip_nomem1;
++
++ bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
++ if (bp->strm == NULL)
++ goto gunzip_nomem2;
++
++ bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
++ GFP_KERNEL);
++ if (bp->strm->workspace == NULL)
++ goto gunzip_nomem3;
++
++ return 0;
++
++gunzip_nomem3:
++ kfree(bp->strm);
++ bp->strm = NULL;
++
++gunzip_nomem2:
++ pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
++ bp->gunzip_mapping);
++ bp->gunzip_buf = NULL;
++
++gunzip_nomem1:
++ printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
++ " un-compression\n", bp->dev->name);
++ return -ENOMEM;
++}
++
++static void bnx2x_gunzip_end(struct bnx2x *bp)
++{
++ kfree(bp->strm->workspace);
++
++ kfree(bp->strm);
++ bp->strm = NULL;
++
++ if (bp->gunzip_buf) {
++ pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
++ bp->gunzip_mapping);
++ bp->gunzip_buf = NULL;
++ }
++}
++
++static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
++{
++ int n, rc;
++
++ /* check gzip header */
++ if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
++ return -EINVAL;
++
++ n = 10;
++
++#define FNAME 0x8
++
++ if (zbuf[3] & FNAME)
++ while ((zbuf[n++] != 0) && (n < len));
++
++ bp->strm->next_in = zbuf + n;
++ bp->strm->avail_in = len - n;
++ bp->strm->next_out = bp->gunzip_buf;
++ bp->strm->avail_out = FW_BUF_SIZE;
++
++ rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
++ if (rc != Z_OK)
++ return rc;
++
++ rc = zlib_inflate(bp->strm, Z_FINISH);
++ if ((rc != Z_OK) && (rc != Z_STREAM_END))
++ printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
++ bp->dev->name, bp->strm->msg);
++
++ bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
++ if (bp->gunzip_outlen & 0x3)
++ printk(KERN_ERR PFX "%s: Firmware decompression error:"
++ " gunzip_outlen (%d) not aligned\n",
++ bp->dev->name, bp->gunzip_outlen);
++ bp->gunzip_outlen >>= 2;
++
++ zlib_inflateEnd(bp->strm);
++
++ if (rc == Z_STREAM_END)
++ return 0;
++
++ return rc;
++}
++
++/* nic load/unload */
++
++/*
++ * General service functions
++ */
++
++/* send a NIG loopback debug packet */
++static void bnx2x_lb_pckt(struct bnx2x *bp)
++{
++ u32 wb_write[3];
++
++ /* Ethernet source and destination addresses */
++ wb_write[0] = 0x55555555;
++ wb_write[1] = 0x55555555;
++ wb_write[2] = 0x20; /* SOP */
++ REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
++
++ /* NON-IP protocol */
++ wb_write[0] = 0x09000000;
++ wb_write[1] = 0x55555555;
++ wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
++ REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
++}
++
++/* some of the internal memories
++ * are not directly readable from the driver
++ * to test them we send debug packets
++ */
++static int bnx2x_int_mem_test(struct bnx2x *bp)
++{
++ int factor;
++ int count, i;
++ u32 val = 0;
++
++ if (CHIP_REV_IS_FPGA(bp))
++ factor = 120;
++ else if (CHIP_REV_IS_EMUL(bp))
++ factor = 200;
++ else
++ factor = 1;
++
++ DP(NETIF_MSG_HW, "start part1\n");
++
++ /* Disable inputs of parser neighbor blocks */
++ REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
++ REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
++ REG_WR(bp, CFC_REG_DEBUG0, 0x1);
++ REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
++
++ /* Write 0 to parser credits for CFC search request */
++ REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
++
++ /* send Ethernet packet */
++ bnx2x_lb_pckt(bp);
++
++ /* TODO do i reset NIG statistic? */
++ /* Wait until NIG register shows 1 packet of size 0x10 */
++ count = 1000 * factor;
++ while (count) {
++
++ bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
++ val = *bnx2x_sp(bp, wb_data[0]);
++ if (val == 0x10)
++ break;
++
++ msleep(10);
++ count--;
++ }
++ if (val != 0x10) {
++ BNX2X_ERR("NIG timeout val = 0x%x\n", val);
++ return -1;
++ }
++
++ /* Wait until PRS register shows 1 packet */
++ count = 1000 * factor;
++ while (count) {
++ val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
++ if (val == 1)
++ break;
++
++ msleep(10);
++ count--;
++ }
++ if (val != 0x1) {
++ BNX2X_ERR("PRS timeout val = 0x%x\n", val);
++ return -2;
++ }
++
++ /* Reset and init BRB, PRS */
++ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
++ msleep(50);
++ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
++ msleep(50);
++ bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
++ bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
++
++ DP(NETIF_MSG_HW, "part2\n");
++
++ /* Disable inputs of parser neighbor blocks */
++ REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
++ REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
++ REG_WR(bp, CFC_REG_DEBUG0, 0x1);
++ REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
++
++ /* Write 0 to parser credits for CFC search request */
++ REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
++
++ /* send 10 Ethernet packets */
++ for (i = 0; i < 10; i++)
++ bnx2x_lb_pckt(bp);
++
++ /* Wait until NIG register shows 10 + 1
++ packets of size 11*0x10 = 0xb0 */
++ count = 1000 * factor;
++ while (count) {
++
++ bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
++ val = *bnx2x_sp(bp, wb_data[0]);
++ if (val == 0xb0)
++ break;
++
++ msleep(10);
++ count--;
++ }
++ if (val != 0xb0) {
++ BNX2X_ERR("NIG timeout val = 0x%x\n", val);
++ return -3;
++ }
++
++ /* Wait until PRS register shows 2 packets */
++ val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
++ if (val != 2)
++ BNX2X_ERR("PRS timeout val = 0x%x\n", val);
++
++ /* Write 1 to parser credits for CFC search request */
++ REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
++
++ /* Wait until PRS register shows 3 packets */
++ msleep(10 * factor);
++ /* Wait until NIG register shows 1 packet of size 0x10 */
++ val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
++ if (val != 3)
++ BNX2X_ERR("PRS timeout val = 0x%x\n", val);
++
++ /* clear NIG EOP FIFO */
++ for (i = 0; i < 11; i++)
++ REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
++ val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
++ if (val != 1) {
++ BNX2X_ERR("clear of NIG failed\n");
++ return -4;
++ }
++
++ /* Reset and init BRB, PRS, NIG */
++ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
++ msleep(50);
++ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
++ msleep(50);
++ bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
++ bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
++#ifndef BCM_ISCSI
++ /* set NIC mode */
++ REG_WR(bp, PRS_REG_NIC_MODE, 1);
++#endif
++
++ /* Enable inputs of parser neighbor blocks */
++ REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
++ REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
++ REG_WR(bp, CFC_REG_DEBUG0, 0x0);
++ REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
++
++ DP(NETIF_MSG_HW, "done\n");
++
++ return 0; /* OK */
++}
++
++static void enable_blocks_attention(struct bnx2x *bp)
++{
++ REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
++ REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
++ REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
++ REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
++ REG_WR(bp, QM_REG_QM_INT_MASK, 0);
++ REG_WR(bp, TM_REG_TM_INT_MASK, 0);
++ REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
++ REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
++ REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
++/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
++/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
++ REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
++ REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
++ REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
++/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
++/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
++ REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
++ REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
++ REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
++ REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
++/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
++/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
++ if (CHIP_REV_IS_FPGA(bp))
++ REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
++ else
++ REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
++ REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
++ REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
++ REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
++/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
++/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
++ REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
++ REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
++/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
++ REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
++}
++
++
++static void bnx2x_reset_common(struct bnx2x *bp)
++{
++ /* reset_common */
++ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
++ 0xd3ffff7f);
++ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
++}
++
++static int bnx2x_init_common(struct bnx2x *bp)
++{
++ u32 val, i;
++
++ DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
++
++ bnx2x_reset_common(bp);
++ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
++ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
++
++ bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
++ if (CHIP_IS_E1H(bp))
++ REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
++
++ REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
++ msleep(30);
++ REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
++
++ bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
++ if (CHIP_IS_E1(bp)) {
++ /* enable HW interrupt from PXP on USDM overflow
++ bit 16 on INT_MASK_0 */
++ REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
++ }
++
++ bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
++ bnx2x_init_pxp(bp);
++
++#ifdef __BIG_ENDIAN
++ REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
++ REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
++ REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
++ REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
++ REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
++
++/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
++ REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
++ REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
++ REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
++ REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
++#endif
++
++ REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
++#ifdef BCM_ISCSI
++ REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
++ REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
++ REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
++#endif
++
++ if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
++ REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
++
++ /* let the HW do it's magic ... */
++ msleep(100);
++ /* finish PXP init */
++ val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
++ if (val != 1) {
++ BNX2X_ERR("PXP2 CFG failed\n");
++ return -EBUSY;
++ }
++ val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
++ if (val != 1) {
++ BNX2X_ERR("PXP2 RD_INIT failed\n");
++ return -EBUSY;
++ }
++
++ REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
++ REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
++
++ bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
++
++ /* clean the DMAE memory */
++ bp->dmae_ready = 1;
++ bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
++
++ bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
++ bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
++ bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
++ bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
++
++ bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
++ bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
++ bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
++ bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
++
++ bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
++ /* soft reset pulse */
++ REG_WR(bp, QM_REG_SOFT_RESET, 1);
++ REG_WR(bp, QM_REG_SOFT_RESET, 0);
++
++#ifdef BCM_ISCSI
++ bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
++#endif
++
++ bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
++ REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
++ if (!CHIP_REV_IS_SLOW(bp)) {
++ /* enable hw interrupt from doorbell Q */
++ REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
++ }
++
++ bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
++ if (CHIP_REV_IS_SLOW(bp)) {
++ /* fix for emulation and FPGA for no pause */
++ REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
++ REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
++ REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
++ REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
++ }
++
++ bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
++ REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
++ /* set NIC mode */
++ REG_WR(bp, PRS_REG_NIC_MODE, 1);
++ if (CHIP_IS_E1H(bp))
++ REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
++
++ bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
++ bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
++ bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
++ bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
++
++ if (CHIP_IS_E1H(bp)) {
++ bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
++ STORM_INTMEM_SIZE_E1H/2);
++ bnx2x_init_fill(bp,
++ TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
++ 0, STORM_INTMEM_SIZE_E1H/2);
++ bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
++ STORM_INTMEM_SIZE_E1H/2);
++ bnx2x_init_fill(bp,
++ CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
++ 0, STORM_INTMEM_SIZE_E1H/2);
++ bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
++ STORM_INTMEM_SIZE_E1H/2);
++ bnx2x_init_fill(bp,
++ XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
++ 0, STORM_INTMEM_SIZE_E1H/2);
++ bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
++ STORM_INTMEM_SIZE_E1H/2);
++ bnx2x_init_fill(bp,
++ USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
++ 0, STORM_INTMEM_SIZE_E1H/2);
++ } else { /* E1 */
++ bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
++ STORM_INTMEM_SIZE_E1);
++ bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
++ STORM_INTMEM_SIZE_E1);
++ bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
++ STORM_INTMEM_SIZE_E1);
++ bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
++ STORM_INTMEM_SIZE_E1);
++ }
++
++ bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
++ bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
++ bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
++ bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
++
++ /* sync semi rtc */
++ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
++ 0x80000000);
++ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
++ 0x80000000);
++
++ bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
++ bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
++ bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
++
++ REG_WR(bp, SRC_REG_SOFT_RST, 1);
++ for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
++ REG_WR(bp, i, 0xc0cac01a);
++ /* TODO: replace with something meaningful */
++ }
++ bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
++ REG_WR(bp, SRC_REG_SOFT_RST, 0);
++
++ if (sizeof(union cdu_context) != 1024)
++ /* we currently assume that a context is 1024 bytes */
++ printk(KERN_ALERT PFX "please adjust the size of"
++ " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
++
++ bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
++ val = (4 << 24) + (0 << 12) + 1024;
++ REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
++ if (CHIP_IS_E1(bp)) {
++ /* !!! fix pxp client credit until excel update */
++ REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
++ REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
++ }
++
++ bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
++ REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
++ /* enable context validation interrupt from CFC */
++ REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
++
++ /* set the thresholds to prevent CFC/CDU race */
++ REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
++
++ bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
++ bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
++
++ /* PXPCS COMMON comes here */
++ /* Reset PCIE errors for debug */
++ REG_WR(bp, 0x2814, 0xffffffff);
++ REG_WR(bp, 0x3820, 0xffffffff);
++
++ /* EMAC0 COMMON comes here */
++ /* EMAC1 COMMON comes here */
++ /* DBU COMMON comes here */
++ /* DBG COMMON comes here */
++
++ bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
++ if (CHIP_IS_E1H(bp)) {
++ REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
++ REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
++ }
++
++ if (CHIP_REV_IS_SLOW(bp))
++ msleep(200);
++
++ /* finish CFC init */
++ val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
++ if (val != 1) {
++ BNX2X_ERR("CFC LL_INIT failed\n");
++ return -EBUSY;
++ }
++ val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
++ if (val != 1) {
++ BNX2X_ERR("CFC AC_INIT failed\n");
++ return -EBUSY;
++ }
++ val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
++ if (val != 1) {
++ BNX2X_ERR("CFC CAM_INIT failed\n");
++ return -EBUSY;
++ }
++ REG_WR(bp, CFC_REG_DEBUG0, 0);
++
++ /* read NIG statistic
++ to see if this is our first up since powerup */
++ bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
++ val = *bnx2x_sp(bp, wb_data[0]);
++
++ /* do internal memory self test */
++ if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
++ BNX2X_ERR("internal mem self test failed\n");
++ return -EBUSY;
++ }
++
++ switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
++ case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
++ case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
++ /* Fan failure is indicated by SPIO 5 */
++ bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
++ MISC_REGISTERS_SPIO_INPUT_HI_Z);
++
++ /* set to active low mode */
++ val = REG_RD(bp, MISC_REG_SPIO_INT);
++ val |= ((1 << MISC_REGISTERS_SPIO_5) <<
++ MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
++ REG_WR(bp, MISC_REG_SPIO_INT, val);
++
++ /* enable interrupt to signal the IGU */
++ val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
++ val |= (1 << MISC_REGISTERS_SPIO_5);
++ REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
++ break;
++
++ default:
++ break;
++ }
++
++ /* clear PXP2 attentions */
++ REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
++
++ enable_blocks_attention(bp);
++
++ if (!BP_NOMCP(bp)) {
++ bnx2x_acquire_phy_lock(bp);
++ bnx2x_common_init_phy(bp, bp->common.shmem_base);
++ bnx2x_release_phy_lock(bp);
++ } else
++ BNX2X_ERR("Bootcode is missing - can not initialize link\n");
++
++ return 0;
++}
++
++static int bnx2x_init_port(struct bnx2x *bp)
++{
++ int port = BP_PORT(bp);
++ u32 val;
++
++ DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
++
++ REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
++
++ /* Port PXP comes here */
++ /* Port PXP2 comes here */
++#ifdef BCM_ISCSI
++ /* Port0 1
++ * Port1 385 */
++ i++;
++ wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
++ wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
++ REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
++ REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
++
++ /* Port0 2
++ * Port1 386 */
++ i++;
++ wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
++ wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
++ REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
++ REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
++
++ /* Port0 3
++ * Port1 387 */
++ i++;
++ wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
++ wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
++ REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
++ REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
++#endif
++ /* Port CMs come here */
++ bnx2x_init_block(bp, (port ? XCM_PORT1_START : XCM_PORT0_START),
++ (port ? XCM_PORT1_END : XCM_PORT0_END));
++
++ /* Port QM comes here */
++#ifdef BCM_ISCSI
++ REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
++ REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
++
++ bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
++ func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
++#endif
++ /* Port DQ comes here */
++ /* Port BRB1 comes here */
++ /* Port PRS comes here */
++ /* Port TSDM comes here */
++ /* Port CSDM comes here */
++ /* Port USDM comes here */
++ /* Port XSDM comes here */
++ bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
++ port ? TSEM_PORT1_END : TSEM_PORT0_END);
++ bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
++ port ? USEM_PORT1_END : USEM_PORT0_END);
++ bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
++ port ? CSEM_PORT1_END : CSEM_PORT0_END);
++ bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
++ port ? XSEM_PORT1_END : XSEM_PORT0_END);
++ /* Port UPB comes here */
++ /* Port XPB comes here */
++
++ bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
++ port ? PBF_PORT1_END : PBF_PORT0_END);
++
++ /* configure PBF to work without PAUSE mtu 9000 */
++ REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
++
++ /* update threshold */
++ REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
++ /* update init credit */
++ REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
++
++ /* probe changes */
++ REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
++ msleep(5);
++ REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
++
++#ifdef BCM_ISCSI
++ /* tell the searcher where the T2 table is */
++ REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
++
++ wb_write[0] = U64_LO(bp->t2_mapping);
++ wb_write[1] = U64_HI(bp->t2_mapping);
++ REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
++ wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
++ wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
++ REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
++
++ REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
++ /* Port SRCH comes here */
++#endif
++ /* Port CDU comes here */
++ /* Port CFC comes here */
++
++ if (CHIP_IS_E1(bp)) {
++ REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
++ REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
++ }
++ bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
++ port ? HC_PORT1_END : HC_PORT0_END);
++
++ bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
++ MISC_AEU_PORT0_START,
++ port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
++ /* init aeu_mask_attn_func_0/1:
++ * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
++ * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
++ * bits 4-7 are used for "per vn group attention" */
++ REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
++ (IS_E1HMF(bp) ? 0xF7 : 0x7));
++
++ /* Port PXPCS comes here */
++ /* Port EMAC0 comes here */
++ /* Port EMAC1 comes here */
++ /* Port DBU comes here */
++ /* Port DBG comes here */
++ bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
++ port ? NIG_PORT1_END : NIG_PORT0_END);
++
++ REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
++
++ if (CHIP_IS_E1H(bp)) {
++ u32 wsum;
++ struct cmng_struct_per_port m_cmng_port;
++ int vn;
++
++ /* 0x2 disable e1hov, 0x1 enable */
++ REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
++ (IS_E1HMF(bp) ? 0x1 : 0x2));
++
++ /* Init RATE SHAPING and FAIRNESS contexts.
++ Initialize as if there is 10G link. */
++ wsum = bnx2x_calc_vn_wsum(bp);
++ bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
++ if (IS_E1HMF(bp))
++ for (vn = VN_0; vn < E1HVN_MAX; vn++)
++ bnx2x_init_vn_minmax(bp, 2*vn + port,
++ wsum, 10000, &m_cmng_port);
++ }
++
++ /* Port MCP comes here */
++ /* Port DMAE comes here */
++
++ switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
++ case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
++ case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
++ /* add SPIO 5 to group 0 */
++ val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
++ val |= AEU_INPUTS_ATTN_BITS_SPIO5;
++ REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
++ break;
++
++ default:
++ break;
++ }
++
++ bnx2x__link_reset(bp);
++
++ return 0;
++}
++
++#define ILT_PER_FUNC (768/2)
++#define FUNC_ILT_BASE(func) ((func) * ILT_PER_FUNC)
++/* the physical address is shifted right 12 bits and has a
++ 1=valid bit added as the 53rd bit
++ then since this is a wide register(TM)
++ we split it into two 32 bit writes
++ */
++#define ONCHIP_ADDR1(x) ((u32)(((u64)(x) >> 12) & 0xFFFFFFFF))
++#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)(x) >> 44)))
++#define PXP_ONE_ILT(x) (((x) << 10) | (x))
++#define PXP_ILT_RANGE(f, l) (((l) << 10) | (f))
++
++#define CNIC_ILT_LINES 0
++
++static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
++{
++ int reg;
++
++ if (CHIP_IS_E1H(bp))
++ reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
++ else /* E1 */
++ reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
++
++ bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
++}
++
++static int bnx2x_init_func(struct bnx2x *bp)
++{
++ int port = BP_PORT(bp);
++ int func = BP_FUNC(bp);
++ int i;
++
++ DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
++
++ i = FUNC_ILT_BASE(func);
++
++ bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
++ if (CHIP_IS_E1H(bp)) {
++ REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
++ REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
++ } else /* E1 */
++ REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
++ PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
++
++
++ if (CHIP_IS_E1H(bp)) {
++ for (i = 0; i < 9; i++)
++ bnx2x_init_block(bp,
++ cm_start[func][i], cm_end[func][i]);
++
++ REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
++ REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
++ }
++
++ /* HC init per function */
++ if (CHIP_IS_E1H(bp)) {
++ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
++
++ REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
++ REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
++ }
++ bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
++
++ /* Reset PCIE errors for debug */
++ REG_WR(bp, 0x2114, 0xffffffff);
++ REG_WR(bp, 0x2120, 0xffffffff);
++
++ return 0;
++}
++
++static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
++{
++ int i, rc = 0;
++
++ DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
++ BP_FUNC(bp), load_code);
++
++ bp->dmae_ready = 0;
++ mutex_init(&bp->dmae_mutex);
++ bnx2x_gunzip_init(bp);
++
++ switch (load_code) {
++ case FW_MSG_CODE_DRV_LOAD_COMMON:
++ rc = bnx2x_init_common(bp);
++ if (rc)
++ goto init_hw_err;
++ /* no break */
++
++ case FW_MSG_CODE_DRV_LOAD_PORT:
++ bp->dmae_ready = 1;
++ rc = bnx2x_init_port(bp);
++ if (rc)
++ goto init_hw_err;
++ /* no break */
++
++ case FW_MSG_CODE_DRV_LOAD_FUNCTION:
++ bp->dmae_ready = 1;
++ rc = bnx2x_init_func(bp);
++ if (rc)
++ goto init_hw_err;
++ break;
++
++ default:
++ BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
++ break;
++ }
++
++ if (!BP_NOMCP(bp)) {
++ int func = BP_FUNC(bp);
++
++ bp->fw_drv_pulse_wr_seq =
++ (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
++ DRV_PULSE_SEQ_MASK);
++ bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
++ DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
++ bp->fw_drv_pulse_wr_seq, bp->func_stx);
++ } else
++ bp->func_stx = 0;
++
++ /* this needs to be done before gunzip end */
++ bnx2x_zero_def_sb(bp);
++ for_each_queue(bp, i)
++ bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
++
++init_hw_err:
++ bnx2x_gunzip_end(bp);
++
++ return rc;
++}
++
++/* send the MCP a request, block until there is a reply */
++static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
++{
++ int func = BP_FUNC(bp);
++ u32 seq = ++bp->fw_seq;
++ u32 rc = 0;
++ u32 cnt = 1;
++ u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
++
++ SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
++ DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
++
++ do {
++ /* let the FW do its magic ... */
++ msleep(delay);
++
++ rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
++
++ /* Give the FW up to 2 second (200*10ms) */
++ } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
++
++ DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
++ cnt*delay, rc, seq);
++
++ /* is this a reply to our command? */
++ if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
++ rc &= FW_MSG_CODE_MASK;
++
++ } else {
++ /* FW BUG! */
++ BNX2X_ERR("FW failed to respond!\n");
++ bnx2x_fw_dump(bp);
++ rc = 0;
++ }
++
++ return rc;
++}
++
++static void bnx2x_free_mem(struct bnx2x *bp)
++{
++
++#define BNX2X_PCI_FREE(x, y, size) \
++ do { \
++ if (x) { \
++ pci_free_consistent(bp->pdev, size, x, y); \
++ x = NULL; \
++ y = 0; \
++ } \
++ } while (0)
++
++#define BNX2X_FREE(x) \
++ do { \
++ if (x) { \
++ vfree(x); \
++ x = NULL; \
++ } \
++ } while (0)
++
++ int i;
++
++ /* fastpath */
++ for_each_queue(bp, i) {
++
++ /* Status blocks */
++ BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
++ bnx2x_fp(bp, i, status_blk_mapping),
++ sizeof(struct host_status_block) +
++ sizeof(struct eth_tx_db_data));
++
++ /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
++ BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
++ BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
++ bnx2x_fp(bp, i, tx_desc_mapping),
++ sizeof(struct eth_tx_bd) * NUM_TX_BD);
++
++ BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
++ BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
++ bnx2x_fp(bp, i, rx_desc_mapping),
++ sizeof(struct eth_rx_bd) * NUM_RX_BD);
++
++ BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
++ bnx2x_fp(bp, i, rx_comp_mapping),
++ sizeof(struct eth_fast_path_rx_cqe) *
++ NUM_RCQ_BD);
++
++ /* SGE ring */
++ BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
++ BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
++ bnx2x_fp(bp, i, rx_sge_mapping),
++ BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
++ }
++ /* end of fastpath */
++
++ BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
++ sizeof(struct host_def_status_block));
++
++ BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
++ sizeof(struct bnx2x_slowpath));
++
++#ifdef BCM_ISCSI
++ BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
++ BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
++ BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
++ BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
++#endif
++ BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
++
++#undef BNX2X_PCI_FREE
++#undef BNX2X_FREE
++}
++
++static int bnx2x_alloc_mem(struct bnx2x *bp)
++{
++
++#define BNX2X_PCI_ALLOC(x, y, size) \
++ do { \
++ x = pci_alloc_consistent(bp->pdev, size, y); \
++ if (x == NULL) \
++ goto alloc_mem_err; \
++ memset(x, 0, size); \
++ } while (0)
++
++#define BNX2X_ALLOC(x, size) \
++ do { \
++ x = vmalloc(size); \
++ if (x == NULL) \
++ goto alloc_mem_err; \
++ memset(x, 0, size); \
++ } while (0)
++
++ int i;
++
++ /* fastpath */
++ for_each_queue(bp, i) {
++ bnx2x_fp(bp, i, bp) = bp;
++
++ /* Status blocks */
++ BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
++ &bnx2x_fp(bp, i, status_blk_mapping),
++ sizeof(struct host_status_block) +
++ sizeof(struct eth_tx_db_data));
++
++ bnx2x_fp(bp, i, hw_tx_prods) =
++ (void *)(bnx2x_fp(bp, i, status_blk) + 1);
++
++ bnx2x_fp(bp, i, tx_prods_mapping) =
++ bnx2x_fp(bp, i, status_blk_mapping) +
++ sizeof(struct host_status_block);
++
++ /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
++ BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
++ sizeof(struct sw_tx_bd) * NUM_TX_BD);
++ BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
++ &bnx2x_fp(bp, i, tx_desc_mapping),
++ sizeof(struct eth_tx_bd) * NUM_TX_BD);
++
++ BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
++ sizeof(struct sw_rx_bd) * NUM_RX_BD);
++ BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
++ &bnx2x_fp(bp, i, rx_desc_mapping),
++ sizeof(struct eth_rx_bd) * NUM_RX_BD);
++
++ BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
++ &bnx2x_fp(bp, i, rx_comp_mapping),
++ sizeof(struct eth_fast_path_rx_cqe) *
++ NUM_RCQ_BD);
++
++ /* SGE ring */
++ BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
++ sizeof(struct sw_rx_page) * NUM_RX_SGE);
++ BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
++ &bnx2x_fp(bp, i, rx_sge_mapping),
++ BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
++ }
++ /* end of fastpath */
++
++ BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
++ sizeof(struct host_def_status_block));
++
++ BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
++ sizeof(struct bnx2x_slowpath));
++
++#ifdef BCM_ISCSI
++ BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
++
++ /* Initialize T1 */
++ for (i = 0; i < 64*1024; i += 64) {
++ *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
++ *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
++ }
++
++ /* allocate searcher T2 table
++ we allocate 1/4 of alloc num for T2
++ (which is not entered into the ILT) */
++ BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
++
++ /* Initialize T2 */
++ for (i = 0; i < 16*1024; i += 64)
++ * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
++
++ /* now fixup the last line in the block to point to the next block */
++ *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
++
++ /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
++ BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
++
++ /* QM queues (128*MAX_CONN) */
++ BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
++#endif
++
++ /* Slow path ring */
++ BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
++
++ return 0;
++
++alloc_mem_err:
++ bnx2x_free_mem(bp);
++ return -ENOMEM;
++
++#undef BNX2X_PCI_ALLOC
++#undef BNX2X_ALLOC
++}
++
++static void bnx2x_free_tx_skbs(struct bnx2x *bp)
++{
++ int i;
++
++ for_each_queue(bp, i) {
++ struct bnx2x_fastpath *fp = &bp->fp[i];
++
++ u16 bd_cons = fp->tx_bd_cons;
++ u16 sw_prod = fp->tx_pkt_prod;
++ u16 sw_cons = fp->tx_pkt_cons;
++
++ while (sw_cons != sw_prod) {
++ bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
++ sw_cons++;
++ }
++ }
++}
++
++static void bnx2x_free_rx_skbs(struct bnx2x *bp)
++{
++ int i, j;
++
++ for_each_queue(bp, j) {
++ struct bnx2x_fastpath *fp = &bp->fp[j];
++
++ for (i = 0; i < NUM_RX_BD; i++) {
++ struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
++ struct sk_buff *skb = rx_buf->skb;
++
++ if (skb == NULL)
++ continue;
++
++ pci_unmap_single(bp->pdev,
++ pci_unmap_addr(rx_buf, mapping),
++ bp->rx_buf_size,
++ PCI_DMA_FROMDEVICE);
++
++ rx_buf->skb = NULL;
++ dev_kfree_skb(skb);
++ }
++ if (!fp->disable_tpa)
++ bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
++ ETH_MAX_AGGREGATION_QUEUES_E1 :
++ ETH_MAX_AGGREGATION_QUEUES_E1H);
++ }
++}
++
++static void bnx2x_free_skbs(struct bnx2x *bp)
++{
++ bnx2x_free_tx_skbs(bp);
++ bnx2x_free_rx_skbs(bp);
++}
++
++static void bnx2x_free_msix_irqs(struct bnx2x *bp)
++{
++ int i, offset = 1;
++
++ free_irq(bp->msix_table[0].vector, bp->dev);
++ DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
++ bp->msix_table[0].vector);
++
++ for_each_queue(bp, i) {
++ DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
++ "state %x\n", i, bp->msix_table[i + offset].vector,
++ bnx2x_fp(bp, i, state));
++
++ if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
++ BNX2X_ERR("IRQ of fp #%d being freed while "
++ "state != closed\n", i);
++
++ free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
++ }
++}
++
++static void bnx2x_free_irq(struct bnx2x *bp)
++{
++ if (bp->flags & USING_MSIX_FLAG) {
++ bnx2x_free_msix_irqs(bp);
++ pci_disable_msix(bp->pdev);
++ bp->flags &= ~USING_MSIX_FLAG;
++
++ } else
++ free_irq(bp->pdev->irq, bp->dev);
++}
++
++static int bnx2x_enable_msix(struct bnx2x *bp)
++{
++ int i, rc, offset;
++
++ bp->msix_table[0].entry = 0;
++ offset = 1;
++ DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n");
++
++ for_each_queue(bp, i) {
++ int igu_vec = offset + i + BP_L_ID(bp);
++
++ bp->msix_table[i + offset].entry = igu_vec;
++ DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
++ "(fastpath #%u)\n", i + offset, igu_vec, i);
++ }
++
++ rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
++ bp->num_queues + offset);
++ if (rc) {
++ DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
++ return -1;
++ }
++ bp->flags |= USING_MSIX_FLAG;
++
++ return 0;
++}
++
++static int bnx2x_req_msix_irqs(struct bnx2x *bp)
++{
++ int i, rc, offset = 1;
++
++ rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
++ bp->dev->name, bp->dev);
++ if (rc) {
++ BNX2X_ERR("request sp irq failed\n");
++ return -EBUSY;
++ }
++
++ for_each_queue(bp, i) {
++ rc = request_irq(bp->msix_table[i + offset].vector,
++ bnx2x_msix_fp_int, 0,
++ bp->dev->name, &bp->fp[i]);
++ if (rc) {
++ BNX2X_ERR("request fp #%d irq failed rc -%d\n",
++ i + offset, -rc);
++ bnx2x_free_msix_irqs(bp);
++ return -EBUSY;
++ }
++
++ bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
++ }
++
++ return 0;
++}
++
++static int bnx2x_req_irq(struct bnx2x *bp)
++{
++ int rc;
++
++ rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
++ bp->dev->name, bp->dev);
++ if (!rc)
++ bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
++
++ return rc;
++}
++
++static void bnx2x_napi_enable(struct bnx2x *bp)
++{
++ int i;
++
++ for_each_queue(bp, i)
++ napi_enable(&bnx2x_fp(bp, i, napi));
++}
++
++static void bnx2x_napi_disable(struct bnx2x *bp)
++{
++ int i;
++
++ for_each_queue(bp, i)
++ napi_disable(&bnx2x_fp(bp, i, napi));
++}
++
++static void bnx2x_netif_start(struct bnx2x *bp)
++{
++ if (atomic_dec_and_test(&bp->intr_sem)) {
++ if (netif_running(bp->dev)) {
++ if (bp->state == BNX2X_STATE_OPEN)
++ netif_wake_queue(bp->dev);
++ bnx2x_napi_enable(bp);
++ bnx2x_int_enable(bp);
++ }
++ }
++}
++
++static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
++{
++ bnx2x_int_disable_sync(bp, disable_hw);
++ bnx2x_napi_disable(bp);
++ if (netif_running(bp->dev)) {
++ netif_tx_disable(bp->dev);
++ bp->dev->trans_start = jiffies; /* prevent tx timeout */
++ }
++}
++
++/*
++ * Init service functions
++ */
++
++static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
++{
++ struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
++ int port = BP_PORT(bp);
++
++ /* CAM allocation
++ * unicasts 0-31:port0 32-63:port1
++ * multicast 64-127:port0 128-191:port1
++ */
++ config->hdr.length = 2;
++ config->hdr.offset = port ? 32 : 0;
++ config->hdr.client_id = BP_CL_ID(bp);
++ config->hdr.reserved1 = 0;
++
++ /* primary MAC */
++ config->config_table[0].cam_entry.msb_mac_addr =
++ swab16(*(u16 *)&bp->dev->dev_addr[0]);
++ config->config_table[0].cam_entry.middle_mac_addr =
++ swab16(*(u16 *)&bp->dev->dev_addr[2]);
++ config->config_table[0].cam_entry.lsb_mac_addr =
++ swab16(*(u16 *)&bp->dev->dev_addr[4]);
++ config->config_table[0].cam_entry.flags = cpu_to_le16(port);
++ if (set)
++ config->config_table[0].target_table_entry.flags = 0;
++ else
++ CAM_INVALIDATE(config->config_table[0]);
++ config->config_table[0].target_table_entry.client_id = 0;
++ config->config_table[0].target_table_entry.vlan_id = 0;
++
++ DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
++ (set ? "setting" : "clearing"),
++ config->config_table[0].cam_entry.msb_mac_addr,
++ config->config_table[0].cam_entry.middle_mac_addr,
++ config->config_table[0].cam_entry.lsb_mac_addr);
++
++ /* broadcast */
++ config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
++ config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
++ config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
++ config->config_table[1].cam_entry.flags = cpu_to_le16(port);
++ if (set)
++ config->config_table[1].target_table_entry.flags =
++ TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
++ else
++ CAM_INVALIDATE(config->config_table[1]);
++ config->config_table[1].target_table_entry.client_id = 0;
++ config->config_table[1].target_table_entry.vlan_id = 0;
++
++ bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
++ U64_HI(bnx2x_sp_mapping(bp, mac_config)),
++ U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
++}
++
++static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
++{
++ struct mac_configuration_cmd_e1h *config =
++ (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
++
++ if (set && (bp->state != BNX2X_STATE_OPEN)) {
++ DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
++ return;
++ }
++
++ /* CAM allocation for E1H
++ * unicasts: by func number
++ * multicast: 20+FUNC*20, 20 each
++ */
++ config->hdr.length = 1;
++ config->hdr.offset = BP_FUNC(bp);
++ config->hdr.client_id = BP_CL_ID(bp);
++ config->hdr.reserved1 = 0;
++
++ /* primary MAC */
++ config->config_table[0].msb_mac_addr =
++ swab16(*(u16 *)&bp->dev->dev_addr[0]);
++ config->config_table[0].middle_mac_addr =
++ swab16(*(u16 *)&bp->dev->dev_addr[2]);
++ config->config_table[0].lsb_mac_addr =
++ swab16(*(u16 *)&bp->dev->dev_addr[4]);
++ config->config_table[0].client_id = BP_L_ID(bp);
++ config->config_table[0].vlan_id = 0;
++ config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
++ if (set)
++ config->config_table[0].flags = BP_PORT(bp);
++ else
++ config->config_table[0].flags =
++ MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
++
++ DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
++ (set ? "setting" : "clearing"),
++ config->config_table[0].msb_mac_addr,
++ config->config_table[0].middle_mac_addr,
++ config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
++
++ bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
++ U64_HI(bnx2x_sp_mapping(bp, mac_config)),
++ U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
++}
++
++static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
++ int *state_p, int poll)
++{
++ /* can take a while if any port is running */
++ int cnt = 500;
++
++ DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
++ poll ? "polling" : "waiting", state, idx);
++
++ might_sleep();
++ while (cnt--) {
++ if (poll) {
++ bnx2x_rx_int(bp->fp, 10);
++ /* if index is different from 0
++ * the reply for some commands will
++ * be on the non default queue
++ */
++ if (idx)
++ bnx2x_rx_int(&bp->fp[idx], 10);
++ }
++
++ mb(); /* state is changed by bnx2x_sp_event() */
++ if (*state_p == state)
++ return 0;
++
++ msleep(1);
++ }
++
++ /* timeout! */
++ BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
++ poll ? "polling" : "waiting", state, idx);
++#ifdef BNX2X_STOP_ON_ERROR
++ bnx2x_panic();
++#endif
++
++ return -EBUSY;
++}
++
++static int bnx2x_setup_leading(struct bnx2x *bp)
++{
++ int rc;
++
++ /* reset IGU state */
++ bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
++
++ /* SETUP ramrod */
++ bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
++
++ /* Wait for completion */
++ rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
++
++ return rc;
++}
++
++static int bnx2x_setup_multi(struct bnx2x *bp, int index)
++{
++ /* reset IGU state */
++ bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
++
++ /* SETUP ramrod */
++ bp->fp[index].state = BNX2X_FP_STATE_OPENING;
++ bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
++
++ /* Wait for completion */
++ return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
++ &(bp->fp[index].state), 0);
++}
++
++static int bnx2x_poll(struct napi_struct *napi, int budget);
++static void bnx2x_set_rx_mode(struct net_device *dev);
++
++/* must be called with rtnl_lock */
++static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
++{
++ u32 load_code;
++ int i, rc = 0;
++#ifdef BNX2X_STOP_ON_ERROR
++ if (unlikely(bp->panic))
++ return -EPERM;
++#endif
++
++ bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
++
++ if (use_inta) {
++ bp->num_queues = 1;
++
++ } else {
++ if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp)))
++ /* user requested number */
++ bp->num_queues = use_multi;
++
++ else if (use_multi)
++ bp->num_queues = min_t(u32, num_online_cpus(),
++ BP_MAX_QUEUES(bp));
++ else
++ bp->num_queues = 1;
++
++ DP(NETIF_MSG_IFUP,
++ "set number of queues to %d\n", bp->num_queues);
++
++ /* if we can't use MSI-X we only need one fp,
++ * so try to enable MSI-X with the requested number of fp's
++ * and fallback to MSI or legacy INTx with one fp
++ */
++ rc = bnx2x_enable_msix(bp);
++ if (rc) {
++ /* failed to enable MSI-X */
++ bp->num_queues = 1;
++ if (use_multi)
++ BNX2X_ERR("Multi requested but failed"
++ " to enable MSI-X\n");
++ }
++ }
++
++ if (bnx2x_alloc_mem(bp))
++ return -ENOMEM;
++
++ for_each_queue(bp, i)
++ bnx2x_fp(bp, i, disable_tpa) =
++ ((bp->flags & TPA_ENABLE_FLAG) == 0);
++
++ for_each_queue(bp, i)
++ netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
++ bnx2x_poll, 128);
++
++#ifdef BNX2X_STOP_ON_ERROR
++ for_each_queue(bp, i) {
++ struct bnx2x_fastpath *fp = &bp->fp[i];
++
++ fp->poll_no_work = 0;
++ fp->poll_calls = 0;
++ fp->poll_max_calls = 0;
++ fp->poll_complete = 0;
++ fp->poll_exit = 0;
++ }
++#endif
++ bnx2x_napi_enable(bp);
++
++ if (bp->flags & USING_MSIX_FLAG) {
++ rc = bnx2x_req_msix_irqs(bp);
++ if (rc) {
++ pci_disable_msix(bp->pdev);
++ goto load_error1;
++ }
++ printk(KERN_INFO PFX "%s: using MSI-X\n", bp->dev->name);
++ } else {
++ bnx2x_ack_int(bp);
++ rc = bnx2x_req_irq(bp);
++ if (rc) {
++ BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
++ goto load_error1;
++ }
++ }
++
++ /* Send LOAD_REQUEST command to MCP
++ Returns the type of LOAD command:
++ if it is the first port to be initialized
++ common blocks should be initialized, otherwise - not
++ */
++ if (!BP_NOMCP(bp)) {
++ load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
++ if (!load_code) {
++ BNX2X_ERR("MCP response failure, aborting\n");
++ rc = -EBUSY;
++ goto load_error2;
++ }
++ if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
++ rc = -EBUSY; /* other port in diagnostic mode */
++ goto load_error2;
++ }
++
++ } else {
++ int port = BP_PORT(bp);
++
++ DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
++ load_count[0], load_count[1], load_count[2]);
++ load_count[0]++;
++ load_count[1 + port]++;
++ DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
++ load_count[0], load_count[1], load_count[2]);
++ if (load_count[0] == 1)
++ load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
++ else if (load_count[1 + port] == 1)
++ load_code = FW_MSG_CODE_DRV_LOAD_PORT;
++ else
++ load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
++ }
++
++ if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
++ (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
++ bp->port.pmf = 1;
++ else
++ bp->port.pmf = 0;
++ DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
++
++ /* Initialize HW */
++ rc = bnx2x_init_hw(bp, load_code);
++ if (rc) {
++ BNX2X_ERR("HW init failed, aborting\n");
++ goto load_error2;
++ }
++
++ /* Setup NIC internals and enable interrupts */
++ bnx2x_nic_init(bp, load_code);
++
++ /* Send LOAD_DONE command to MCP */
++ if (!BP_NOMCP(bp)) {
++ load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
++ if (!load_code) {
++ BNX2X_ERR("MCP response failure, aborting\n");
++ rc = -EBUSY;
++ goto load_error3;
++ }
++ }
++
++ bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
++
++ rc = bnx2x_setup_leading(bp);
++ if (rc) {
++ BNX2X_ERR("Setup leading failed!\n");
++ goto load_error3;
++ }
++
++ if (CHIP_IS_E1H(bp))
++ if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
++ BNX2X_ERR("!!! mf_cfg function disabled\n");
++ bp->state = BNX2X_STATE_DISABLED;
++ }
++
++ if (bp->state == BNX2X_STATE_OPEN)
++ for_each_nondefault_queue(bp, i) {
++ rc = bnx2x_setup_multi(bp, i);
++ if (rc)
++ goto load_error3;
++ }
++
++ if (CHIP_IS_E1(bp))
++ bnx2x_set_mac_addr_e1(bp, 1);
++ else
++ bnx2x_set_mac_addr_e1h(bp, 1);
++
++ if (bp->port.pmf)
++ bnx2x_initial_phy_init(bp);
++
++ /* Start fast path */
++ switch (load_mode) {
++ case LOAD_NORMAL:
++ /* Tx queue should be only reenabled */
++ netif_wake_queue(bp->dev);
++ /* Initialize the receive filter. */
++ bnx2x_set_rx_mode(bp->dev);
++ break;
++
++ case LOAD_OPEN:
++ netif_start_queue(bp->dev);
++ /* Initialize the receive filter. */
++ bnx2x_set_rx_mode(bp->dev);
++ break;
++
++ case LOAD_DIAG:
++ /* Initialize the receive filter. */
++ bnx2x_set_rx_mode(bp->dev);
++ bp->state = BNX2X_STATE_DIAG;
++ break;
++
++ default:
++ break;
++ }
++
++ if (!bp->port.pmf)
++ bnx2x__link_status_update(bp);
++
++ /* start the timer */
++ mod_timer(&bp->timer, jiffies + bp->current_interval);
++
++
++ return 0;
++
++load_error3:
++ bnx2x_int_disable_sync(bp, 1);
++ if (!BP_NOMCP(bp)) {
++ bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
++ bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
++ }
++ bp->port.pmf = 0;
++ /* Free SKBs, SGEs, TPA pool and driver internals */
++ bnx2x_free_skbs(bp);
++ for_each_queue(bp, i)
++ bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
++load_error2:
++ /* Release IRQs */
++ bnx2x_free_irq(bp);
++load_error1:
++ bnx2x_napi_disable(bp);
++ bnx2x_free_mem(bp);
++
++ /* TBD we really need to reset the chip
++ if we want to recover from this */
++ return rc;
++}
++
++static int bnx2x_stop_multi(struct bnx2x *bp, int index)
++{
++ int rc;
++
++ /* halt the connection */
++ bp->fp[index].state = BNX2X_FP_STATE_HALTING;
++ bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, index, 0);
++
++ /* Wait for completion */
++ rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
++ &(bp->fp[index].state), 1);
++ if (rc) /* timeout */
++ return rc;
++
++ /* delete cfc entry */
++ bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
++
++ /* Wait for completion */
++ rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
++ &(bp->fp[index].state), 1);
++ return rc;
++}
++
++static int bnx2x_stop_leading(struct bnx2x *bp)
++{
++ u16 dsb_sp_prod_idx;
++ /* if the other port is handling traffic,
++ this can take a lot of time */
++ int cnt = 500;
++ int rc;
++
++ might_sleep();
++
++ /* Send HALT ramrod */
++ bp->fp[0].state = BNX2X_FP_STATE_HALTING;
++ bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
++
++ /* Wait for completion */
++ rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
++ &(bp->fp[0].state), 1);
++ if (rc) /* timeout */
++ return rc;
++
++ dsb_sp_prod_idx = *bp->dsb_sp_prod;
++
++ /* Send PORT_DELETE ramrod */
++ bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
++
++ /* Wait for completion to arrive on default status block
++ we are going to reset the chip anyway
++ so there is not much to do if this times out
++ */
++ while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
++ if (!cnt) {
++ DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
++ "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
++ *bp->dsb_sp_prod, dsb_sp_prod_idx);
++#ifdef BNX2X_STOP_ON_ERROR
++ bnx2x_panic();
++#else
++ rc = -EBUSY;
++#endif
++ break;
++ }
++ cnt--;
++ msleep(1);
++ rmb(); /* Refresh the dsb_sp_prod */
++ }
++ bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
++ bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
++
++ return rc;
++}
++
++static void bnx2x_reset_func(struct bnx2x *bp)
++{
++ int port = BP_PORT(bp);
++ int func = BP_FUNC(bp);
++ int base, i;
++
++ /* Configure IGU */
++ REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
++ REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
++
++ REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
++
++ /* Clear ILT */
++ base = FUNC_ILT_BASE(func);
++ for (i = base; i < base + ILT_PER_FUNC; i++)
++ bnx2x_ilt_wr(bp, i, 0);
++}
++
++static void bnx2x_reset_port(struct bnx2x *bp)
++{
++ int port = BP_PORT(bp);
++ u32 val;
++
++ REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
++
++ /* Do not rcv packets to BRB */
++ REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
++ /* Do not direct rcv packets that are not for MCP to the BRB */
++ REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
++ NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
++
++ /* Configure AEU */
++ REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
++
++ msleep(100);
++ /* Check for BRB port occupancy */
++ val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
++ if (val)
++ DP(NETIF_MSG_IFDOWN,
++ "BRB1 is not empty %d blocks are occupied\n", val);
++
++ /* TODO: Close Doorbell port? */
++}
++
++static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
++{
++ DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
++ BP_FUNC(bp), reset_code);
++
++ switch (reset_code) {
++ case FW_MSG_CODE_DRV_UNLOAD_COMMON:
++ bnx2x_reset_port(bp);
++ bnx2x_reset_func(bp);
++ bnx2x_reset_common(bp);
++ break;
++
++ case FW_MSG_CODE_DRV_UNLOAD_PORT:
++ bnx2x_reset_port(bp);
++ bnx2x_reset_func(bp);
++ break;
++
++ case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
++ bnx2x_reset_func(bp);
++ break;
++
++ default:
++ BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
++ break;
++ }
++}
++
++/* must be called with rtnl_lock */
++static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
++{
++ int port = BP_PORT(bp);
++ u32 reset_code = 0;
++ int i, cnt, rc;
++
++ bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
++
++ bp->rx_mode = BNX2X_RX_MODE_NONE;
++ bnx2x_set_storm_rx_mode(bp);
++
++ bnx2x_netif_stop(bp, 1);
++
++ del_timer_sync(&bp->timer);
++ SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
++ (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
++ bnx2x_stats_handle(bp, STATS_EVENT_STOP);
++
++ /* Release IRQs */
++ bnx2x_free_irq(bp);
++
++ /* Wait until tx fast path tasks complete */
++ for_each_queue(bp, i) {
++ struct bnx2x_fastpath *fp = &bp->fp[i];
++
++ cnt = 1000;
++ smp_rmb();
++ while (bnx2x_has_tx_work_unload(fp)) {
++
++ bnx2x_tx_int(fp, 1000);
++ if (!cnt) {
++ BNX2X_ERR("timeout waiting for queue[%d]\n",
++ i);
++#ifdef BNX2X_STOP_ON_ERROR
++ bnx2x_panic();
++ return -EBUSY;
++#else
++ break;
++#endif
++ }
++ cnt--;
++ msleep(1);
++ smp_rmb();
++ }
++ }
++ /* Give HW time to discard old tx messages */
++ msleep(1);
++
++ if (CHIP_IS_E1(bp)) {
++ struct mac_configuration_cmd *config =
++ bnx2x_sp(bp, mcast_config);
++
++ bnx2x_set_mac_addr_e1(bp, 0);
++
++ for (i = 0; i < config->hdr.length; i++)
++ CAM_INVALIDATE(config->config_table[i]);
++
++ config->hdr.length = i;
++ if (CHIP_REV_IS_SLOW(bp))
++ config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
++ else
++ config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
++ config->hdr.client_id = BP_CL_ID(bp);
++ config->hdr.reserved1 = 0;
++
++ bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
++ U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
++ U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
++
++ } else { /* E1H */
++ REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
++
++ bnx2x_set_mac_addr_e1h(bp, 0);
++
++ for (i = 0; i < MC_HASH_SIZE; i++)
++ REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
++ }
++
++ if (unload_mode == UNLOAD_NORMAL)
++ reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
++
++ else if (bp->flags & NO_WOL_FLAG) {
++ reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
++ if (CHIP_IS_E1H(bp))
++ REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
++
++ } else if (bp->wol) {
++ u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
++ u8 *mac_addr = bp->dev->dev_addr;
++ u32 val;
++ /* The mac address is written to entries 1-4 to
++ preserve entry 0 which is used by the PMF */
++ u8 entry = (BP_E1HVN(bp) + 1)*8;
++
++ val = (mac_addr[0] << 8) | mac_addr[1];
++ EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
++
++ val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
++ (mac_addr[4] << 8) | mac_addr[5];
++ EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
++
++ reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
++
++ } else
++ reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
++
++ /* Close multi and leading connections
++ Completions for ramrods are collected in a synchronous way */
++ for_each_nondefault_queue(bp, i)
++ if (bnx2x_stop_multi(bp, i))
++ goto unload_error;
++
++ rc = bnx2x_stop_leading(bp);
++ if (rc) {
++ BNX2X_ERR("Stop leading failed!\n");
++#ifdef BNX2X_STOP_ON_ERROR
++ return -EBUSY;
++#else
++ goto unload_error;
++#endif
++ }
++
++unload_error:
++ if (!BP_NOMCP(bp))
++ reset_code = bnx2x_fw_command(bp, reset_code);
++ else {
++ DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
++ load_count[0], load_count[1], load_count[2]);
++ load_count[0]--;
++ load_count[1 + port]--;
++ DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
++ load_count[0], load_count[1], load_count[2]);
++ if (load_count[0] == 0)
++ reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
++ else if (load_count[1 + port] == 0)
++ reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
++ else
++ reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
++ }
++
++ if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
++ (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
++ bnx2x__link_reset(bp);
++
++ /* Reset the chip */
++ bnx2x_reset_chip(bp, reset_code);
++
++ /* Report UNLOAD_DONE to MCP */
++ if (!BP_NOMCP(bp))
++ bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
++ bp->port.pmf = 0;
++
++ /* Free SKBs, SGEs, TPA pool and driver internals */
++ bnx2x_free_skbs(bp);
++ for_each_queue(bp, i)
++ bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
++ bnx2x_free_mem(bp);
++
++ bp->state = BNX2X_STATE_CLOSED;
++
++ netif_carrier_off(bp->dev);
++
++ return 0;
++}
++
++static void bnx2x_reset_task(struct work_struct *work)
++{
++ struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
++
++#ifdef BNX2X_STOP_ON_ERROR
++ BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
++ " so reset not done to allow debug dump,\n"
++ KERN_ERR " you will need to reboot when done\n");
++ return;
++#endif
++
++ rtnl_lock();
++
++ if (!netif_running(bp->dev))
++ goto reset_task_exit;
++
++ bnx2x_nic_unload(bp, UNLOAD_NORMAL);
++ bnx2x_nic_load(bp, LOAD_NORMAL);
++
++reset_task_exit:
++ rtnl_unlock();
++}
++
++/* end of nic load/unload */
++
++/* ethtool_ops */
++
++/*
++ * Init service functions
++ */
++
++static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
++{
++ u32 val;
++
++ /* Check if there is any driver already loaded */
++ val = REG_RD(bp, MISC_REG_UNPREPARED);
++ if (val == 0x1) {
++ /* Check if it is the UNDI driver
++ * UNDI driver initializes CID offset for normal bell to 0x7
++ */
++ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
++ val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
++ if (val == 0x7) {
++ u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
++ /* save our func */
++ int func = BP_FUNC(bp);
++ u32 swap_en;
++ u32 swap_val;
++
++ /* clear the UNDI indication */
++ REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
++
++ BNX2X_DEV_INFO("UNDI is active! reset device\n");
++
++ /* try unload UNDI on port 0 */
++ bp->func = 0;
++ bp->fw_seq =
++ (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
++ DRV_MSG_SEQ_NUMBER_MASK);
++ reset_code = bnx2x_fw_command(bp, reset_code);
++
++ /* if UNDI is loaded on the other port */
++ if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
++
++ /* send "DONE" for previous unload */
++ bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
++
++ /* unload UNDI on port 1 */
++ bp->func = 1;
++ bp->fw_seq =
++ (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
++ DRV_MSG_SEQ_NUMBER_MASK);
++ reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
++
++ bnx2x_fw_command(bp, reset_code);
++ }
++
++ /* now it's safe to release the lock */
++ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
++
++ REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
++ HC_REG_CONFIG_0), 0x1000);
++
++ /* close input traffic and wait for it */
++ /* Do not rcv packets to BRB */
++ REG_WR(bp,
++ (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
++ NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
++ /* Do not direct rcv packets that are not for MCP to
++ * the BRB */
++ REG_WR(bp,
++ (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
++ NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
++ /* clear AEU */
++ REG_WR(bp,
++ (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
++ MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
++ msleep(10);
++
++ /* save NIG port swap info */
++ swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
++ swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
++ /* reset device */
++ REG_WR(bp,
++ GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
++ 0xd3ffffff);
++ REG_WR(bp,
++ GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
++ 0x1403);
++ /* take the NIG out of reset and restore swap values */
++ REG_WR(bp,
++ GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
++ MISC_REGISTERS_RESET_REG_1_RST_NIG);
++ REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
++ REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
++
++ /* send unload done to the MCP */
++ bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
++
++ /* restore our func and fw_seq */
++ bp->func = func;
++ bp->fw_seq =
++ (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
++ DRV_MSG_SEQ_NUMBER_MASK);
++
++ } else
++ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
++ }
++}
++
++static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
++{
++ u32 val, val2, val3, val4, id;
++ u16 pmc;
++
++ /* Get the chip revision id and number. */
++ /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
++ val = REG_RD(bp, MISC_REG_CHIP_NUM);
++ id = ((val & 0xffff) << 16);
++ val = REG_RD(bp, MISC_REG_CHIP_REV);
++ id |= ((val & 0xf) << 12);
++ val = REG_RD(bp, MISC_REG_CHIP_METAL);
++ id |= ((val & 0xff) << 4);
++ val = REG_RD(bp, MISC_REG_BOND_ID);
++ id |= (val & 0xf);
++ bp->common.chip_id = id;
++ bp->link_params.chip_id = bp->common.chip_id;
++ BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
++
++ val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
++ bp->common.flash_size = (NVRAM_1MB_SIZE <<
++ (val & MCPR_NVM_CFG4_FLASH_SIZE));
++ BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
++ bp->common.flash_size, bp->common.flash_size);
++
++ bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
++ bp->link_params.shmem_base = bp->common.shmem_base;
++ BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
++
++ if (!bp->common.shmem_base ||
++ (bp->common.shmem_base < 0xA0000) ||
++ (bp->common.shmem_base >= 0xC0000)) {
++ BNX2X_DEV_INFO("MCP not active\n");
++ bp->flags |= NO_MCP_FLAG;
++ return;
++ }
++
++ val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
++ if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
++ != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
++ BNX2X_ERR("BAD MCP validity signature\n");
++
++ bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
++ bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
++
++ BNX2X_DEV_INFO("hw_config 0x%08x board 0x%08x\n",
++ bp->common.hw_config, bp->common.board);
++
++ bp->link_params.hw_led_mode = ((bp->common.hw_config &
++ SHARED_HW_CFG_LED_MODE_MASK) >>
++ SHARED_HW_CFG_LED_MODE_SHIFT);
++
++ val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
++ bp->common.bc_ver = val;
++ BNX2X_DEV_INFO("bc_ver %X\n", val);
++ if (val < BNX2X_BC_VER) {
++ /* for now only warn
++ * later we might need to enforce this */
++ BNX2X_ERR("This driver needs bc_ver %X but found %X,"
++ " please upgrade BC\n", BNX2X_BC_VER, val);
++ }
++
++ if (BP_E1HVN(bp) == 0) {
++ pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
++ bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
++ } else {
++ /* no WOL capability for E1HVN != 0 */
++ bp->flags |= NO_WOL_FLAG;
++ }
++ BNX2X_DEV_INFO("%sWoL capable\n",
++ (bp->flags & NO_WOL_FLAG) ? "Not " : "");
++
++ val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
++ val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
++ val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
++ val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
++
++ printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
++ val, val2, val3, val4);
++}
++
++static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
++ u32 switch_cfg)
++{
++ int port = BP_PORT(bp);
++ u32 ext_phy_type;
++
++ switch (switch_cfg) {
++ case SWITCH_CFG_1G:
++ BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
++
++ ext_phy_type =
++ SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
++ switch (ext_phy_type) {
++ case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
++ BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
++ ext_phy_type);
++
++ bp->port.supported |= (SUPPORTED_10baseT_Half |
++ SUPPORTED_10baseT_Full |
++ SUPPORTED_100baseT_Half |
++ SUPPORTED_100baseT_Full |
++ SUPPORTED_1000baseT_Full |
++ SUPPORTED_2500baseX_Full |
++ SUPPORTED_TP |
++ SUPPORTED_FIBRE |
++ SUPPORTED_Autoneg |
++ SUPPORTED_Pause |
++ SUPPORTED_Asym_Pause);
++ break;
++
++ case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
++ BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
++ ext_phy_type);
++
++ bp->port.supported |= (SUPPORTED_10baseT_Half |
++ SUPPORTED_10baseT_Full |
++ SUPPORTED_100baseT_Half |
++ SUPPORTED_100baseT_Full |
++ SUPPORTED_1000baseT_Full |
++ SUPPORTED_TP |
++ SUPPORTED_FIBRE |
++ SUPPORTED_Autoneg |
++ SUPPORTED_Pause |
++ SUPPORTED_Asym_Pause);
++ break;
++
++ default:
++ BNX2X_ERR("NVRAM config error. "
++ "BAD SerDes ext_phy_config 0x%x\n",
++ bp->link_params.ext_phy_config);
++ return;
++ }
++
++ bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
++ port*0x10);
++ BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
++ break;
++
++ case SWITCH_CFG_10G:
++ BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
++
++ ext_phy_type =
++ XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
++ switch (ext_phy_type) {
++ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
++ BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
++ ext_phy_type);
++
++ bp->port.supported |= (SUPPORTED_10baseT_Half |
++ SUPPORTED_10baseT_Full |
++ SUPPORTED_100baseT_Half |
++ SUPPORTED_100baseT_Full |
++ SUPPORTED_1000baseT_Full |
++ SUPPORTED_2500baseX_Full |
++ SUPPORTED_10000baseT_Full |
++ SUPPORTED_TP |
++ SUPPORTED_FIBRE |
++ SUPPORTED_Autoneg |
++ SUPPORTED_Pause |
++ SUPPORTED_Asym_Pause);
++ break;
++
++ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
++ BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
++ ext_phy_type);
++
++ bp->port.supported |= (SUPPORTED_10000baseT_Full |
++ SUPPORTED_FIBRE |
++ SUPPORTED_Pause |
++ SUPPORTED_Asym_Pause);
++ break;
++
++ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
++ BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
++ ext_phy_type);
++
++ bp->port.supported |= (SUPPORTED_10000baseT_Full |
++ SUPPORTED_1000baseT_Full |
++ SUPPORTED_FIBRE |
++ SUPPORTED_Pause |
++ SUPPORTED_Asym_Pause);
++ break;
++
++ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
++ BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
++ ext_phy_type);
++
++ bp->port.supported |= (SUPPORTED_10000baseT_Full |
++ SUPPORTED_1000baseT_Full |
++ SUPPORTED_FIBRE |
++ SUPPORTED_Autoneg |
++ SUPPORTED_Pause |
++ SUPPORTED_Asym_Pause);
++ break;
++
++ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
++ BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
++ ext_phy_type);
++
++ bp->port.supported |= (SUPPORTED_10000baseT_Full |
++ SUPPORTED_2500baseX_Full |
++ SUPPORTED_1000baseT_Full |
++ SUPPORTED_FIBRE |
++ SUPPORTED_Autoneg |
++ SUPPORTED_Pause |
++ SUPPORTED_Asym_Pause);
++ break;
++
++ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
++ BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
++ ext_phy_type);
++
++ bp->port.supported |= (SUPPORTED_10000baseT_Full |
++ SUPPORTED_TP |
++ SUPPORTED_Autoneg |
++ SUPPORTED_Pause |
++ SUPPORTED_Asym_Pause);
++ break;
++
++ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
++ BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
++ bp->link_params.ext_phy_config);
++ break;
++
++ default:
++ BNX2X_ERR("NVRAM config error. "
++ "BAD XGXS ext_phy_config 0x%x\n",
++ bp->link_params.ext_phy_config);
++ return;
++ }
++
++ bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
++ port*0x18);
++ BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
++
++ break;
++
++ default:
++ BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
++ bp->port.link_config);
++ return;
++ }
++ bp->link_params.phy_addr = bp->port.phy_addr;
++
++ /* mask what we support according to speed_cap_mask */
++ if (!(bp->link_params.speed_cap_mask &
++ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
++ bp->port.supported &= ~SUPPORTED_10baseT_Half;
++
++ if (!(bp->link_params.speed_cap_mask &
++ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
++ bp->port.supported &= ~SUPPORTED_10baseT_Full;
++
++ if (!(bp->link_params.speed_cap_mask &
++ PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
++ bp->port.supported &= ~SUPPORTED_100baseT_Half;
++
++ if (!(bp->link_params.speed_cap_mask &
++ PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
++ bp->port.supported &= ~SUPPORTED_100baseT_Full;
++
++ if (!(bp->link_params.speed_cap_mask &
++ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
++ bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
++ SUPPORTED_1000baseT_Full);
++
++ if (!(bp->link_params.speed_cap_mask &
++ PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
++ bp->port.supported &= ~SUPPORTED_2500baseX_Full;
++
++ if (!(bp->link_params.speed_cap_mask &
++ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
++ bp->port.supported &= ~SUPPORTED_10000baseT_Full;
++
++ BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
++}
++
++static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
++{
++ bp->link_params.req_duplex = DUPLEX_FULL;
++
++ switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
++ case PORT_FEATURE_LINK_SPEED_AUTO:
++ if (bp->port.supported & SUPPORTED_Autoneg) {
++ bp->link_params.req_line_speed = SPEED_AUTO_NEG;
++ bp->port.advertising = bp->port.supported;
++ } else {
++ u32 ext_phy_type =
++ XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
++
++ if ((ext_phy_type ==
++ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
++ (ext_phy_type ==
++ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
++ /* force 10G, no AN */
++ bp->link_params.req_line_speed = SPEED_10000;
++ bp->port.advertising =
++ (ADVERTISED_10000baseT_Full |
++ ADVERTISED_FIBRE);
++ break;
++ }
++ BNX2X_ERR("NVRAM config error. "
++ "Invalid link_config 0x%x"
++ " Autoneg not supported\n",
++ bp->port.link_config);
++ return;
++ }
++ break;
++
++ case PORT_FEATURE_LINK_SPEED_10M_FULL:
++ if (bp->port.supported & SUPPORTED_10baseT_Full) {
++ bp->link_params.req_line_speed = SPEED_10;
++ bp->port.advertising = (ADVERTISED_10baseT_Full |
++ ADVERTISED_TP);
++ } else {
++ BNX2X_ERR("NVRAM config error. "
++ "Invalid link_config 0x%x"
++ " speed_cap_mask 0x%x\n",
++ bp->port.link_config,
++ bp->link_params.speed_cap_mask);
++ return;
++ }
++ break;
++
++ case PORT_FEATURE_LINK_SPEED_10M_HALF:
++ if (bp->port.supported & SUPPORTED_10baseT_Half) {
++ bp->link_params.req_line_speed = SPEED_10;
++ bp->link_params.req_duplex = DUPLEX_HALF;
++ bp->port.advertising = (ADVERTISED_10baseT_Half |
++ ADVERTISED_TP);
++ } else {
++ BNX2X_ERR("NVRAM config error. "
++ "Invalid link_config 0x%x"
++ " speed_cap_mask 0x%x\n",
++ bp->port.link_config,
++ bp->link_params.speed_cap_mask);
++ return;
++ }
++ break;
++
++ case PORT_FEATURE_LINK_SPEED_100M_FULL:
++ if (bp->port.supported & SUPPORTED_100baseT_Full) {
++ bp->link_params.req_line_speed = SPEED_100;
++ bp->port.advertising = (ADVERTISED_100baseT_Full |
++ ADVERTISED_TP);
++ } else {
++ BNX2X_ERR("NVRAM config error. "
++ "Invalid link_config 0x%x"
++ " speed_cap_mask 0x%x\n",
++ bp->port.link_config,
++ bp->link_params.speed_cap_mask);
++ return;
++ }
++ break;
++
++ case PORT_FEATURE_LINK_SPEED_100M_HALF:
++ if (bp->port.supported & SUPPORTED_100baseT_Half) {
++ bp->link_params.req_line_speed = SPEED_100;
++ bp->link_params.req_duplex = DUPLEX_HALF;
++ bp->port.advertising = (ADVERTISED_100baseT_Half |
++ ADVERTISED_TP);
++ } else {
++ BNX2X_ERR("NVRAM config error. "
++ "Invalid link_config 0x%x"
++ " speed_cap_mask 0x%x\n",
++ bp->port.link_config,
++ bp->link_params.speed_cap_mask);
++ return;
++ }
++ break;
++
++ case PORT_FEATURE_LINK_SPEED_1G:
++ if (bp->port.supported & SUPPORTED_1000baseT_Full) {
++ bp->link_params.req_line_speed = SPEED_1000;
++ bp->port.advertising = (ADVERTISED_1000baseT_Full |
++ ADVERTISED_TP);
++ } else {
++ BNX2X_ERR("NVRAM config error. "
++ "Invalid link_config 0x%x"
++ " speed_cap_mask 0x%x\n",
++ bp->port.link_config,
++ bp->link_params.speed_cap_mask);
++ return;
++ }
++ break;
++
++ case PORT_FEATURE_LINK_SPEED_2_5G:
++ if (bp->port.supported & SUPPORTED_2500baseX_Full) {
++ bp->link_params.req_line_speed = SPEED_2500;
++ bp->port.advertising = (ADVERTISED_2500baseX_Full |
++ ADVERTISED_TP);
++ } else {
++ BNX2X_ERR("NVRAM config error. "
++ "Invalid link_config 0x%x"
++ " speed_cap_mask 0x%x\n",
++ bp->port.link_config,
++ bp->link_params.speed_cap_mask);
++ return;
++ }
++ break;
++
++ case PORT_FEATURE_LINK_SPEED_10G_CX4:
++ case PORT_FEATURE_LINK_SPEED_10G_KX4:
++ case PORT_FEATURE_LINK_SPEED_10G_KR:
++ if (bp->port.supported & SUPPORTED_10000baseT_Full) {
++ bp->link_params.req_line_speed = SPEED_10000;
++ bp->port.advertising = (ADVERTISED_10000baseT_Full |
++ ADVERTISED_FIBRE);
++ } else {
++ BNX2X_ERR("NVRAM config error. "
++ "Invalid link_config 0x%x"
++ " speed_cap_mask 0x%x\n",
++ bp->port.link_config,
++ bp->link_params.speed_cap_mask);
++ return;
++ }
++ break;
++
++ default:
++ BNX2X_ERR("NVRAM config error. "
++ "BAD link speed link_config 0x%x\n",
++ bp->port.link_config);
++ bp->link_params.req_line_speed = SPEED_AUTO_NEG;
++ bp->port.advertising = bp->port.supported;
++ break;
++ }
++
++ bp->link_params.req_flow_ctrl = (bp->port.link_config &
++ PORT_FEATURE_FLOW_CONTROL_MASK);
++ if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
++ !(bp->port.supported & SUPPORTED_Autoneg))
++ bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
++
++ BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
++ " advertising 0x%x\n",
++ bp->link_params.req_line_speed,
++ bp->link_params.req_duplex,
++ bp->link_params.req_flow_ctrl, bp->port.advertising);
++}
++
++static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
++{
++ int port = BP_PORT(bp);
++ u32 val, val2;
++
++ bp->link_params.bp = bp;
++ bp->link_params.port = port;
++
++ bp->link_params.serdes_config =
++ SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
++ bp->link_params.lane_config =
++ SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
++ bp->link_params.ext_phy_config =
++ SHMEM_RD(bp,
++ dev_info.port_hw_config[port].external_phy_config);
++ bp->link_params.speed_cap_mask =
++ SHMEM_RD(bp,
++ dev_info.port_hw_config[port].speed_capability_mask);
++
++ bp->port.link_config =
++ SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
++
++ BNX2X_DEV_INFO("serdes_config 0x%08x lane_config 0x%08x\n"
++ KERN_INFO " ext_phy_config 0x%08x speed_cap_mask 0x%08x"
++ " link_config 0x%08x\n",
++ bp->link_params.serdes_config,
++ bp->link_params.lane_config,
++ bp->link_params.ext_phy_config,
++ bp->link_params.speed_cap_mask, bp->port.link_config);
++
++ bp->link_params.switch_cfg = (bp->port.link_config &
++ PORT_FEATURE_CONNECTED_SWITCH_MASK);
++ bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
++
++ bnx2x_link_settings_requested(bp);
++
++ val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
++ val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
++ bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
++ bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
++ bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
++ bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
++ bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
++ bp->dev->dev_addr[5] = (u8)(val & 0xff);
++ memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
++ memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
++}
++
++static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
++{
++ int func = BP_FUNC(bp);
++ u32 val, val2;
++ int rc = 0;
++
++ bnx2x_get_common_hwinfo(bp);
++
++ bp->e1hov = 0;
++ bp->e1hmf = 0;
++ if (CHIP_IS_E1H(bp)) {
++ bp->mf_config =
++ SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
++
++ val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
++ FUNC_MF_CFG_E1HOV_TAG_MASK);
++ if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
++
++ bp->e1hov = val;
++ bp->e1hmf = 1;
++ BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
++ "(0x%04x)\n",
++ func, bp->e1hov, bp->e1hov);
++ } else {
++ BNX2X_DEV_INFO("Single function mode\n");
++ if (BP_E1HVN(bp)) {
++ BNX2X_ERR("!!! No valid E1HOV for func %d,"
++ " aborting\n", func);
++ rc = -EPERM;
++ }
++ }
++ }
++
++ if (!BP_NOMCP(bp)) {
++ bnx2x_get_port_hwinfo(bp);
++
++ bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
++ DRV_MSG_SEQ_NUMBER_MASK);
++ BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
++ }
++
++ if (IS_E1HMF(bp)) {
++ val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
++ val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
++ if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
++ (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
++ bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
++ bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
++ bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
++ bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
++ bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
++ bp->dev->dev_addr[5] = (u8)(val & 0xff);
++ memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
++ ETH_ALEN);
++ memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
++ ETH_ALEN);
++ }
++
++ return rc;
++ }
++
++ if (BP_NOMCP(bp)) {
++ /* only supposed to happen on emulation/FPGA */
++ BNX2X_ERR("warning random MAC workaround active\n");
++ random_ether_addr(bp->dev->dev_addr);
++ memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
++ }
++
++ return rc;
++}
++
++static int __devinit bnx2x_init_bp(struct bnx2x *bp)
++{
++ int func = BP_FUNC(bp);
++ int rc;
++
++ /* Disable interrupt handling until HW is initialized */
++ atomic_set(&bp->intr_sem, 1);
++
++ mutex_init(&bp->port.phy_mutex);
++
++ INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
++ INIT_WORK(&bp->reset_task, bnx2x_reset_task);
++
++ rc = bnx2x_get_hwinfo(bp);
++
++ /* need to reset chip if undi was active */
++ if (!BP_NOMCP(bp))
++ bnx2x_undi_unload(bp);
++
++ if (CHIP_REV_IS_FPGA(bp))
++ printk(KERN_ERR PFX "FPGA detected\n");
++
++ if (BP_NOMCP(bp) && (func == 0))
++ printk(KERN_ERR PFX
++ "MCP disabled, must load devices in order!\n");
++
++ /* Set TPA flags */
++ if (disable_tpa) {
++ bp->flags &= ~TPA_ENABLE_FLAG;
++ bp->dev->features &= ~NETIF_F_LRO;
++ } else {
++ bp->flags |= TPA_ENABLE_FLAG;
++ bp->dev->features |= NETIF_F_LRO;
++ }
++
++ bp->mrrs = mrrs;
++
++ bp->tx_ring_size = MAX_TX_AVAIL;
++ bp->rx_ring_size = MAX_RX_AVAIL;
++
++ bp->rx_csum = 1;
++ bp->rx_offset = 0;
++
++ bp->tx_ticks = 50;
++ bp->rx_ticks = 25;
++
++ bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
++ bp->current_interval = (poll ? poll : bp->timer_interval);
++
++ init_timer(&bp->timer);
++ bp->timer.expires = jiffies + bp->current_interval;
++ bp->timer.data = (unsigned long) bp;
++ bp->timer.function = bnx2x_timer;
++
++ return rc;
++}
++
++/*
++ * ethtool service functions
++ */
++
++/* All ethtool functions called with rtnl_lock */
++
++static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
++{
++ struct bnx2x *bp = netdev_priv(dev);
++
++ cmd->supported = bp->port.supported;
++ cmd->advertising = bp->port.advertising;
++
++ if (netif_carrier_ok(dev)) {
++ cmd->speed = bp->link_vars.line_speed;
++ cmd->duplex = bp->link_vars.duplex;
++ } else {
++ cmd->speed = bp->link_params.req_line_speed;
++ cmd->duplex = bp->link_params.req_duplex;
++ }
++ if (IS_E1HMF(bp)) {
++ u16 vn_max_rate;
++
++ vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
++ FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
++ if (vn_max_rate < cmd->speed)
++ cmd->speed = vn_max_rate;
++ }
++
++ if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
++ u32 ext_phy_type =
++ XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
++
++ switch (ext_phy_type) {
++ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
++ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
++ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
++ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
++ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
++ cmd->port = PORT_FIBRE;
++ break;
++
++ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
++ cmd->port = PORT_TP;
++ break;
++
++ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
++ BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
++ bp->link_params.ext_phy_config);
++ break;
++
++ default:
++ DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
++ bp->link_params.ext_phy_config);
++ break;
++ }
++ } else
++ cmd->port = PORT_TP;
++
++ cmd->phy_address = bp->port.phy_addr;
++ cmd->transceiver = XCVR_INTERNAL;
++
++ if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
++ cmd->autoneg = AUTONEG_ENABLE;
++ else
++ cmd->autoneg = AUTONEG_DISABLE;
++
++ cmd->maxtxpkt = 0;
++ cmd->maxrxpkt = 0;
++
++ DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
++ DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
++ DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
++ DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
++ cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
++ cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
++ cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
++
++ return 0;
++}
++
++static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
++{
++ struct bnx2x *bp = netdev_priv(dev);
++ u32 advertising;
++
++ if (IS_E1HMF(bp))
++ return 0;
++
++ DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
++ DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
++ DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
++ DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
++ cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
++ cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
++ cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
++
++ if (cmd->autoneg == AUTONEG_ENABLE) {
++ if (!(bp->port.supported & SUPPORTED_Autoneg)) {
++ DP(NETIF_MSG_LINK, "Autoneg not supported\n");
++ return -EINVAL;
++ }
++
++ /* advertise the requested speed and duplex if supported */
++ cmd->advertising &= bp->port.supported;
++
++ bp->link_params.req_line_speed = SPEED_AUTO_NEG;
++ bp->link_params.req_duplex = DUPLEX_FULL;
++ bp->port.advertising |= (ADVERTISED_Autoneg |
++ cmd->advertising);
++
++ } else { /* forced speed */
++ /* advertise the requested speed and duplex if supported */
++ switch (cmd->speed) {
++ case SPEED_10:
++ if (cmd->duplex == DUPLEX_FULL) {
++ if (!(bp->port.supported &
++ SUPPORTED_10baseT_Full)) {
++ DP(NETIF_MSG_LINK,
++ "10M full not supported\n");
++ return -EINVAL;
++ }
++
++ advertising = (ADVERTISED_10baseT_Full |
++ ADVERTISED_TP);
++ } else {
++ if (!(bp->port.supported &
++ SUPPORTED_10baseT_Half)) {
++ DP(NETIF_MSG_LINK,
++ "10M half not supported\n");
++ return -EINVAL;
++ }
++
++ advertising = (ADVERTISED_10baseT_Half |
++ ADVERTISED_TP);
++ }
++ break;
++
++ case SPEED_100:
++ if (cmd->duplex == DUPLEX_FULL) {
++ if (!(bp->port.supported &
++ SUPPORTED_100baseT_Full)) {
++ DP(NETIF_MSG_LINK,
++ "100M full not supported\n");
++ return -EINVAL;
++ }
++
++ advertising = (ADVERTISED_100baseT_Full |
++ ADVERTISED_TP);
++ } else {
++ if (!(bp->port.supported &
++ SUPPORTED_100baseT_Half)) {
++ DP(NETIF_MSG_LINK,
++ "100M half not supported\n");
++ return -EINVAL;
++ }
++
++ advertising = (ADVERTISED_100baseT_Half |
++ ADVERTISED_TP);
++ }
++ break;
++
++ case SPEED_1000:
++ if (cmd->duplex != DUPLEX_FULL) {
++ DP(NETIF_MSG_LINK, "1G half not supported\n");
++ return -EINVAL;
++ }
++
++ if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
++ DP(NETIF_MSG_LINK, "1G full not supported\n");
++ return -EINVAL;
++ }
++
++ advertising = (ADVERTISED_1000baseT_Full |
++ ADVERTISED_TP);
++ break;
++
++ case SPEED_2500:
++ if (cmd->duplex != DUPLEX_FULL) {
++ DP(NETIF_MSG_LINK,
++ "2.5G half not supported\n");
++ return -EINVAL;
++ }
++
++ if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
++ DP(NETIF_MSG_LINK,
++ "2.5G full not supported\n");
++ return -EINVAL;
++ }
++
++ advertising = (ADVERTISED_2500baseX_Full |
++ ADVERTISED_TP);
++ break;
++
++ case SPEED_10000:
++ if (cmd->duplex != DUPLEX_FULL) {
++ DP(NETIF_MSG_LINK, "10G half not supported\n");
++ return -EINVAL;
++ }
++
++ if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
++ DP(NETIF_MSG_LINK, "10G full not supported\n");
++ return -EINVAL;
++ }
++
++ advertising = (ADVERTISED_10000baseT_Full |
++ ADVERTISED_FIBRE);
++ break;
++
++ default:
++ DP(NETIF_MSG_LINK, "Unsupported speed\n");
++ return -EINVAL;
++ }
++
++ bp->link_params.req_line_speed = cmd->speed;
++ bp->link_params.req_duplex = cmd->duplex;
++ bp->port.advertising = advertising;
++ }
++
++ DP(NETIF_MSG_LINK, "req_line_speed %d\n"
++ DP_LEVEL " req_duplex %d advertising 0x%x\n",
++ bp->link_params.req_line_speed, bp->link_params.req_duplex,
++ bp->port.advertising);
++
++ if (netif_running(dev)) {
++ bnx2x_stats_handle(bp, STATS_EVENT_STOP);
++ bnx2x_link_set(bp);
++ }
++
++ return 0;
++}
++
++#define PHY_FW_VER_LEN 10
++
++static void bnx2x_get_drvinfo(struct net_device *dev,
++ struct ethtool_drvinfo *info)
++{
++ struct bnx2x *bp = netdev_priv(dev);
++ u8 phy_fw_ver[PHY_FW_VER_LEN];
++
++ strcpy(info->driver, DRV_MODULE_NAME);
++ strcpy(info->version, DRV_MODULE_VERSION);
++
++ phy_fw_ver[0] = '\0';
++ if (bp->port.pmf) {
++ bnx2x_acquire_phy_lock(bp);
++ bnx2x_get_ext_phy_fw_version(&bp->link_params,
++ (bp->state != BNX2X_STATE_CLOSED),
++ phy_fw_ver, PHY_FW_VER_LEN);
++ bnx2x_release_phy_lock(bp);
++ }
++
++ snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
++ (bp->common.bc_ver & 0xff0000) >> 16,
++ (bp->common.bc_ver & 0xff00) >> 8,
++ (bp->common.bc_ver & 0xff),
++ ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
++ strcpy(info->bus_info, pci_name(bp->pdev));
++ info->n_stats = BNX2X_NUM_STATS;
++ info->testinfo_len = BNX2X_NUM_TESTS;
++ info->eedump_len = bp->common.flash_size;
++ info->regdump_len = 0;
++}
++
++static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
++{
++ struct bnx2x *bp = netdev_priv(dev);
++
++ if (bp->flags & NO_WOL_FLAG) {
++ wol->supported = 0;
++ wol->wolopts = 0;
++ } else {
++ wol->supported = WAKE_MAGIC;
++ if (bp->wol)
++ wol->wolopts = WAKE_MAGIC;
++ else
++ wol->wolopts = 0;
++ }
++ memset(&wol->sopass, 0, sizeof(wol->sopass));
++}
++
++static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
++{
++ struct bnx2x *bp = netdev_priv(dev);
++
++ if (wol->wolopts & ~WAKE_MAGIC)
++ return -EINVAL;
++
++ if (wol->wolopts & WAKE_MAGIC) {
++ if (bp->flags & NO_WOL_FLAG)
++ return -EINVAL;
++
++ bp->wol = 1;
++ } else
++ bp->wol = 0;
++
++ return 0;
++}
++
++static u32 bnx2x_get_msglevel(struct net_device *dev)
++{
++ struct bnx2x *bp = netdev_priv(dev);
++
++ return bp->msglevel;
++}
++
++static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
++{
++ struct bnx2x *bp = netdev_priv(dev);
++
++ if (capable(CAP_NET_ADMIN))
++ bp->msglevel = level;
++}
++
++static int bnx2x_nway_reset(struct net_device *dev)
++{
++ struct bnx2x *bp = netdev_priv(dev);
++
++ if (!bp->port.pmf)
++ return 0;
++
++ if (netif_running(dev)) {
++ bnx2x_stats_handle(bp, STATS_EVENT_STOP);
++ bnx2x_link_set(bp);
++ }
++
++ return 0;
++}
++
++static int bnx2x_get_eeprom_len(struct net_device *dev)
++{
++ struct bnx2x *bp = netdev_priv(dev);
++
++ return bp->common.flash_size;
++}
++
++static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
++{
++ int port = BP_PORT(bp);
++ int count, i;
++ u32 val = 0;
++
++ /* adjust timeout for emulation/FPGA */
++ count = NVRAM_TIMEOUT_COUNT;
++ if (CHIP_REV_IS_SLOW(bp))
++ count *= 100;
++
++ /* request access to nvram interface */
++ REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
++ (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
++
++ for (i = 0; i < count*10; i++) {
++ val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
++ if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
++ break;
++
++ udelay(5);
++ }
++
++ if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
++ DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
++ return -EBUSY;
++ }
++
++ return 0;
++}
++
++static int bnx2x_release_nvram_lock(struct bnx2x *bp)
++{
++ int port = BP_PORT(bp);
++ int count, i;
++ u32 val = 0;
++
++ /* adjust timeout for emulation/FPGA */
++ count = NVRAM_TIMEOUT_COUNT;
++ if (CHIP_REV_IS_SLOW(bp))
++ count *= 100;
++
++ /* relinquish nvram interface */
++ REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
++ (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
++
++ for (i = 0; i < count*10; i++) {
++ val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
++ if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
++ break;
++
++ udelay(5);
++ }
++
++ if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
++ DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
++ return -EBUSY;
++ }
++
++ return 0;
++}
++
++static void bnx2x_enable_nvram_access(struct bnx2x *bp)
++{
++ u32 val;
++
++ val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
++
++ /* enable both bits, even on read */
++ REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
++ (val | MCPR_NVM_ACCESS_ENABLE_EN |
++ MCPR_NVM_ACCESS_ENABLE_WR_EN));
++}
++
++static void bnx2x_disable_nvram_access(struct bnx2x *bp)
++{
++ u32 val;
++
++ val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
++
++ /* disable both bits, even after read */
++ REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
++ (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
++ MCPR_NVM_ACCESS_ENABLE_WR_EN)));
++}
++
++static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
++ u32 cmd_flags)
++{
++ int count, i, rc;
++ u32 val;
++
++ /* build the command word */
++ cmd_flags |= MCPR_NVM_COMMAND_DOIT;
++
++ /* need to clear DONE bit separately */
++ REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
++
++ /* address of the NVRAM to read from */
++ REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
++ (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
++
++ /* issue a read command */
++ REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
++
++ /* adjust timeout for emulation/FPGA */
++ count = NVRAM_TIMEOUT_COUNT;
++ if (CHIP_REV_IS_SLOW(bp))
++ count *= 100;
++
++ /* wait for completion */
++ *ret_val = 0;
++ rc = -EBUSY;
++ for (i = 0; i < count; i++) {
++ udelay(5);
++ val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
++
++ if (val & MCPR_NVM_COMMAND_DONE) {
++ val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
++ /* we read nvram data in cpu order
++ * but ethtool sees it as an array of bytes
++ * converting to big-endian will do the work */
++ val = cpu_to_be32(val);
++ *ret_val = val;
++ rc = 0;
++ break;
++ }
++ }
++
++ return rc;
++}
++
++static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
++ int buf_size)
++{
++ int rc;
++ u32 cmd_flags;
++ u32 val;
++
++ if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
++ DP(BNX2X_MSG_NVM,
++ "Invalid parameter: offset 0x%x buf_size 0x%x\n",
++ offset, buf_size);
++ return -EINVAL;
++ }
++
++ if (offset + buf_size > bp->common.flash_size) {
++ DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
++ " buf_size (0x%x) > flash_size (0x%x)\n",
++ offset, buf_size, bp->common.flash_size);
++ return -EINVAL;
++ }
++
++ /* request access to nvram interface */
++ rc = bnx2x_acquire_nvram_lock(bp);
++ if (rc)
++ return rc;
++
++ /* enable access to nvram interface */
++ bnx2x_enable_nvram_access(bp);
++
++ /* read the first word(s) */
++ cmd_flags = MCPR_NVM_COMMAND_FIRST;
++ while ((buf_size > sizeof(u32)) && (rc == 0)) {
++ rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
++ memcpy(ret_buf, &val, 4);
++
++ /* advance to the next dword */
++ offset += sizeof(u32);
++ ret_buf += sizeof(u32);
++ buf_size -= sizeof(u32);
++ cmd_flags = 0;
++ }
++
++ if (rc == 0) {
++ cmd_flags |= MCPR_NVM_COMMAND_LAST;
++ rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
++ memcpy(ret_buf, &val, 4);
++ }
++
++ /* disable access to nvram interface */
++ bnx2x_disable_nvram_access(bp);
++ bnx2x_release_nvram_lock(bp);
++
++ return rc;
++}
++
++static int bnx2x_get_eeprom(struct net_device *dev,
++ struct ethtool_eeprom *eeprom, u8 *eebuf)
++{
++ struct bnx2x *bp = netdev_priv(dev);
++ int rc;
++
++ if (!netif_running(dev))
++ return -EAGAIN;
++
++ DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
++ DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
++ eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
++ eeprom->len, eeprom->len);
++
++ /* parameters already validated in ethtool_get_eeprom */
++
++ rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
++
++ return rc;
++}
++
++static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
++ u32 cmd_flags)
++{
++ int count, i, rc;
++
++ /* build the command word */
++ cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
++
++ /* need to clear DONE bit separately */
++ REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
++
++ /* write the data */
++ REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
++
++ /* address of the NVRAM to write to */
++ REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
++ (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
++
++ /* issue the write command */
++ REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
++
++ /* adjust timeout for emulation/FPGA */
++ count = NVRAM_TIMEOUT_COUNT;
++ if (CHIP_REV_IS_SLOW(bp))
++ count *= 100;
++
++ /* wait for completion */
++ rc = -EBUSY;
++ for (i = 0; i < count; i++) {
++ udelay(5);
++ val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
++ if (val & MCPR_NVM_COMMAND_DONE) {
++ rc = 0;
++ break;
++ }
++ }
++
++ return rc;
++}
++
++#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
++
++static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
++ int buf_size)
++{
++ int rc;
++ u32 cmd_flags;
++ u32 align_offset;
++ u32 val;
++
++ if (offset + buf_size > bp->common.flash_size) {
++ DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
++ " buf_size (0x%x) > flash_size (0x%x)\n",
++ offset, buf_size, bp->common.flash_size);
++ return -EINVAL;
++ }
++
++ /* request access to nvram interface */
++ rc = bnx2x_acquire_nvram_lock(bp);
++ if (rc)
++ return rc;
++
++ /* enable access to nvram interface */
++ bnx2x_enable_nvram_access(bp);
++
++ cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
++ align_offset = (offset & ~0x03);
++ rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
++
++ if (rc == 0) {
++ val &= ~(0xff << BYTE_OFFSET(offset));
++ val |= (*data_buf << BYTE_OFFSET(offset));
++
++ /* nvram data is returned as an array of bytes
++ * convert it back to cpu order */
++ val = be32_to_cpu(val);
++
++ rc = bnx2x_nvram_write_dword(bp, align_offset, val,
++ cmd_flags);
++ }
++
++ /* disable access to nvram interface */
++ bnx2x_disable_nvram_access(bp);
++ bnx2x_release_nvram_lock(bp);
++
++ return rc;
++}
++
++static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
++ int buf_size)
++{
++ int rc;
++ u32 cmd_flags;
++ u32 val;
++ u32 written_so_far;
++
++ if (buf_size == 1) /* ethtool */
++ return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
++
++ if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
++ DP(BNX2X_MSG_NVM,
++ "Invalid parameter: offset 0x%x buf_size 0x%x\n",
++ offset, buf_size);
++ return -EINVAL;
++ }
++
++ if (offset + buf_size > bp->common.flash_size) {
++ DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
++ " buf_size (0x%x) > flash_size (0x%x)\n",
++ offset, buf_size, bp->common.flash_size);
++ return -EINVAL;
++ }
++
++ /* request access to nvram interface */
++ rc = bnx2x_acquire_nvram_lock(bp);
++ if (rc)
++ return rc;
++
++ /* enable access to nvram interface */
++ bnx2x_enable_nvram_access(bp);
++
++ written_so_far = 0;
++ cmd_flags = MCPR_NVM_COMMAND_FIRST;
++ while ((written_so_far < buf_size) && (rc == 0)) {
++ if (written_so_far == (buf_size - sizeof(u32)))
++ cmd_flags |= MCPR_NVM_COMMAND_LAST;
++ else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
++ cmd_flags |= MCPR_NVM_COMMAND_LAST;
++ else if ((offset % NVRAM_PAGE_SIZE) == 0)
++ cmd_flags |= MCPR_NVM_COMMAND_FIRST;
++
++ memcpy(&val, data_buf, 4);
++
++ rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
++
++ /* advance to the next dword */
++ offset += sizeof(u32);
++ data_buf += sizeof(u32);
++ written_so_far += sizeof(u32);
++ cmd_flags = 0;
++ }
++
++ /* disable access to nvram interface */
++ bnx2x_disable_nvram_access(bp);
++ bnx2x_release_nvram_lock(bp);
++
++ return rc;
++}
++
++static int bnx2x_set_eeprom(struct net_device *dev,
++ struct ethtool_eeprom *eeprom, u8 *eebuf)
++{
++ struct bnx2x *bp = netdev_priv(dev);
++ int rc;
++
++ if (!netif_running(dev))
++ return -EAGAIN;
++
++ DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
++ DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
++ eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
++ eeprom->len, eeprom->len);
++
++ /* parameters already validated in ethtool_set_eeprom */
++
++ /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
++ if (eeprom->magic == 0x00504859)
++ if (bp->port.pmf) {
++
++ bnx2x_acquire_phy_lock(bp);
++ rc = bnx2x_flash_download(bp, BP_PORT(bp),
++ bp->link_params.ext_phy_config,
++ (bp->state != BNX2X_STATE_CLOSED),
++ eebuf, eeprom->len);
++ if ((bp->state == BNX2X_STATE_OPEN) ||
++ (bp->state == BNX2X_STATE_DISABLED)) {
++ rc |= bnx2x_link_reset(&bp->link_params,
++ &bp->link_vars);
++ rc |= bnx2x_phy_init(&bp->link_params,
++ &bp->link_vars);
++ }
++ bnx2x_release_phy_lock(bp);
++
++ } else /* Only the PMF can access the PHY */
++ return -EINVAL;
++ else
++ rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
++
++ return rc;
++}
++
++static int bnx2x_get_coalesce(struct net_device *dev,
++ struct ethtool_coalesce *coal)
++{
++ struct bnx2x *bp = netdev_priv(dev);
++
++ memset(coal, 0, sizeof(struct ethtool_coalesce));
++
++ coal->rx_coalesce_usecs = bp->rx_ticks;
++ coal->tx_coalesce_usecs = bp->tx_ticks;
++
++ return 0;
++}
++
++static int bnx2x_set_coalesce(struct net_device *dev,
++ struct ethtool_coalesce *coal)
++{
++ struct bnx2x *bp = netdev_priv(dev);
++
++ bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
++ if (bp->rx_ticks > 3000)
++ bp->rx_ticks = 3000;
++
++ bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
++ if (bp->tx_ticks > 0x3000)
++ bp->tx_ticks = 0x3000;
++
++ if (netif_running(dev))
++ bnx2x_update_coalesce(bp);
++
++ return 0;
++}
++
++static void bnx2x_get_ringparam(struct net_device *dev,
++ struct ethtool_ringparam *ering)
++{
++ struct bnx2x *bp = netdev_priv(dev);
++
++ ering->rx_max_pending = MAX_RX_AVAIL;
++ ering->rx_mini_max_pending = 0;
++ ering->rx_jumbo_max_pending = 0;
++
++ ering->rx_pending = bp->rx_ring_size;
++ ering->rx_mini_pending = 0;
++ ering->rx_jumbo_pending = 0;
++
++ ering->tx_max_pending = MAX_TX_AVAIL;
++ ering->tx_pending = bp->tx_ring_size;
++}
++
++static int bnx2x_set_ringparam(struct net_device *dev,
++ struct ethtool_ringparam *ering)
++{
++ struct bnx2x *bp = netdev_priv(dev);
++ int rc = 0;
++
++ if ((ering->rx_pending > MAX_RX_AVAIL) ||
++ (ering->tx_pending > MAX_TX_AVAIL) ||
++ (ering->tx_pending <= MAX_SKB_FRAGS + 4))
++ return -EINVAL;
++
++ bp->rx_ring_size = ering->rx_pending;
++ bp->tx_ring_size = ering->tx_pending;
++
++ if (netif_running(dev)) {
++ bnx2x_nic_unload(bp, UNLOAD_NORMAL);
++ rc = bnx2x_nic_load(bp, LOAD_NORMAL);
++ }
++
++ return rc;
++}
++
++static void bnx2x_get_pauseparam(struct net_device *dev,
++ struct ethtool_pauseparam *epause)
++{
++ struct bnx2x *bp = netdev_priv(dev);
++
++ epause->autoneg = (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
++ (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
++
++ epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
++ BNX2X_FLOW_CTRL_RX);
++ epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
++ BNX2X_FLOW_CTRL_TX);
++
++ DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
++ DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
++ epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
++}
++
++static int bnx2x_set_pauseparam(struct net_device *dev,
++ struct ethtool_pauseparam *epause)
++{
++ struct bnx2x *bp = netdev_priv(dev);
++
++ if (IS_E1HMF(bp))
++ return 0;
++
++ DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
++ DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
++ epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
++
++ bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
++
++ if (epause->rx_pause)
++ bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
++
++ if (epause->tx_pause)
++ bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
++
++ if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
++ bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
++
++ if (epause->autoneg) {
++ if (!(bp->port.supported & SUPPORTED_Autoneg)) {
++ DP(NETIF_MSG_LINK, "autoneg not supported\n");
++ return -EINVAL;
++ }
++
++ if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
++ bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
++ }
++
++ DP(NETIF_MSG_LINK,
++ "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
++
++ if (netif_running(dev)) {
++ bnx2x_stats_handle(bp, STATS_EVENT_STOP);
++ bnx2x_link_set(bp);
++ }
++
++ return 0;
++}
++
++static int bnx2x_set_flags(struct net_device *dev, u32 data)
++{
++ struct bnx2x *bp = netdev_priv(dev);
++ int changed = 0;
++ int rc = 0;
++
++ /* TPA requires Rx CSUM offloading */
++ if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
++ if (!(dev->features & NETIF_F_LRO)) {
++ dev->features |= NETIF_F_LRO;
++ bp->flags |= TPA_ENABLE_FLAG;
++ changed = 1;
++ }
++
++ } else if (dev->features & NETIF_F_LRO) {
++ dev->features &= ~NETIF_F_LRO;
++ bp->flags &= ~TPA_ENABLE_FLAG;
++ changed = 1;
++ }
++
++ if (changed && netif_running(dev)) {
++ bnx2x_nic_unload(bp, UNLOAD_NORMAL);
++ rc = bnx2x_nic_load(bp, LOAD_NORMAL);
++ }
++
++ return rc;
++}
++
++static u32 bnx2x_get_rx_csum(struct net_device *dev)
++{
++ struct bnx2x *bp = netdev_priv(dev);
++
++ return bp->rx_csum;
++}
++
++static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
++{
++ struct bnx2x *bp = netdev_priv(dev);
++ int rc = 0;
++
++ bp->rx_csum = data;
++
++ /* Disable TPA, when Rx CSUM is disabled. Otherwise all
++ TPA'ed packets will be discarded due to wrong TCP CSUM */
++ if (!data) {
++ u32 flags = ethtool_op_get_flags(dev);
++
++ rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
++ }
++
++ return rc;
++}
++
++static int bnx2x_set_tso(struct net_device *dev, u32 data)
++{
++ if (data) {
++ dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
++ dev->features |= NETIF_F_TSO6;
++ } else {
++ dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
++ dev->features &= ~NETIF_F_TSO6;
++ }
++
++ return 0;
++}
++
++static const struct {
++ char string[ETH_GSTRING_LEN];
++} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
++ { "register_test (offline)" },
++ { "memory_test (offline)" },
++ { "loopback_test (offline)" },
++ { "nvram_test (online)" },
++ { "interrupt_test (online)" },
++ { "link_test (online)" },
++ { "idle check (online)" },
++ { "MC errors (online)" }
++};
++
++static int bnx2x_self_test_count(struct net_device *dev)
++{
++ return BNX2X_NUM_TESTS;
++}
++
++static int bnx2x_test_registers(struct bnx2x *bp)
++{
++ int idx, i, rc = -ENODEV;
++ u32 wr_val = 0;
++ int port = BP_PORT(bp);
++ static const struct {
++ u32 offset0;
++ u32 offset1;
++ u32 mask;
++ } reg_tbl[] = {
++/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
++ { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
++ { HC_REG_AGG_INT_0, 4, 0x000003ff },
++ { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
++ { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
++ { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
++ { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
++ { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
++ { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
++ { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
++/* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
++ { QM_REG_CONNNUM_0, 4, 0x000fffff },
++ { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
++ { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
++ { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
++ { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
++ { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
++ { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
++ { NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
++ { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
++/* 20 */ { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
++ { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
++ { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
++ { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
++ { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
++ { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
++ { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
++ { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
++ { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
++ { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
++/* 30 */ { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
++ { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
++ { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
++ { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
++ { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
++ { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
++ { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
++ { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
++
++ { 0xffffffff, 0, 0x00000000 }
++ };
++
++ if (!netif_running(bp->dev))
++ return rc;
++
++ /* Repeat the test twice:
++ First by writing 0x00000000, second by writing 0xffffffff */
++ for (idx = 0; idx < 2; idx++) {
++
++ switch (idx) {
++ case 0:
++ wr_val = 0;
++ break;
++ case 1:
++ wr_val = 0xffffffff;
++ break;
++ }
++
++ for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
++ u32 offset, mask, save_val, val;
++
++ offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
++ mask = reg_tbl[i].mask;
++
++ save_val = REG_RD(bp, offset);
++
++ REG_WR(bp, offset, wr_val);
++ val = REG_RD(bp, offset);
++
++ /* Restore the original register's value */
++ REG_WR(bp, offset, save_val);
++
++ /* verify that value is as expected value */
++ if ((val & mask) != (wr_val & mask))
++ goto test_reg_exit;
++ }
++ }
++
++ rc = 0;
++
++test_reg_exit:
++ return rc;
++}
++
++static int bnx2x_test_memory(struct bnx2x *bp)
++{
++ int i, j, rc = -ENODEV;
++ u32 val;
++ static const struct {
++ u32 offset;
++ int size;
++ } mem_tbl[] = {
++ { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
++ { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
++ { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
++ { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
++ { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
++ { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
++ { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
++
++ { 0xffffffff, 0 }
++ };
++ static const struct {
++ char *name;
++ u32 offset;
++ u32 e1_mask;
++ u32 e1h_mask;
++ } prty_tbl[] = {
++ { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
++ { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
++ { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
++ { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
++ { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
++ { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
++
++ { NULL, 0xffffffff, 0, 0 }
++ };
++
++ if (!netif_running(bp->dev))
++ return rc;
++
++ /* Go through all the memories */
++ for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
++ for (j = 0; j < mem_tbl[i].size; j++)
++ REG_RD(bp, mem_tbl[i].offset + j*4);
++
++ /* Check the parity status */
++ for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
++ val = REG_RD(bp, prty_tbl[i].offset);
++ if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
++ (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
++ DP(NETIF_MSG_HW,
++ "%s is 0x%x\n", prty_tbl[i].name, val);
++ goto test_mem_exit;
++ }
++ }
++
++ rc = 0;
++
++test_mem_exit:
++ return rc;
++}
++
++static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
++{
++ int cnt = 1000;
++
++ if (link_up)
++ while (bnx2x_link_test(bp) && cnt--)
++ msleep(10);
++}
++
++static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
++{
++ unsigned int pkt_size, num_pkts, i;
++ struct sk_buff *skb;
++ unsigned char *packet;
++ struct bnx2x_fastpath *fp = &bp->fp[0];
++ u16 tx_start_idx, tx_idx;
++ u16 rx_start_idx, rx_idx;
++ u16 pkt_prod;
++ struct sw_tx_bd *tx_buf;
++ struct eth_tx_bd *tx_bd;
++ dma_addr_t mapping;
++ union eth_rx_cqe *cqe;
++ u8 cqe_fp_flags;
++ struct sw_rx_bd *rx_buf;
++ u16 len;
++ int rc = -ENODEV;
++
++ if (loopback_mode == BNX2X_MAC_LOOPBACK) {
++ bp->link_params.loopback_mode = LOOPBACK_BMAC;
++ bnx2x_phy_init(&bp->link_params, &bp->link_vars);
++
++ } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
++ u16 cnt = 1000;
++ bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
++ bnx2x_phy_init(&bp->link_params, &bp->link_vars);
++ /* wait until link state is restored */
++ if (link_up)
++ while (cnt-- && bnx2x_test_link(&bp->link_params,
++ &bp->link_vars))
++ msleep(10);
++ } else
++ return -EINVAL;
++
++ pkt_size = 1514;
++ skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
++ if (!skb) {
++ rc = -ENOMEM;
++ goto test_loopback_exit;
++ }
++ packet = skb_put(skb, pkt_size);
++ memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
++ memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
++ for (i = ETH_HLEN; i < pkt_size; i++)
++ packet[i] = (unsigned char) (i & 0xff);
++
++ num_pkts = 0;
++ tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
++ rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
++
++ pkt_prod = fp->tx_pkt_prod++;
++ tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
++ tx_buf->first_bd = fp->tx_bd_prod;
++ tx_buf->skb = skb;
++
++ tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
++ mapping = pci_map_single(bp->pdev, skb->data,
++ skb_headlen(skb), PCI_DMA_TODEVICE);
++ tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
++ tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
++ tx_bd->nbd = cpu_to_le16(1);
++ tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
++ tx_bd->vlan = cpu_to_le16(pkt_prod);
++ tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
++ ETH_TX_BD_FLAGS_END_BD);
++ tx_bd->general_data = ((UNICAST_ADDRESS <<
++ ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
++
++ wmb();
++
++ fp->hw_tx_prods->bds_prod =
++ cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
++ mb(); /* FW restriction: must not reorder writing nbd and packets */
++ fp->hw_tx_prods->packets_prod =
++ cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
++ DOORBELL(bp, FP_IDX(fp), 0);
++
++ mmiowb();
++
++ num_pkts++;
++ fp->tx_bd_prod++;
++ bp->dev->trans_start = jiffies;
++
++ udelay(100);
++
++ tx_idx = le16_to_cpu(*fp->tx_cons_sb);
++ if (tx_idx != tx_start_idx + num_pkts)
++ goto test_loopback_exit;
++
++ rx_idx = le16_to_cpu(*fp->rx_cons_sb);
++ if (rx_idx != rx_start_idx + num_pkts)
++ goto test_loopback_exit;
++
++ cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
++ cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
++ if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
++ goto test_loopback_rx_exit;
++
++ len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
++ if (len != pkt_size)
++ goto test_loopback_rx_exit;
++
++ rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
++ skb = rx_buf->skb;
++ skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
++ for (i = ETH_HLEN; i < pkt_size; i++)
++ if (*(skb->data + i) != (unsigned char) (i & 0xff))
++ goto test_loopback_rx_exit;
++
++ rc = 0;
++
++test_loopback_rx_exit:
++ bp->dev->last_rx = jiffies;
++
++ fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
++ fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
++ fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
++ fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
++
++ /* Update producers */
++ bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
++ fp->rx_sge_prod);
++
++test_loopback_exit:
++ bp->link_params.loopback_mode = LOOPBACK_NONE;
++
++ return rc;
++}
++
++static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
++{
++ int rc = 0;
++
++ if (!netif_running(bp->dev))
++ return BNX2X_LOOPBACK_FAILED;
++
++ bnx2x_netif_stop(bp, 1);
++ bnx2x_acquire_phy_lock(bp);
++
++ if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
++ DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
++ rc |= BNX2X_MAC_LOOPBACK_FAILED;
++ }
++
++ if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
++ DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
++ rc |= BNX2X_PHY_LOOPBACK_FAILED;
++ }
++
++ bnx2x_release_phy_lock(bp);
++ bnx2x_netif_start(bp);
++
++ return rc;
++}
++
++/* Standard CRC-32 residue: the CRC of a block that includes its own
++ * little-endian CRC trailer always equals this constant. */
++#define CRC32_RESIDUAL 0xdebb20e3
++
++/* NVRAM self-test: verify the magic word and then the embedded CRC of
++ * each well-known NVRAM region.  Returns 0 on success, a negative
++ * errno / nvram-read error otherwise.
++ */
++static int bnx2x_test_nvram(struct bnx2x *bp)
++{
++ static const struct {
++ int offset;
++ int size;
++ } nvram_tbl[] = {
++ { 0, 0x14 }, /* bootstrap */
++ { 0x14, 0xec }, /* dir */
++ { 0x100, 0x350 }, /* manuf_info */
++ { 0x450, 0xf0 }, /* feature_info */
++ { 0x640, 0x64 }, /* upgrade_key_info */
++ { 0x6a4, 0x64 },
++ { 0x708, 0x70 }, /* manuf_key_info */
++ { 0x778, 0x70 },
++ { 0, 0 }
++ };
++ /* buffer sized for the largest table entry (manuf_info, 0x350) */
++ u32 buf[0x350 / 4];
++ u8 *data = (u8 *)buf;
++ int i, rc;
++ u32 magic, csum;
++
++ rc = bnx2x_nvram_read(bp, 0, data, 4);
++ if (rc) {
++ DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
++ goto test_nvram_exit;
++ }
++
++ magic = be32_to_cpu(buf[0]);
++ if (magic != 0x669955aa) {
++ DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
++ rc = -ENODEV;
++ goto test_nvram_exit;
++ }
++
++ for (i = 0; nvram_tbl[i].size; i++) {
++
++ rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
++ nvram_tbl[i].size);
++ if (rc) {
++ DP(NETIF_MSG_PROBE,
++ "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
++ goto test_nvram_exit;
++ }
++
++ /* each region carries its CRC, so a full-region CRC must
++ * leave the standard residue */
++ csum = ether_crc_le(nvram_tbl[i].size, data);
++ if (csum != CRC32_RESIDUAL) {
++ DP(NETIF_MSG_PROBE,
++ "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
++ rc = -ENODEV;
++ goto test_nvram_exit;
++ }
++ }
++
++test_nvram_exit:
++ return rc;
++}
++
++/* Interrupt self-test: post a zero-length SET_MAC ramrod and wait up
++ * to ~100ms for its completion (which presumably clears
++ * set_mac_pending from the slowpath interrupt handler — confirm in
++ * the sp event code).  Returns 0 on success, -ENODEV on timeout or
++ * if the interface is down.
++ */
++static int bnx2x_test_intr(struct bnx2x *bp)
++{
++ struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
++ int i, rc;
++
++ if (!netif_running(bp->dev))
++ return -ENODEV;
++
++ /* zero-entry CAM command: harmless but still generates an event */
++ config->hdr.length = 0;
++ if (CHIP_IS_E1(bp))
++ config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
++ else
++ config->hdr.offset = BP_FUNC(bp);
++ config->hdr.client_id = BP_CL_ID(bp);
++ config->hdr.reserved1 = 0;
++
++ rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
++ U64_HI(bnx2x_sp_mapping(bp, mac_config)),
++ U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
++ if (rc == 0) {
++ bp->set_mac_pending++;
++ for (i = 0; i < 10; i++) {
++ if (!bp->set_mac_pending)
++ break;
++ msleep_interruptible(10);
++ }
++ if (i == 10)
++ rc = -ENODEV;
++ }
++
++ return rc;
++}
++
++/* ethtool .self_test handler.  Result slots in buf[]:
++ *  0 registers, 1 memory, 2 loopback (offline only), 3 nvram,
++ *  4 interrupt, 5 link (PMF only), 7 MC assert; buf[6] is unused.
++ * Offline tests reload the NIC in diagnostic mode and reload it
++ * normally afterwards.
++ */
++static void bnx2x_self_test(struct net_device *dev,
++ struct ethtool_test *etest, u64 *buf)
++{
++ struct bnx2x *bp = netdev_priv(dev);
++
++ memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
++
++ if (!netif_running(dev))
++ return;
++
++ /* offline tests are not supported in MF mode */
++ if (IS_E1HMF(bp))
++ etest->flags &= ~ETH_TEST_FL_OFFLINE;
++
++ if (etest->flags & ETH_TEST_FL_OFFLINE) {
++ u8 link_up;
++
++ link_up = bp->link_vars.link_up;
++ bnx2x_nic_unload(bp, UNLOAD_NORMAL);
++ bnx2x_nic_load(bp, LOAD_DIAG);
++ /* wait until link state is restored */
++ bnx2x_wait_for_link(bp, link_up);
++
++ if (bnx2x_test_registers(bp) != 0) {
++ buf[0] = 1;
++ etest->flags |= ETH_TEST_FL_FAILED;
++ }
++ if (bnx2x_test_memory(bp) != 0) {
++ buf[1] = 1;
++ etest->flags |= ETH_TEST_FL_FAILED;
++ }
++ buf[2] = bnx2x_test_loopback(bp, link_up);
++ if (buf[2] != 0)
++ etest->flags |= ETH_TEST_FL_FAILED;
++
++ bnx2x_nic_unload(bp, UNLOAD_NORMAL);
++ bnx2x_nic_load(bp, LOAD_NORMAL);
++ /* wait until link state is restored */
++ bnx2x_wait_for_link(bp, link_up);
++ }
++ if (bnx2x_test_nvram(bp) != 0) {
++ buf[3] = 1;
++ etest->flags |= ETH_TEST_FL_FAILED;
++ }
++ if (bnx2x_test_intr(bp) != 0) {
++ buf[4] = 1;
++ etest->flags |= ETH_TEST_FL_FAILED;
++ }
++ /* link test only makes sense on the port management function */
++ if (bp->port.pmf)
++ if (bnx2x_link_test(bp) != 0) {
++ buf[5] = 1;
++ etest->flags |= ETH_TEST_FL_FAILED;
++ }
++ buf[7] = bnx2x_mc_assert(bp);
++ if (buf[7] != 0)
++ etest->flags |= ETH_TEST_FL_FAILED;
++
++#ifdef BNX2X_EXTRA_DEBUG
++ bnx2x_panic_dump(bp);
++#endif
++}
++
++/* Table driving `ethtool -S`: offset into bp->eth_stats (in u32
++ * units), counter width in bytes (4 or 8; 8-byte counters are stored
++ * as hi/lo u32 pairs), a port-vs-function flag (port stats are hidden
++ * in E1H multi-function mode, see IS_NOT_E1HMF_STAT), and the
++ * user-visible string.
++ */
++static const struct {
++ long offset;
++ int size;
++ u32 flags;
++#define STATS_FLAGS_PORT 1
++#define STATS_FLAGS_FUNC 2
++ u8 string[ETH_GSTRING_LEN];
++} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
++/* 1 */ { STATS_OFFSET32(valid_bytes_received_hi),
++ 8, STATS_FLAGS_FUNC, "rx_bytes" },
++ { STATS_OFFSET32(error_bytes_received_hi),
++ 8, STATS_FLAGS_FUNC, "rx_error_bytes" },
++ { STATS_OFFSET32(total_bytes_transmitted_hi),
++ 8, STATS_FLAGS_FUNC, "tx_bytes" },
++ { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
++ 8, STATS_FLAGS_PORT, "tx_error_bytes" },
++ { STATS_OFFSET32(total_unicast_packets_received_hi),
++ 8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
++ { STATS_OFFSET32(total_multicast_packets_received_hi),
++ 8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
++ { STATS_OFFSET32(total_broadcast_packets_received_hi),
++ 8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
++ { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
++ 8, STATS_FLAGS_FUNC, "tx_packets" },
++ { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
++ 8, STATS_FLAGS_PORT, "tx_mac_errors" },
++/* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
++ 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
++ { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
++ 8, STATS_FLAGS_PORT, "rx_crc_errors" },
++ { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
++ 8, STATS_FLAGS_PORT, "rx_align_errors" },
++ { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
++ 8, STATS_FLAGS_PORT, "tx_single_collisions" },
++ { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
++ 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
++ { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
++ 8, STATS_FLAGS_PORT, "tx_deferred" },
++ { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
++ 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
++ { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
++ 8, STATS_FLAGS_PORT, "tx_late_collisions" },
++ { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
++ 8, STATS_FLAGS_PORT, "tx_total_collisions" },
++ { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
++ 8, STATS_FLAGS_PORT, "rx_fragments" },
++/* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
++ 8, STATS_FLAGS_PORT, "rx_jabbers" },
++ { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
++ 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
++ { STATS_OFFSET32(jabber_packets_received),
++ 4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
++ { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
++ 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
++ { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
++ 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
++ { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
++ 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
++ { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
++ 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
++ { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
++ 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
++ { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
++ 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
++ { STATS_OFFSET32(etherstatspktsover1522octets_hi),
++ 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
++/* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
++ 8, STATS_FLAGS_PORT, "rx_xon_frames" },
++ { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
++ 8, STATS_FLAGS_PORT, "rx_xoff_frames" },
++ { STATS_OFFSET32(tx_stat_outxonsent_hi),
++ 8, STATS_FLAGS_PORT, "tx_xon_frames" },
++ { STATS_OFFSET32(tx_stat_outxoffsent_hi),
++ 8, STATS_FLAGS_PORT, "tx_xoff_frames" },
++ { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
++ 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
++ { STATS_OFFSET32(mac_filter_discard),
++ 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
++ { STATS_OFFSET32(no_buff_discard),
++ 4, STATS_FLAGS_FUNC, "rx_discards" },
++ { STATS_OFFSET32(xxoverflow_discard),
++ 4, STATS_FLAGS_PORT, "rx_fw_discards" },
++ { STATS_OFFSET32(brb_drop_hi),
++ 8, STATS_FLAGS_PORT, "brb_discard" },
++ { STATS_OFFSET32(brb_truncate_hi),
++ 8, STATS_FLAGS_PORT, "brb_truncate" },
++/* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
++ 4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
++ { STATS_OFFSET32(rx_skb_alloc_failed),
++ 4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
++/* 42 */{ STATS_OFFSET32(hw_csum_err),
++ 4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
++};
++
++/* True when stat i must be hidden: per-port stats are meaningless in
++ * E1H multi-function mode. */
++#define IS_NOT_E1HMF_STAT(bp, i) \
++ (IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
++
++/* ethtool .get_strings: emit stat names (filtered for MF mode) or the
++ * self-test names.  j tracks the output slot since i may skip entries.
++ */
++static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
++{
++ struct bnx2x *bp = netdev_priv(dev);
++ int i, j;
++
++ switch (stringset) {
++ case ETH_SS_STATS:
++ for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
++ if (IS_NOT_E1HMF_STAT(bp, i))
++ continue;
++ strcpy(buf + j*ETH_GSTRING_LEN,
++ bnx2x_stats_arr[i].string);
++ j++;
++ }
++ break;
++
++ case ETH_SS_TEST:
++ memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
++ break;
++ }
++}
++
++/* ethtool .get_stats_count: number of stats after the MF-mode filter;
++ * must match the filtering in get_strings/get_ethtool_stats. */
++static int bnx2x_get_stats_count(struct net_device *dev)
++{
++ struct bnx2x *bp = netdev_priv(dev);
++ int i, num_stats = 0;
++
++ for (i = 0; i < BNX2X_NUM_STATS; i++) {
++ if (IS_NOT_E1HMF_STAT(bp, i))
++ continue;
++ num_stats++;
++ }
++ return num_stats;
++}
++
++/* ethtool .get_ethtool_stats: copy counters out of bp->eth_stats using
++ * the bnx2x_stats_arr table; 8-byte counters are combined from their
++ * hi/lo u32 halves. */
++static void bnx2x_get_ethtool_stats(struct net_device *dev,
++ struct ethtool_stats *stats, u64 *buf)
++{
++ struct bnx2x *bp = netdev_priv(dev);
++ u32 *hw_stats = (u32 *)&bp->eth_stats;
++ int i, j;
++
++ for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
++ if (IS_NOT_E1HMF_STAT(bp, i))
++ continue;
++
++ if (bnx2x_stats_arr[i].size == 0) {
++ /* skip this counter */
++ buf[j] = 0;
++ j++;
++ continue;
++ }
++ if (bnx2x_stats_arr[i].size == 4) {
++ /* 4-byte counter */
++ buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
++ j++;
++ continue;
++ }
++ /* 8-byte counter */
++ buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
++ *(hw_stats + bnx2x_stats_arr[i].offset + 1));
++ j++;
++ }
++}
++
++/* ethtool .phys_id: blink the port LED to identify the adapter.
++ * `data` is the blink duration in seconds (0 means 2s); each second
++ * is one on/off cycle (2 x 500ms).  Only the PMF may drive the LED.
++ */
++static int bnx2x_phys_id(struct net_device *dev, u32 data)
++{
++ struct bnx2x *bp = netdev_priv(dev);
++ int port = BP_PORT(bp);
++ int i;
++
++ if (!netif_running(dev))
++ return 0;
++
++ if (!bp->port.pmf)
++ return 0;
++
++ if (data == 0)
++ data = 2;
++
++ for (i = 0; i < (data * 2); i++) {
++ if ((i % 2) == 0)
++ bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
++ bp->link_params.hw_led_mode,
++ bp->link_params.chip_id);
++ else
++ bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
++ bp->link_params.hw_led_mode,
++ bp->link_params.chip_id);
++
++ msleep_interruptible(500);
++ if (signal_pending(current))
++ break;
++ }
++
++ /* restore the LED to reflect the actual link state */
++ if (bp->link_vars.link_up)
++ bnx2x_set_led(bp, port, LED_MODE_OPER,
++ bp->link_vars.line_speed,
++ bp->link_params.hw_led_mode,
++ bp->link_params.chip_id);
++
++ return 0;
++}
++
++/* ethtool entry points for the bnx2x driver (pre-2.6.29 style ops:
++ * self_test_count/get_stats_count instead of get_sset_count). */
++static struct ethtool_ops bnx2x_ethtool_ops = {
++ .get_settings = bnx2x_get_settings,
++ .set_settings = bnx2x_set_settings,
++ .get_drvinfo = bnx2x_get_drvinfo,
++ .get_wol = bnx2x_get_wol,
++ .set_wol = bnx2x_set_wol,
++ .get_msglevel = bnx2x_get_msglevel,
++ .set_msglevel = bnx2x_set_msglevel,
++ .nway_reset = bnx2x_nway_reset,
++ .get_link = ethtool_op_get_link,
++ .get_eeprom_len = bnx2x_get_eeprom_len,
++ .get_eeprom = bnx2x_get_eeprom,
++ .set_eeprom = bnx2x_set_eeprom,
++ .get_coalesce = bnx2x_get_coalesce,
++ .set_coalesce = bnx2x_set_coalesce,
++ .get_ringparam = bnx2x_get_ringparam,
++ .set_ringparam = bnx2x_set_ringparam,
++ .get_pauseparam = bnx2x_get_pauseparam,
++ .set_pauseparam = bnx2x_set_pauseparam,
++ .get_rx_csum = bnx2x_get_rx_csum,
++ .set_rx_csum = bnx2x_set_rx_csum,
++ .get_tx_csum = ethtool_op_get_tx_csum,
++ .set_tx_csum = ethtool_op_set_tx_hw_csum,
++ .set_flags = bnx2x_set_flags,
++ .get_flags = ethtool_op_get_flags,
++ .get_sg = ethtool_op_get_sg,
++ .set_sg = ethtool_op_set_sg,
++ .get_tso = ethtool_op_get_tso,
++ .set_tso = bnx2x_set_tso,
++ .self_test_count = bnx2x_self_test_count,
++ .self_test = bnx2x_self_test,
++ .get_strings = bnx2x_get_strings,
++ .phys_id = bnx2x_phys_id,
++ .get_stats_count = bnx2x_get_stats_count,
++ .get_ethtool_stats = bnx2x_get_ethtool_stats,
++};
++
++/* end of ethtool_ops */
++
++/****************************************************************************
++* General service functions
++****************************************************************************/
++
++/* Move the device between PCI power states via the PM capability's
++ * PCI_PM_CTRL register.  Only D0 and D3hot are supported; anything
++ * else returns -EINVAL.
++ */
++static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
++{
++ u16 pmcsr;
++
++ pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
++
++ switch (state) {
++ case PCI_D0:
++ /* clear the state bits (-> D0) and ack any pending PME */
++ pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
++ ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
++ PCI_PM_CTRL_PME_STATUS));
++
++ if (pmcsr & PCI_PM_CTRL_STATE_MASK)
++ /* delay required during transition out of D3hot */
++ msleep(20);
++ break;
++
++ case PCI_D3hot:
++ pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
++ pmcsr |= 3; /* power-state field value 3 == D3hot */
++
++ if (bp->wol)
++ pmcsr |= PCI_PM_CTRL_PME_ENABLE;
++
++ pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
++ pmcsr);
++
++ /* No more memory access after this point until
++ * device is brought back to D0.
++ */
++ break;
++
++ default:
++ return -EINVAL;
++ }
++ return 0;
++}
++
++/* True when the status block's RX consumer differs from our software
++ * RCQ consumer, i.e. there are completions to process. */
++static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
++{
++ u16 rx_cons_sb;
++
++ /* Tell compiler that status block fields can change */
++ barrier();
++ rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
++ /* skip over a page-boundary index (presumably the last RCQ entry
++ * per page is not used as a completion — confirm vs. NEXT_RCQ_IDX) */
++ if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
++ rx_cons_sb++;
++ return (fp->rx_comp_cons != rx_cons_sb);
++}
++
++/*
++ * net_device service functions
++ */
++
++/* NAPI poll handler for one fastpath ring: reap TX completions, then
++ * RX up to `budget`, and re-enable the status-block interrupt only
++ * when all work is done (budget not exhausted). */
++static int bnx2x_poll(struct napi_struct *napi, int budget)
++{
++ struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
++ napi);
++ struct bnx2x *bp = fp->bp;
++ int work_done = 0;
++
++#ifdef BNX2X_STOP_ON_ERROR
++ if (unlikely(bp->panic))
++ goto poll_panic;
++#endif
++
++ /* warm the cache lines we are about to touch */
++ prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
++ prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
++ prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
++
++ bnx2x_update_fpsb_idx(fp);
++
++ if (bnx2x_has_tx_work(fp))
++ bnx2x_tx_int(fp, budget);
++
++ if (bnx2x_has_rx_work(fp))
++ work_done = bnx2x_rx_int(fp, budget);
++ rmb(); /* BNX2X_HAS_WORK() reads the status block */
++
++ /* must not complete if we consumed full budget */
++ if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
++
++#ifdef BNX2X_STOP_ON_ERROR
++poll_panic:
++#endif
++ netif_rx_complete(bp->dev, napi);
++
++ /* ack both storm indices; only the second re-enables IRQs */
++ bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
++ le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
++ bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
++ le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
++ }
++ return work_done;
++}
++
++
++/* we split the first BD into headers and data BDs
++ * to ease the pain of our fellow microcode engineers
++ * we use one mapping for both BDs
++ * So far this has only been observed to happen
++ * in Other Operating Systems(TM)
++ *
++ * On entry *tx_bd covers the whole linear part; on exit the first BD
++ * covers only `hlen` header bytes, a new data BD covers the rest at
++ * mapping+hlen, and *tx_bd points at that data BD so the caller can
++ * mark the last-BD flag.  Returns the advanced bd_prod.
++ */
++static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
++ struct bnx2x_fastpath *fp,
++ struct eth_tx_bd **tx_bd, u16 hlen,
++ u16 bd_prod, int nbd)
++{
++ struct eth_tx_bd *h_tx_bd = *tx_bd;
++ struct eth_tx_bd *d_tx_bd;
++ dma_addr_t mapping;
++ int old_len = le16_to_cpu(h_tx_bd->nbytes);
++
++ /* first fix first BD */
++ h_tx_bd->nbd = cpu_to_le16(nbd);
++ h_tx_bd->nbytes = cpu_to_le16(hlen);
++
++ DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
++ "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
++ h_tx_bd->addr_lo, h_tx_bd->nbd);
++
++ /* now get a new data BD
++ * (after the pbd) and fill it */
++ bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
++ d_tx_bd = &fp->tx_desc_ring[bd_prod];
++
++ /* data BD reuses the header's DMA mapping, offset past the headers */
++ mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
++ le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
++
++ d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
++ d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
++ d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
++ d_tx_bd->vlan = 0;
++ /* this marks the BD as one that has no individual mapping
++ * the FW ignores this flag in a BD not marked start
++ */
++ d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
++ DP(NETIF_MSG_TX_QUEUED,
++ "TSO split data size is %d (%x:%x)\n",
++ d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
++
++ /* update tx_bd for marking the last BD flag */
++ *tx_bd = d_tx_bd;
++
++ return bd_prod;
++}
++
++/* Adjust a partial checksum when the stack's checksum start differs
++ * from the transport header by `fix` bytes: subtract (fix > 0) or add
++ * (fix < 0) the checksum of the in-between bytes, then byte-swap for
++ * the firmware (presumably FW expects the opposite byte order —
++ * matches the swab16() used for tcp_hdr()->check elsewhere).
++ */
++static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
++{
++ if (fix > 0)
++ csum = (u16) ~csum_fold(csum_sub(csum,
++ csum_partial(t_header - fix, fix, 0)));
++
++ else if (fix < 0)
++ csum = (u16) ~csum_fold(csum_add(csum,
++ csum_partial(t_header, -fix, 0)));
++
++ return swab16(csum);
++}
++
++/* Classify an outgoing skb into a bitmask of XMIT_* flags: plain vs
++ * v4/v6 checksum offload (plus TCP), and GSO v4/v6.  Note that
++ * ntohs(ETH_P_IPV6) on a constant equals htons() of it, so the
++ * comparison with the big-endian skb->protocol is correct.
++ */
++static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
++{
++ u32 rc;
++
++ if (skb->ip_summed != CHECKSUM_PARTIAL)
++ rc = XMIT_PLAIN;
++
++ else {
++ if (skb->protocol == ntohs(ETH_P_IPV6)) {
++ rc = XMIT_CSUM_V6;
++ if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
++ rc |= XMIT_CSUM_TCP;
++
++ } else {
++ rc = XMIT_CSUM_V4;
++ if (ip_hdr(skb)->protocol == IPPROTO_TCP)
++ rc |= XMIT_CSUM_TCP;
++ }
++ }
++
++ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
++ rc |= XMIT_GSO_V4;
++
++ else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
++ rc |= XMIT_GSO_V6;
++
++ return rc;
++}
++
++#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
++/* check if packet requires linearization (packet is too fragmented)
++ *
++ * For LSO packets the FW can only fetch MAX_FETCH_BD BDs per MSS
++ * window, so we slide a window of (MAX_FETCH_BD - 3) fragments over
++ * the frag list and require each window to carry at least one MSS.
++ * Returns 1 if skb_linearize() is needed, 0 otherwise.
++ */
++static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
++ u32 xmit_type)
++{
++ int to_copy = 0;
++ int hlen = 0;
++ int first_bd_sz = 0;
++
++ /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
++ if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
++
++ if (xmit_type & XMIT_GSO) {
++ unsigned short lso_mss = skb_shinfo(skb)->gso_size;
++ /* Check if LSO packet needs to be copied:
++ 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
++ int wnd_size = MAX_FETCH_BD - 3;
++ /* Number of windows to check */
++ int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
++ int wnd_idx = 0;
++ int frag_idx = 0;
++ u32 wnd_sum = 0;
++
++ /* Headers length */
++ hlen = (int)(skb_transport_header(skb) - skb->data) +
++ tcp_hdrlen(skb);
++
++ /* Amount of data (w/o headers) on linear part of SKB*/
++ first_bd_sz = skb_headlen(skb) - hlen;
++
++ wnd_sum = first_bd_sz;
++
++ /* Calculate the first sum - it's special */
++ for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
++ wnd_sum +=
++ skb_shinfo(skb)->frags[frag_idx].size;
++
++ /* If there was data on linear skb data - check it */
++ if (first_bd_sz > 0) {
++ if (unlikely(wnd_sum < lso_mss)) {
++ to_copy = 1;
++ goto exit_lbl;
++ }
++
++ wnd_sum -= first_bd_sz;
++ }
++
++ /* Others are easier: run through the frag list and
++ check all windows */
++ for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
++ wnd_sum +=
++ skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
++
++ if (unlikely(wnd_sum < lso_mss)) {
++ to_copy = 1;
++ break;
++ }
++ wnd_sum -=
++ skb_shinfo(skb)->frags[wnd_idx].size;
++ }
++
++ } else {
++ /* in non-LSO too fragmented packet should always
++ be linearized */
++ to_copy = 1;
++ }
++ }
++
++exit_lbl:
++ if (unlikely(to_copy))
++ DP(NETIF_MSG_TX_QUEUED,
++ "Linearization IS REQUIRED for %s packet. "
++ "num_frags %d hlen %d first_bd_sz %d\n",
++ (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
++ skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
++
++ return to_copy;
++}
++#endif
++
++/* called with netif_tx_lock
++ * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
++ * netif_wake_queue()
++ *
++ * Main transmit path: builds the start BD, an optional parse BD
++ * (checksum/TSO offload info), one BD per fragment, then rings the
++ * doorbell.  Returns NETDEV_TX_OK or NETDEV_TX_BUSY.
++ */
++static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
++{
++ struct bnx2x *bp = netdev_priv(dev);
++ struct bnx2x_fastpath *fp;
++ struct sw_tx_bd *tx_buf;
++ struct eth_tx_bd *tx_bd;
++ struct eth_tx_parse_bd *pbd = NULL;
++ u16 pkt_prod, bd_prod;
++ int nbd, fp_index;
++ dma_addr_t mapping;
++ u32 xmit_type = bnx2x_xmit_type(bp, skb);
++ int vlan_off = (bp->e1hov ? 4 : 0);
++ int i;
++ u8 hlen = 0;
++
++#ifdef BNX2X_STOP_ON_ERROR
++ if (unlikely(bp->panic))
++ return NETDEV_TX_BUSY;
++#endif
++
++ /* spread packets over the TX queues by submitting CPU */
++ fp_index = (smp_processor_id() % bp->num_queues);
++ fp = &bp->fp[fp_index];
++
++ if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
++ /* NOTE(review): comma operator below — should be ';';
++ * behavior is unchanged but it is a typo */
++ bp->eth_stats.driver_xoff++,
++ netif_stop_queue(dev);
++ BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
++ return NETDEV_TX_BUSY;
++ }
++
++ DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
++ " gso type %x xmit_type %x\n",
++ skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
++ ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
++
++#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
++ /* First, check if we need to linearize the skb
++ (due to FW restrictions) */
++ if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
++ /* Statistics of linearization */
++ bp->lin_cnt++;
++ if (skb_linearize(skb) != 0) {
++ DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
++ "silently dropping this SKB\n");
++ dev_kfree_skb_any(skb);
++ return NETDEV_TX_OK;
++ }
++ }
++#endif
++
++ /*
++ Please read carefully. First we use one BD which we mark as start,
++ then for TSO or xsum we have a parsing info BD,
++ and only then we have the rest of the TSO BDs.
++ (don't forget to mark the last one as last,
++ and to unmap only AFTER you write to the BD ...)
++ And above all, all pdb sizes are in words - NOT DWORDS!
++ */
++
++ pkt_prod = fp->tx_pkt_prod++;
++ bd_prod = TX_BD(fp->tx_bd_prod);
++
++ /* get a tx_buf and first BD */
++ tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
++ tx_bd = &fp->tx_desc_ring[bd_prod];
++
++ tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
++ tx_bd->general_data = (UNICAST_ADDRESS <<
++ ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
++ /* header nbd */
++ tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
++
++ /* remember the first BD of the packet */
++ tx_buf->first_bd = fp->tx_bd_prod;
++ tx_buf->skb = skb;
++
++ DP(NETIF_MSG_TX_QUEUED,
++ "sending pkt %u @%p next_idx %u bd %u @%p\n",
++ pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
++
++#ifdef BCM_VLAN
++ if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
++ (bp->flags & HW_VLAN_TX_FLAG)) {
++ tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
++ tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
++ vlan_off += 4;
++ } else
++#endif
++ tx_bd->vlan = cpu_to_le16(pkt_prod);
++
++ if (xmit_type) {
++ /* turn on parsing and get a BD */
++ bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
++ pbd = (void *)&fp->tx_desc_ring[bd_prod];
++
++ memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
++ }
++
++ if (xmit_type & XMIT_CSUM) {
++ /* header lengths in the parse BD are in 16-bit words */
++ hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
++
++ /* for now NS flag is not used in Linux */
++ pbd->global_data = (hlen |
++ ((skb->protocol == ntohs(ETH_P_8021Q)) <<
++ ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
++
++ pbd->ip_hlen = (skb_transport_header(skb) -
++ skb_network_header(skb)) / 2;
++
++ hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
++
++ pbd->total_hlen = cpu_to_le16(hlen);
++ /* back to bytes for the TSO header-split check below */
++ hlen = hlen*2 - vlan_off;
++
++ tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
++
++ if (xmit_type & XMIT_CSUM_V4)
++ tx_bd->bd_flags.as_bitfield |=
++ ETH_TX_BD_FLAGS_IP_CSUM;
++ else
++ tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
++
++ if (xmit_type & XMIT_CSUM_TCP) {
++ pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
++
++ } else {
++ s8 fix = SKB_CS_OFF(skb); /* signed! */
++
++ pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
++ pbd->cs_offset = fix / 2;
++
++ DP(NETIF_MSG_TX_QUEUED,
++ "hlen %d offset %d fix %d csum before fix %x\n",
++ le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
++ SKB_CS(skb));
++
++ /* HW bug: fixup the CSUM */
++ pbd->tcp_pseudo_csum =
++ bnx2x_csum_fix(skb_transport_header(skb),
++ SKB_CS(skb), fix);
++
++ DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
++ pbd->tcp_pseudo_csum);
++ }
++ }
++
++ mapping = pci_map_single(bp->pdev, skb->data,
++ skb_headlen(skb), PCI_DMA_TODEVICE);
++
++ tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
++ tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
++ /* nbd = frags + linear BD + (parse BD if present) */
++ nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
++ tx_bd->nbd = cpu_to_le16(nbd);
++ tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
++
++ DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
++ " nbytes %d flags %x vlan %x\n",
++ tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
++ le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
++ le16_to_cpu(tx_bd->vlan));
++
++ if (xmit_type & XMIT_GSO) {
++
++ DP(NETIF_MSG_TX_QUEUED,
++ "TSO packet len %d hlen %d total len %d tso size %d\n",
++ skb->len, hlen, skb_headlen(skb),
++ skb_shinfo(skb)->gso_size);
++
++ tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
++
++ /* FW wants headers and data in separate BDs */
++ if (unlikely(skb_headlen(skb) > hlen))
++ bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
++ bd_prod, ++nbd);
++
++ pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
++ pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
++ pbd->tcp_flags = pbd_tcp_flags(skb);
++
++ if (xmit_type & XMIT_GSO_V4) {
++ pbd->ip_id = swab16(ip_hdr(skb)->id);
++ pbd->tcp_pseudo_csum =
++ swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
++ ip_hdr(skb)->daddr,
++ 0, IPPROTO_TCP, 0));
++
++ } else
++ pbd->tcp_pseudo_csum =
++ swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
++ &ipv6_hdr(skb)->daddr,
++ 0, IPPROTO_TCP, 0));
++
++ pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
++ }
++
++ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
++ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
++
++ bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
++ tx_bd = &fp->tx_desc_ring[bd_prod];
++
++ mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
++ frag->size, PCI_DMA_TODEVICE);
++
++ tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
++ tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
++ tx_bd->nbytes = cpu_to_le16(frag->size);
++ tx_bd->vlan = cpu_to_le16(pkt_prod);
++ tx_bd->bd_flags.as_bitfield = 0;
++
++ DP(NETIF_MSG_TX_QUEUED,
++ "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
++ i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
++ le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
++ }
++
++ /* now at last mark the BD as the last BD */
++ tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
++
++ DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
++ tx_bd, tx_bd->bd_flags.as_bitfield);
++
++ bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
++
++ /* now send a tx doorbell, counting the next BD
++ * if the packet contains or ends with it
++ */
++ if (TX_BD_POFF(bd_prod) < nbd)
++ nbd++;
++
++ if (pbd)
++ DP(NETIF_MSG_TX_QUEUED,
++ "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
++ " tcp_flags %x xsum %x seq %u hlen %u\n",
++ pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
++ pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
++ pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
++
++ DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
++
++ /*
++ * Make sure that the BD data is updated before updating the producer
++ * since FW might read the BD right after the producer is updated.
++ * This is only applicable for weak-ordered memory model archs such
++ * as IA-64. The following barrier is also mandatory since FW will
++ * assumes packets must have BDs.
++ */
++ wmb();
++
++ fp->hw_tx_prods->bds_prod =
++ cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
++ mb(); /* FW restriction: must not reorder writing nbd and packets */
++ fp->hw_tx_prods->packets_prod =
++ cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
++ DOORBELL(bp, FP_IDX(fp), 0);
++
++ mmiowb();
++
++ fp->tx_bd_prod += nbd;
++ dev->trans_start = jiffies;
++
++ if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
++ /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
++ if we put Tx into XOFF state. */
++ smp_mb();
++ netif_stop_queue(dev);
++ bp->eth_stats.driver_xoff++;
++ /* re-check: a racing bnx2x_tx_int may have freed space */
++ if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
++ netif_wake_queue(dev);
++ }
++ fp->tx_pkt++;
++
++ return NETDEV_TX_OK;
++}
++
++/* called with rtnl_lock
++ * ndo_open: power the device up to D0 and load the NIC. */
++static int bnx2x_open(struct net_device *dev)
++{
++ struct bnx2x *bp = netdev_priv(dev);
++
++ /* report no-carrier until the link comes up */
++ netif_carrier_off(dev);
++
++ bnx2x_set_power_state(bp, PCI_D0);
++
++ return bnx2x_nic_load(bp, LOAD_OPEN);
++}
++
++/* called with rtnl_lock
++ * ndo_stop: unload the NIC and, when we are the last user of the PCI
++ * device (and not on an emulation/FPGA chip), drop to D3hot. */
++static int bnx2x_close(struct net_device *dev)
++{
++ struct bnx2x *bp = netdev_priv(dev);
++
++ /* Unload the driver, release IRQs */
++ bnx2x_nic_unload(bp, UNLOAD_CLOSE);
++ if (atomic_read(&bp->pdev->enable_cnt) == 1)
++ if (!CHIP_REV_IS_SLOW(bp))
++ bnx2x_set_power_state(bp, PCI_D3hot);
++
++ return 0;
++}
++
++/* called with netif_tx_lock from set_multicast
++ *
++ * Program the RX filtering mode: promiscuous, all-multicast, or a
++ * specific multicast set.  E1 chips use CAM entries via a SET_MAC
++ * ramrod; E1H chips use a 256-bit hash filter in registers.
++ */
++static void bnx2x_set_rx_mode(struct net_device *dev)
++{
++ struct bnx2x *bp = netdev_priv(dev);
++ u32 rx_mode = BNX2X_RX_MODE_NORMAL;
++ int port = BP_PORT(bp);
++
++ if (bp->state != BNX2X_STATE_OPEN) {
++ DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
++ return;
++ }
++
++ DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
++
++ if (dev->flags & IFF_PROMISC)
++ rx_mode = BNX2X_RX_MODE_PROMISC;
++
++ else if ((dev->flags & IFF_ALLMULTI) ||
++ ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
++ rx_mode = BNX2X_RX_MODE_ALLMULTI;
++
++ else { /* some multicasts */
++ if (CHIP_IS_E1(bp)) {
++ int i, old, offset;
++ struct dev_mc_list *mclist;
++ struct mac_configuration_cmd *config =
++ bnx2x_sp(bp, mcast_config);
++
++ /* fill one CAM entry per multicast address */
++ for (i = 0, mclist = dev->mc_list;
++ mclist && (i < dev->mc_count);
++ i++, mclist = mclist->next) {
++
++ config->config_table[i].
++ cam_entry.msb_mac_addr =
++ swab16(*(u16 *)&mclist->dmi_addr[0]);
++ config->config_table[i].
++ cam_entry.middle_mac_addr =
++ swab16(*(u16 *)&mclist->dmi_addr[2]);
++ config->config_table[i].
++ cam_entry.lsb_mac_addr =
++ swab16(*(u16 *)&mclist->dmi_addr[4]);
++ config->config_table[i].cam_entry.flags =
++ cpu_to_le16(port);
++ config->config_table[i].
++ target_table_entry.flags = 0;
++ config->config_table[i].
++ target_table_entry.client_id = 0;
++ config->config_table[i].
++ target_table_entry.vlan_id = 0;
++
++ DP(NETIF_MSG_IFUP,
++ "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
++ config->config_table[i].
++ cam_entry.msb_mac_addr,
++ config->config_table[i].
++ cam_entry.middle_mac_addr,
++ config->config_table[i].
++ cam_entry.lsb_mac_addr);
++ }
++ /* invalidate entries left over from a longer
++ * previous list */
++ old = config->hdr.length;
++ if (old > i) {
++ for (; i < old; i++) {
++ if (CAM_IS_INVALID(config->
++ config_table[i])) {
++ /* already invalidated */
++ break;
++ }
++ /* invalidate */
++ CAM_INVALIDATE(config->
++ config_table[i]);
++ }
++ }
++
++ if (CHIP_REV_IS_SLOW(bp))
++ offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
++ else
++ offset = BNX2X_MAX_MULTICAST*(1 + port);
++
++ config->hdr.length = i;
++ config->hdr.offset = offset;
++ config->hdr.client_id = bp->fp->cl_id;
++ config->hdr.reserved1 = 0;
++
++ bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
++ U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
++ U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
++ 0);
++ } else { /* E1H */
++ /* Accept one or more multicasts */
++ struct dev_mc_list *mclist;
++ u32 mc_filter[MC_HASH_SIZE];
++ u32 crc, bit, regidx;
++ int i;
++
++ memset(mc_filter, 0, 4 * MC_HASH_SIZE);
++
++ for (i = 0, mclist = dev->mc_list;
++ mclist && (i < dev->mc_count);
++ i++, mclist = mclist->next) {
++
++ DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
++ mclist->dmi_addr);
++
++ /* hash bit = top byte of CRC32c of the MAC */
++ crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
++ bit = (crc >> 24) & 0xff;
++ regidx = bit >> 5;
++ bit &= 0x1f;
++ mc_filter[regidx] |= (1 << bit);
++ }
++
++ for (i = 0; i < MC_HASH_SIZE; i++)
++ REG_WR(bp, MC_HASH_OFFSET(bp, i),
++ mc_filter[i]);
++ }
++ }
++
++ bp->rx_mode = rx_mode;
++ bnx2x_set_storm_rx_mode(bp);
++}
++
++/* called with rtnl_lock
++ * ndo_set_mac_address: validate and store the new MAC; if the device
++ * is running, reprogram it into the chip (E1 vs E1H path). */
++static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
++{
++ struct sockaddr *addr = p;
++ struct bnx2x *bp = netdev_priv(dev);
++
++ if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
++ return -EINVAL;
++
++ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
++ if (netif_running(dev)) {
++ if (CHIP_IS_E1(bp))
++ bnx2x_set_mac_addr_e1(bp, 1);
++ else
++ bnx2x_set_mac_addr_e1h(bp, 1);
++ }
++
++ return 0;
++}
++
++/* called with rtnl_lock
++ * ndo_do_ioctl: MII ioctls only — read/write PHY registers via
++ * clause-45 accesses under the PHY mutex.  SIOCSMIIREG requires
++ * CAP_NET_ADMIN; anything else is -EOPNOTSUPP. */
++static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
++{
++ struct mii_ioctl_data *data = if_mii(ifr);
++ struct bnx2x *bp = netdev_priv(dev);
++ int port = BP_PORT(bp);
++ int err;
++
++ switch (cmd) {
++ case SIOCGMIIPHY:
++ data->phy_id = bp->port.phy_addr;
++
++ /* fallthrough */
++
++ case SIOCGMIIREG: {
++ u16 mii_regval;
++
++ if (!netif_running(dev))
++ return -EAGAIN;
++
++ mutex_lock(&bp->port.phy_mutex);
++ err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
++ DEFAULT_PHY_DEV_ADDR,
++ (data->reg_num & 0x1f), &mii_regval);
++ data->val_out = mii_regval;
++ mutex_unlock(&bp->port.phy_mutex);
++ return err;
++ }
++
++ case SIOCSMIIREG:
++ if (!capable(CAP_NET_ADMIN))
++ return -EPERM;
++
++ if (!netif_running(dev))
++ return -EAGAIN;
++
++ mutex_lock(&bp->port.phy_mutex);
++ err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
++ DEFAULT_PHY_DEV_ADDR,
++ (data->reg_num & 0x1f), data->val_in);
++ mutex_unlock(&bp->port.phy_mutex);
++ return err;
++
++ default:
++ /* do nothing */
++ break;
++ }
++
++ return -EOPNOTSUPP;
++}
++
++/* called with rtnl_lock */
++static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
++{
++ struct bnx2x *bp = netdev_priv(dev);
++ int rc = 0;
++
++ if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
++ ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
++ return -EINVAL;
++
++ /* This does not race with packet allocation
++ * because the actual alloc size is
++ * only updated as part of load
++ */
++ dev->mtu = new_mtu;
++
++ if (netif_running(dev)) {
++ bnx2x_nic_unload(bp, UNLOAD_NORMAL);
++ rc = bnx2x_nic_load(bp, LOAD_NORMAL);
++ }
++
++ return rc;
++}
++
++static void bnx2x_tx_timeout(struct net_device *dev)
++{
++ struct bnx2x *bp = netdev_priv(dev);
++
++#ifdef BNX2X_STOP_ON_ERROR
++ if (!bp->panic)
++ bnx2x_panic();
++#endif
++ /* This allows the netif to be shutdown gracefully before resetting */
++ schedule_work(&bp->reset_task);
++}
++
++#ifdef BCM_VLAN
++/* called with rtnl_lock */
++static void bnx2x_vlan_rx_register(struct net_device *dev,
++ struct vlan_group *vlgrp)
++{
++ struct bnx2x *bp = netdev_priv(dev);
++
++ bp->vlgrp = vlgrp;
++
++ /* Set flags according to the required capabilities */
++ bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
++
++ if (dev->features & NETIF_F_HW_VLAN_TX)
++ bp->flags |= HW_VLAN_TX_FLAG;
++
++ if (dev->features & NETIF_F_HW_VLAN_RX)
++ bp->flags |= HW_VLAN_RX_FLAG;
++
++ if (netif_running(dev))
++ bnx2x_set_client_config(bp);
++}
++
++#endif
++
++#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
++static void poll_bnx2x(struct net_device *dev)
++{
++ struct bnx2x *bp = netdev_priv(dev);
++
++ disable_irq(bp->pdev->irq);
++ bnx2x_interrupt(bp->pdev->irq, dev);
++ enable_irq(bp->pdev->irq);
++}
++#endif
++
++static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
++ struct net_device *dev)
++{
++ struct bnx2x *bp;
++ int rc;
++
++ SET_NETDEV_DEV(dev, &pdev->dev);
++ bp = netdev_priv(dev);
++
++ bp->dev = dev;
++ bp->pdev = pdev;
++ bp->flags = 0;
++ bp->func = PCI_FUNC(pdev->devfn);
++
++ rc = pci_enable_device(pdev);
++ if (rc) {
++ printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
++ goto err_out;
++ }
++
++ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
++ printk(KERN_ERR PFX "Cannot find PCI device base address,"
++ " aborting\n");
++ rc = -ENODEV;
++ goto err_out_disable;
++ }
++
++ if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
++ printk(KERN_ERR PFX "Cannot find second PCI device"
++ " base address, aborting\n");
++ rc = -ENODEV;
++ goto err_out_disable;
++ }
++
++ if (atomic_read(&pdev->enable_cnt) == 1) {
++ rc = pci_request_regions(pdev, DRV_MODULE_NAME);
++ if (rc) {
++ printk(KERN_ERR PFX "Cannot obtain PCI resources,"
++ " aborting\n");
++ goto err_out_disable;
++ }
++
++ pci_set_master(pdev);
++ pci_save_state(pdev);
++ }
++
++ bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
++ if (bp->pm_cap == 0) {
++ printk(KERN_ERR PFX "Cannot find power management"
++ " capability, aborting\n");
++ rc = -EIO;
++ goto err_out_release;
++ }
++
++ bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
++ if (bp->pcie_cap == 0) {
++ printk(KERN_ERR PFX "Cannot find PCI Express capability,"
++ " aborting\n");
++ rc = -EIO;
++ goto err_out_release;
++ }
++
++ if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
++ bp->flags |= USING_DAC_FLAG;
++ if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
++ printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
++ " failed, aborting\n");
++ rc = -EIO;
++ goto err_out_release;
++ }
++
++ } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
++ printk(KERN_ERR PFX "System does not support DMA,"
++ " aborting\n");
++ rc = -EIO;
++ goto err_out_release;
++ }
++
++ dev->mem_start = pci_resource_start(pdev, 0);
++ dev->base_addr = dev->mem_start;
++ dev->mem_end = pci_resource_end(pdev, 0);
++
++ dev->irq = pdev->irq;
++
++ bp->regview = ioremap_nocache(dev->base_addr,
++ pci_resource_len(pdev, 0));
++ if (!bp->regview) {
++ printk(KERN_ERR PFX "Cannot map register space, aborting\n");
++ rc = -ENOMEM;
++ goto err_out_release;
++ }
++
++ bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
++ min_t(u64, BNX2X_DB_SIZE,
++ pci_resource_len(pdev, 2)));
++ if (!bp->doorbells) {
++ printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
++ rc = -ENOMEM;
++ goto err_out_unmap;
++ }
++
++ bnx2x_set_power_state(bp, PCI_D0);
++
++ /* clean indirect addresses */
++ pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
++ PCICFG_VENDOR_ID_OFFSET);
++ REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
++ REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
++ REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
++ REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
++
++ dev->hard_start_xmit = bnx2x_start_xmit;
++ dev->watchdog_timeo = TX_TIMEOUT;
++
++ dev->ethtool_ops = &bnx2x_ethtool_ops;
++ dev->open = bnx2x_open;
++ dev->stop = bnx2x_close;
++ dev->set_multicast_list = bnx2x_set_rx_mode;
++ dev->set_mac_address = bnx2x_change_mac_addr;
++ dev->do_ioctl = bnx2x_ioctl;
++ dev->change_mtu = bnx2x_change_mtu;
++ dev->tx_timeout = bnx2x_tx_timeout;
++#ifdef BCM_VLAN
++ dev->vlan_rx_register = bnx2x_vlan_rx_register;
++#endif
++#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
++ dev->poll_controller = poll_bnx2x;
++#endif
++ dev->features |= NETIF_F_SG;
++ dev->features |= NETIF_F_HW_CSUM;
++ if (bp->flags & USING_DAC_FLAG)
++ dev->features |= NETIF_F_HIGHDMA;
++#ifdef BCM_VLAN
++ dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
++ bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
++#endif
++ dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
++ dev->features |= NETIF_F_TSO6;
++
++ return 0;
++
++err_out_unmap:
++ if (bp->regview) {
++ iounmap(bp->regview);
++ bp->regview = NULL;
++ }
++ if (bp->doorbells) {
++ iounmap(bp->doorbells);
++ bp->doorbells = NULL;
++ }
++
++err_out_release:
++ if (atomic_read(&pdev->enable_cnt) == 1)
++ pci_release_regions(pdev);
++
++err_out_disable:
++ pci_disable_device(pdev);
++ pci_set_drvdata(pdev, NULL);
++
++err_out:
++ return rc;
++}
++
++static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
++{
++ u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
++
++ val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
++ return val;
++}
++
++/* return value of 1=2.5GHz 2=5GHz */
++static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
++{
++ u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
++
++ val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
++ return val;
++}
++
++static int __devinit bnx2x_init_one(struct pci_dev *pdev,
++ const struct pci_device_id *ent)
++{
++ static int version_printed;
++ struct net_device *dev = NULL;
++ struct bnx2x *bp;
++ int rc;
++
++ if (version_printed++ == 0)
++ printk(KERN_INFO "%s", version);
++
++ /* dev zeroed in init_etherdev */
++ dev = alloc_etherdev(sizeof(*bp));
++ if (!dev) {
++ printk(KERN_ERR PFX "Cannot allocate net device\n");
++ return -ENOMEM;
++ }
++
++ bp = netdev_priv(dev);
++ bp->msglevel = debug;
++
++ rc = bnx2x_init_dev(pdev, dev);
++ if (rc < 0) {
++ free_netdev(dev);
++ return rc;
++ }
++
++ pci_set_drvdata(pdev, dev);
++
++ rc = bnx2x_init_bp(bp);
++ if (rc)
++ goto init_one_exit;
++
++ rc = register_netdev(dev);
++ if (rc) {
++ dev_err(&pdev->dev, "Cannot register net device\n");
++ goto init_one_exit;
++ }
++
++ bp->common.name = board_info[ent->driver_data].name;
++ printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
++ " IRQ %d, ", dev->name, bp->common.name,
++ (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
++ bnx2x_get_pcie_width(bp),
++ (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
++ dev->base_addr, bp->pdev->irq);
++ printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
++ return 0;
++
++init_one_exit:
++ if (bp->regview)
++ iounmap(bp->regview);
++
++ if (bp->doorbells)
++ iounmap(bp->doorbells);
++
++ free_netdev(dev);
++
++ if (atomic_read(&pdev->enable_cnt) == 1)
++ pci_release_regions(pdev);
++
++ pci_disable_device(pdev);
++ pci_set_drvdata(pdev, NULL);
++
++ return rc;
++}
++
++static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
++{
++ struct net_device *dev = pci_get_drvdata(pdev);
++ struct bnx2x *bp;
++
++ if (!dev) {
++ printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
++ return;
++ }
++ bp = netdev_priv(dev);
++
++ unregister_netdev(dev);
++
++ if (bp->regview)
++ iounmap(bp->regview);
++
++ if (bp->doorbells)
++ iounmap(bp->doorbells);
++
++ free_netdev(dev);
++
++ if (atomic_read(&pdev->enable_cnt) == 1)
++ pci_release_regions(pdev);
++
++ pci_disable_device(pdev);
++ pci_set_drvdata(pdev, NULL);
++}
++
++static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
++{
++ struct net_device *dev = pci_get_drvdata(pdev);
++ struct bnx2x *bp;
++
++ if (!dev) {
++ printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
++ return -ENODEV;
++ }
++ bp = netdev_priv(dev);
++
++ rtnl_lock();
++
++ pci_save_state(pdev);
++
++ if (!netif_running(dev)) {
++ rtnl_unlock();
++ return 0;
++ }
++
++ netif_device_detach(dev);
++
++ bnx2x_nic_unload(bp, UNLOAD_CLOSE);
++
++ bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
++
++ rtnl_unlock();
++
++ return 0;
++}
++
++static int bnx2x_resume(struct pci_dev *pdev)
++{
++ struct net_device *dev = pci_get_drvdata(pdev);
++ struct bnx2x *bp;
++ int rc;
++
++ if (!dev) {
++ printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
++ return -ENODEV;
++ }
++ bp = netdev_priv(dev);
++
++ rtnl_lock();
++
++ pci_restore_state(pdev);
++
++ if (!netif_running(dev)) {
++ rtnl_unlock();
++ return 0;
++ }
++
++ bnx2x_set_power_state(bp, PCI_D0);
++ netif_device_attach(dev);
++
++ rc = bnx2x_nic_load(bp, LOAD_OPEN);
++
++ rtnl_unlock();
++
++ return rc;
++}
++
++static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
++{
++ int i;
++
++ bp->state = BNX2X_STATE_ERROR;
++
++ bp->rx_mode = BNX2X_RX_MODE_NONE;
++
++ bnx2x_netif_stop(bp, 0);
++
++ del_timer_sync(&bp->timer);
++ bp->stats_state = STATS_STATE_DISABLED;
++ DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
++
++ /* Release IRQs */
++ bnx2x_free_irq(bp);
++
++ if (CHIP_IS_E1(bp)) {
++ struct mac_configuration_cmd *config =
++ bnx2x_sp(bp, mcast_config);
++
++ for (i = 0; i < config->hdr.length; i++)
++ CAM_INVALIDATE(config->config_table[i]);
++ }
++
++ /* Free SKBs, SGEs, TPA pool and driver internals */
++ bnx2x_free_skbs(bp);
++ for_each_queue(bp, i)
++ bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
++ bnx2x_free_mem(bp);
++
++ bp->state = BNX2X_STATE_CLOSED;
++
++ netif_carrier_off(bp->dev);
++
++ return 0;
++}
++
++static void bnx2x_eeh_recover(struct bnx2x *bp)
++{
++ u32 val;
++
++ mutex_init(&bp->port.phy_mutex);
++
++ bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
++ bp->link_params.shmem_base = bp->common.shmem_base;
++ BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
++
++ if (!bp->common.shmem_base ||
++ (bp->common.shmem_base < 0xA0000) ||
++ (bp->common.shmem_base >= 0xC0000)) {
++ BNX2X_DEV_INFO("MCP not active\n");
++ bp->flags |= NO_MCP_FLAG;
++ return;
++ }
++
++ val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
++ if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
++ != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
++ BNX2X_ERR("BAD MCP validity signature\n");
++
++ if (!BP_NOMCP(bp)) {
++ bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
++ & DRV_MSG_SEQ_NUMBER_MASK);
++ BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
++ }
++}
++
++/**
++ * bnx2x_io_error_detected - called when PCI error is detected
++ * @pdev: Pointer to PCI device
++ * @state: The current pci connection state
++ *
++ * This function is called after a PCI bus error affecting
++ * this device has been detected.
++ */
++static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
++ pci_channel_state_t state)
++{
++ struct net_device *dev = pci_get_drvdata(pdev);
++ struct bnx2x *bp = netdev_priv(dev);
++
++ rtnl_lock();
++
++ netif_device_detach(dev);
++
++ if (netif_running(dev))
++ bnx2x_eeh_nic_unload(bp);
++
++ pci_disable_device(pdev);
++
++ rtnl_unlock();
++
++ /* Request a slot reset */
++ return PCI_ERS_RESULT_NEED_RESET;
++}
++
++/**
++ * bnx2x_io_slot_reset - called after the PCI bus has been reset
++ * @pdev: Pointer to PCI device
++ *
++ * Restart the card from scratch, as if from a cold-boot.
++ */
++static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
++{
++ struct net_device *dev = pci_get_drvdata(pdev);
++ struct bnx2x *bp = netdev_priv(dev);
++
++ rtnl_lock();
++
++ if (pci_enable_device(pdev)) {
++ dev_err(&pdev->dev,
++ "Cannot re-enable PCI device after reset\n");
++ rtnl_unlock();
++ return PCI_ERS_RESULT_DISCONNECT;
++ }
++
++ pci_set_master(pdev);
++ pci_restore_state(pdev);
++
++ if (netif_running(dev))
++ bnx2x_set_power_state(bp, PCI_D0);
++
++ rtnl_unlock();
++
++ return PCI_ERS_RESULT_RECOVERED;
++}
++
++/**
++ * bnx2x_io_resume - called when traffic can start flowing again
++ * @pdev: Pointer to PCI device
++ *
++ * This callback is called when the error recovery driver tells us that
++ * its OK to resume normal operation.
++ */
++static void bnx2x_io_resume(struct pci_dev *pdev)
++{
++ struct net_device *dev = pci_get_drvdata(pdev);
++ struct bnx2x *bp = netdev_priv(dev);
++
++ rtnl_lock();
++
++ bnx2x_eeh_recover(bp);
++
++ if (netif_running(dev))
++ bnx2x_nic_load(bp, LOAD_NORMAL);
++
++ netif_device_attach(dev);
++
++ rtnl_unlock();
++}
++
++static struct pci_error_handlers bnx2x_err_handler = {
++ .error_detected = bnx2x_io_error_detected,
++ .slot_reset = bnx2x_io_slot_reset,
++ .resume = bnx2x_io_resume,
++};
++
++static struct pci_driver bnx2x_pci_driver = {
++ .name = DRV_MODULE_NAME,
++ .id_table = bnx2x_pci_tbl,
++ .probe = bnx2x_init_one,
++ .remove = __devexit_p(bnx2x_remove_one),
++ .suspend = bnx2x_suspend,
++ .resume = bnx2x_resume,
++ .err_handler = &bnx2x_err_handler,
++};
++
++static int __init bnx2x_init(void)
++{
++ bnx2x_wq = create_singlethread_workqueue("bnx2x");
++ if (bnx2x_wq == NULL) {
++ printk(KERN_ERR PFX "Cannot create workqueue\n");
++ return -ENOMEM;
++ }
++
++ return pci_register_driver(&bnx2x_pci_driver);
++}
++
++static void __exit bnx2x_cleanup(void)
++{
++ pci_unregister_driver(&bnx2x_pci_driver);
++
++ destroy_workqueue(bnx2x_wq);
++}
++
++module_init(bnx2x_init);
++module_exit(bnx2x_cleanup);
++
+diff --git a/drivers/net/bnx2x_reg.h b/drivers/net/bnx2x_reg.h
+index 5a1aa0b..fc957fa 100644
+--- a/drivers/net/bnx2x_reg.h
++++ b/drivers/net/bnx2x_reg.h
+@@ -1,12 +1,12 @@
+ /* bnx2x_reg.h: Broadcom Everest network driver.
+ *
+- * Copyright (c) 2007-2008 Broadcom Corporation
++ * Copyright (c) 2007-2009 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+- * The registers description starts with the regsister Access type followed
++ * The registers description starts with the register Access type followed
+ * by size in bits. For example [RW 32]. The access types are:
+ * R - Read only
+ * RC - Clear on read
+@@ -38,21 +38,19 @@
+ was asserted. */
+ #define BRB1_REG_NUM_OF_FULL_CYCLES_0 0x600c8
+ #define BRB1_REG_NUM_OF_FULL_CYCLES_1 0x600cc
+-#define BRB1_REG_NUM_OF_FULL_CYCLES_2 0x600d0
+-#define BRB1_REG_NUM_OF_FULL_CYCLES_3 0x600d4
+ #define BRB1_REG_NUM_OF_FULL_CYCLES_4 0x600d8
+ /* [ST 32] The number of cycles that the pause signal towards MAC #0 was
+ asserted. */
+ #define BRB1_REG_NUM_OF_PAUSE_CYCLES_0 0x600b8
+ #define BRB1_REG_NUM_OF_PAUSE_CYCLES_1 0x600bc
+-#define BRB1_REG_NUM_OF_PAUSE_CYCLES_2 0x600c0
+-#define BRB1_REG_NUM_OF_PAUSE_CYCLES_3 0x600c4
+ /* [RW 10] Write client 0: De-assert pause threshold. */
+ #define BRB1_REG_PAUSE_HIGH_THRESHOLD_0 0x60078
+ #define BRB1_REG_PAUSE_HIGH_THRESHOLD_1 0x6007c
+ /* [RW 10] Write client 0: Assert pause threshold. */
+ #define BRB1_REG_PAUSE_LOW_THRESHOLD_0 0x60068
+ #define BRB1_REG_PAUSE_LOW_THRESHOLD_1 0x6006c
++/* [R 24] The number of full blocks occupied by port. */
++#define BRB1_REG_PORT_NUM_OCC_BLOCKS_0 0x60094
+ /* [RW 1] Reset the design by software. */
+ #define BRB1_REG_SOFT_RESET 0x600dc
+ /* [R 5] Used to read the value of the XX protection CAM occupancy counter. */
+@@ -72,6 +70,8 @@
+ #define CCM_REG_CCM_INT_MASK 0xd01e4
+ /* [R 11] Interrupt register #0 read */
+ #define CCM_REG_CCM_INT_STS 0xd01d8
++/* [R 27] Parity register #0 read */
++#define CCM_REG_CCM_PRTY_STS 0xd01e8
+ /* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS
+ REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
+ Is used to determine the number of the AG context REG-pairs written back;
+@@ -125,6 +125,10 @@
+ stands for weight 8 (the most prioritised); 1 stands for weight 1(least
+ prioritised); 2 stands for weight 2; tc. */
+ #define CCM_REG_CQM_P_WEIGHT 0xd00b8
++/* [RW 3] The weight of the QM (secondary) input in the WRR mechanism. 0
++ stands for weight 8 (the most prioritised); 1 stands for weight 1(least
++ prioritised); 2 stands for weight 2; tc. */
++#define CCM_REG_CQM_S_WEIGHT 0xd00bc
+ /* [RW 1] Input SDM Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+@@ -132,6 +136,10 @@
+ /* [RC 1] Set when the message length mismatch (relative to last indication)
+ at the SDM interface is detected. */
+ #define CCM_REG_CSDM_LENGTH_MIS 0xd0170
++/* [RW 3] The weight of the SDM input in the WRR mechanism. 0 stands for
++ weight 8 (the most prioritised); 1 stands for weight 1(least
++ prioritised); 2 stands for weight 2; tc. */
++#define CCM_REG_CSDM_WEIGHT 0xd00b4
+ /* [RW 28] The CM header for QM formatting in case of an error in the QM
+ inputs. */
+ #define CCM_REG_ERR_CCM_HDR 0xd0094
+@@ -190,25 +198,20 @@
+ weight 8 (the most prioritised); 1 stands for weight 1(least
+ prioritised); 2 stands for weight 2; tc. */
+ #define CCM_REG_PBF_WEIGHT 0xd00ac
+-/* [RW 6] The physical queue number of queue number 1 per port index. */
+ #define CCM_REG_PHYS_QNUM1_0 0xd0134
+ #define CCM_REG_PHYS_QNUM1_1 0xd0138
+-/* [RW 6] The physical queue number of queue number 2 per port index. */
+ #define CCM_REG_PHYS_QNUM2_0 0xd013c
+ #define CCM_REG_PHYS_QNUM2_1 0xd0140
+-/* [RW 6] The physical queue number of queue number 3 per port index. */
+ #define CCM_REG_PHYS_QNUM3_0 0xd0144
+-/* [RW 6] The physical queue number of queue number 0 with QOS equal 0 port
+- index 0. */
++#define CCM_REG_PHYS_QNUM3_1 0xd0148
+ #define CCM_REG_QOS_PHYS_QNUM0_0 0xd0114
+ #define CCM_REG_QOS_PHYS_QNUM0_1 0xd0118
+-/* [RW 6] The physical queue number of queue number 0 with QOS equal 1 port
+- index 0. */
+ #define CCM_REG_QOS_PHYS_QNUM1_0 0xd011c
+ #define CCM_REG_QOS_PHYS_QNUM1_1 0xd0120
+-/* [RW 6] The physical queue number of queue number 0 with QOS equal 2 port
+- index 0. */
+ #define CCM_REG_QOS_PHYS_QNUM2_0 0xd0124
++#define CCM_REG_QOS_PHYS_QNUM2_1 0xd0128
++#define CCM_REG_QOS_PHYS_QNUM3_0 0xd012c
++#define CCM_REG_QOS_PHYS_QNUM3_1 0xd0130
+ /* [RW 1] STORM - CM Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+@@ -216,6 +219,11 @@
+ /* [RC 1] Set when the message length mismatch (relative to last indication)
+ at the STORM interface is detected. */
+ #define CCM_REG_STORM_LENGTH_MIS 0xd016c
++/* [RW 3] The weight of the STORM input in the WRR (Weighted Round robin)
++ mechanism. 0 stands for weight 8 (the most prioritised); 1 stands for
++ weight 1(least prioritised); 2 stands for weight 2 (more prioritised);
++ tc. */
++#define CCM_REG_STORM_WEIGHT 0xd009c
+ /* [RW 1] Input tsem Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+@@ -253,6 +261,7 @@
+ mechanism. The fields are: [5:0] - message length; [12:6] - message
+ pointer; 18:13] - next pointer. */
+ #define CCM_REG_XX_DESCR_TABLE 0xd0300
++#define CCM_REG_XX_DESCR_TABLE_SIZE 36
+ /* [R 7] Used to read the value of XX protection Free counter. */
+ #define CCM_REG_XX_FREE 0xd0184
+ /* [RW 6] Initial value for the credit counter; responsible for fulfilling
+@@ -296,6 +305,8 @@
+ /* [WB 24] MATT ram access. each entry has the following
+ format:{RegionLength[11:0]; egionOffset[11:0]} */
+ #define CDU_REG_MATT 0x101100
++/* [RW 1] when this bit is set the CDU operates in e1hmf mode */
++#define CDU_REG_MF_MODE 0x101050
+ /* [R 1] indication the initializing the activity counter by the hardware
+ was done. */
+ #define CFC_REG_AC_INIT_DONE 0x104078
+@@ -325,23 +336,67 @@
+ set one of these bits. the bit description can be found in CFC
+ specifications */
+ #define CFC_REG_ERROR_VECTOR 0x10403c
++/* [WB 93] LCID info ram access */
++#define CFC_REG_INFO_RAM 0x105000
++#define CFC_REG_INFO_RAM_SIZE 1024
+ #define CFC_REG_INIT_REG 0x10404c
++#define CFC_REG_INTERFACES 0x104058
+ /* [RW 24] {weight_load_client7[2:0] to weight_load_client0[2:0]}. this
+ field allows changing the priorities of the weighted-round-robin arbiter
+ which selects which CFC load client should be served next */
+ #define CFC_REG_LCREQ_WEIGHTS 0x104084
++/* [RW 16] Link List ram access; data = {prev_lcid; ext_lcid} */
++#define CFC_REG_LINK_LIST 0x104c00
++#define CFC_REG_LINK_LIST_SIZE 256
+ /* [R 1] indication the initializing the link list by the hardware was done. */
+ #define CFC_REG_LL_INIT_DONE 0x104074
+ /* [R 9] Number of allocated LCIDs which are at empty state */
+ #define CFC_REG_NUM_LCIDS_ALLOC 0x104020
+ /* [R 9] Number of Arriving LCIDs in Link List Block */
+ #define CFC_REG_NUM_LCIDS_ARRIVING 0x104004
+-/* [R 9] Number of Inside LCIDs in Link List Block */
+-#define CFC_REG_NUM_LCIDS_INSIDE 0x104008
+ /* [R 9] Number of Leaving LCIDs in Link List Block */
+ #define CFC_REG_NUM_LCIDS_LEAVING 0x104018
+ /* [RW 8] The event id for aggregated interrupt 0 */
+ #define CSDM_REG_AGG_INT_EVENT_0 0xc2038
++#define CSDM_REG_AGG_INT_EVENT_1 0xc203c
++#define CSDM_REG_AGG_INT_EVENT_10 0xc2060
++#define CSDM_REG_AGG_INT_EVENT_11 0xc2064
++#define CSDM_REG_AGG_INT_EVENT_12 0xc2068
++#define CSDM_REG_AGG_INT_EVENT_13 0xc206c
++#define CSDM_REG_AGG_INT_EVENT_14 0xc2070
++#define CSDM_REG_AGG_INT_EVENT_15 0xc2074
++#define CSDM_REG_AGG_INT_EVENT_16 0xc2078
++#define CSDM_REG_AGG_INT_EVENT_17 0xc207c
++#define CSDM_REG_AGG_INT_EVENT_18 0xc2080
++#define CSDM_REG_AGG_INT_EVENT_19 0xc2084
++#define CSDM_REG_AGG_INT_EVENT_2 0xc2040
++#define CSDM_REG_AGG_INT_EVENT_20 0xc2088
++#define CSDM_REG_AGG_INT_EVENT_21 0xc208c
++#define CSDM_REG_AGG_INT_EVENT_22 0xc2090
++#define CSDM_REG_AGG_INT_EVENT_23 0xc2094
++#define CSDM_REG_AGG_INT_EVENT_24 0xc2098
++#define CSDM_REG_AGG_INT_EVENT_25 0xc209c
++#define CSDM_REG_AGG_INT_EVENT_26 0xc20a0
++#define CSDM_REG_AGG_INT_EVENT_27 0xc20a4
++#define CSDM_REG_AGG_INT_EVENT_28 0xc20a8
++#define CSDM_REG_AGG_INT_EVENT_29 0xc20ac
++#define CSDM_REG_AGG_INT_EVENT_3 0xc2044
++#define CSDM_REG_AGG_INT_EVENT_30 0xc20b0
++#define CSDM_REG_AGG_INT_EVENT_31 0xc20b4
++#define CSDM_REG_AGG_INT_EVENT_4 0xc2048
++/* [RW 1] The T bit for aggregated interrupt 0 */
++#define CSDM_REG_AGG_INT_T_0 0xc20b8
++#define CSDM_REG_AGG_INT_T_1 0xc20bc
++#define CSDM_REG_AGG_INT_T_10 0xc20e0
++#define CSDM_REG_AGG_INT_T_11 0xc20e4
++#define CSDM_REG_AGG_INT_T_12 0xc20e8
++#define CSDM_REG_AGG_INT_T_13 0xc20ec
++#define CSDM_REG_AGG_INT_T_14 0xc20f0
++#define CSDM_REG_AGG_INT_T_15 0xc20f4
++#define CSDM_REG_AGG_INT_T_16 0xc20f8
++#define CSDM_REG_AGG_INT_T_17 0xc20fc
++#define CSDM_REG_AGG_INT_T_18 0xc2100
++#define CSDM_REG_AGG_INT_T_19 0xc2104
+ /* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */
+ #define CSDM_REG_CFC_RSP_START_ADDR 0xc2008
+ /* [RW 16] The maximum value of the competion counter #0 */
+@@ -358,6 +413,9 @@
+ /* [RW 32] Interrupt mask register #0 read/write */
+ #define CSDM_REG_CSDM_INT_MASK_0 0xc229c
+ #define CSDM_REG_CSDM_INT_MASK_1 0xc22ac
++/* [R 32] Interrupt register #0 read */
++#define CSDM_REG_CSDM_INT_STS_0 0xc2290
++#define CSDM_REG_CSDM_INT_STS_1 0xc22a0
+ /* [RW 11] Parity mask register #0 read/write */
+ #define CSDM_REG_CSDM_PRTY_MASK 0xc22bc
+ /* [R 11] Parity register #0 read */
+@@ -443,6 +501,9 @@
+ /* [RW 32] Interrupt mask register #0 read/write */
+ #define CSEM_REG_CSEM_INT_MASK_0 0x200110
+ #define CSEM_REG_CSEM_INT_MASK_1 0x200120
++/* [R 32] Interrupt register #0 read */
++#define CSEM_REG_CSEM_INT_STS_0 0x200104
++#define CSEM_REG_CSEM_INT_STS_1 0x200114
+ /* [RW 32] Parity mask register #0 read/write */
+ #define CSEM_REG_CSEM_PRTY_MASK_0 0x200130
+ #define CSEM_REG_CSEM_PRTY_MASK_1 0x200140
+@@ -453,9 +514,8 @@
+ #define CSEM_REG_ENABLE_OUT 0x2000a8
+ /* [RW 32] This address space contains all registers and memories that are
+ placed in SEM_FAST block. The SEM_FAST registers are described in
+- appendix B. In order to access the SEM_FAST registers the base address
+- CSEM_REGISTERS_FAST_MEMORY (Offset: 0x220000) should be added to each
+- SEM_FAST register offset. */
++ appendix B. In order to access the sem_fast registers the base address
++ ~fast_memory.fast_memory should be added to eachsem_fast register offset. */
+ #define CSEM_REG_FAST_MEMORY 0x220000
+ /* [RW 1] Disables input messages from FIC0 May be updated during run_time
+ by the microcode */
+@@ -539,13 +599,10 @@
+ #define DBG_REG_DBG_PRTY_MASK 0xc0a8
+ /* [R 1] Parity register #0 read */
+ #define DBG_REG_DBG_PRTY_STS 0xc09c
+-/* [RW 2] debug only: These bits indicate the credit for PCI request type 4
+- interface; MUST be configured AFTER pci_ext_buffer_strt_addr_lsb/msb are
+- configured */
+-#define DBG_REG_PCI_REQ_CREDIT 0xc120
+ /* [RW 32] Commands memory. The address to command X; row Y is to calculated
+ as 14*X+Y. */
+ #define DMAE_REG_CMD_MEM 0x102400
++#define DMAE_REG_CMD_MEM_SIZE 224
+ /* [RW 1] If 0 - the CRC-16c initial value is all zeroes; if 1 - the CRC-16c
+ initial value is all ones. */
+ #define DMAE_REG_CRC16C_INIT 0x10201c
+@@ -630,6 +687,8 @@
+ #define DORQ_REG_AGG_CMD3 0x17006c
+ /* [RW 28] UCM Header. */
+ #define DORQ_REG_CMHEAD_RX 0x170050
++/* [RW 32] Doorbell address for RBC doorbells (function 0). */
++#define DORQ_REG_DB_ADDR0 0x17008c
+ /* [RW 5] Interrupt mask register #0 read/write */
+ #define DORQ_REG_DORQ_INT_MASK 0x170180
+ /* [R 5] Interrupt register #0 read */
+@@ -690,75 +749,34 @@
+ #define HC_CONFIG_0_REG_SINGLE_ISR_EN_0 (0x1<<1)
+ #define HC_REG_AGG_INT_0 0x108050
+ #define HC_REG_AGG_INT_1 0x108054
+-/* [RW 16] attention bit and attention acknowledge bits status for port 0
+- and 1 according to the following address map: addr 0 - attn_bit_0; addr 1
+- - attn_ack_bit_0; addr 2 - attn_bit_1; addr 3 - attn_ack_bit_1; */
+ #define HC_REG_ATTN_BIT 0x108120
+-/* [RW 16] attn bits status index for attn bit msg; addr 0 - function 0;
+- addr 1 - functin 1 */
+ #define HC_REG_ATTN_IDX 0x108100
+-/* [RW 32] port 0 lower 32 bits address field for attn messag. */
+ #define HC_REG_ATTN_MSG0_ADDR_L 0x108018
+-/* [RW 32] port 1 lower 32 bits address field for attn messag. */
+ #define HC_REG_ATTN_MSG1_ADDR_L 0x108020
+-/* [RW 8] status block number for attn bit msg - function 0; */
+ #define HC_REG_ATTN_NUM_P0 0x108038
+-/* [RW 8] status block number for attn bit msg - function 1 */
+ #define HC_REG_ATTN_NUM_P1 0x10803c
++#define HC_REG_COMMAND_REG 0x108180
+ #define HC_REG_CONFIG_0 0x108000
+ #define HC_REG_CONFIG_1 0x108004
++#define HC_REG_FUNC_NUM_P0 0x1080ac
++#define HC_REG_FUNC_NUM_P1 0x1080b0
+ /* [RW 3] Parity mask register #0 read/write */
+ #define HC_REG_HC_PRTY_MASK 0x1080a0
+ /* [R 3] Parity register #0 read */
+ #define HC_REG_HC_PRTY_STS 0x108094
+-/* [RW 17] status block interrupt mask; one in each bit means unmask; zerow
+- in each bit means mask; bit 0 - default SB; bit 1 - SB_0; bit 2 - SB_1...
+- bit 16- SB_15; addr 0 - port 0; addr 1 - port 1 */
+ #define HC_REG_INT_MASK 0x108108
+-/* [RW 16] port 0 attn bit condition monitoring; each bit that is set will
+- lock a change fron 0 to 1 in the corresponding attention signals that
+- comes from the AEU */
+ #define HC_REG_LEADING_EDGE_0 0x108040
+ #define HC_REG_LEADING_EDGE_1 0x108048
+-/* [RW 16] all producer and consumer of port 0 according to the following
+- addresses; U_prod: 0-15; C_prod: 16-31; U_cons: 32-47; C_cons:48-63;
+- Defoult_prod: U/C/X/T/Attn-64/65/66/67/68; Defoult_cons:
+- U/C/X/T/Attn-69/70/71/72/73 */
+ #define HC_REG_P0_PROD_CONS 0x108200
+-/* [RW 16] all producer and consumer of port 1according to the following
+- addresses; U_prod: 0-15; C_prod: 16-31; U_cons: 32-47; C_cons:48-63;
+- Defoult_prod: U/C/X/T/Attn-64/65/66/67/68; Defoult_cons:
+- U/C/X/T/Attn-69/70/71/72/73 */
+ #define HC_REG_P1_PROD_CONS 0x108400
+-/* [W 1] This register is write only and has 4 addresses as follow: 0 =
+- clear all PBA bits port 0; 1 = clear all pending interrupts request
+- port0; 2 = clear all PBA bits port 1; 3 = clear all pending interrupts
+- request port1; here is no meaning for the data in this register */
+ #define HC_REG_PBA_COMMAND 0x108140
+ #define HC_REG_PCI_CONFIG_0 0x108010
+ #define HC_REG_PCI_CONFIG_1 0x108014
+-/* [RW 24] all counters acording to the following address: LSB: 0=read; 1=
+- read_clear; 0-71 = HW counters (the inside order is the same as the
+- interrupt table in the spec); 72-219 = SW counters 1 (stops after first
+- consumer upd) the inside order is: 72-103 - U_non_default_p0; 104-135
+- C_non_defaul_p0; 36-145 U/C/X/T/Attn_default_p0; 146-177
+- U_non_default_p1; 178-209 C_non_defaul_p1; 10-219 U/C/X/T/Attn_default_p1
+- ; 220-367 = SW counters 2 (stops when prod=cons) the inside order is:
+- 220-251 - U_non_default_p0; 252-283 C_non_defaul_p0; 84-293
+- U/C/X/T/Attn_default_p0; 294-325 U_non_default_p1; 326-357
+- C_non_defaul_p1; 58-367 U/C/X/T/Attn_default_p1 ; 368-515 = mailbox
+- counters; (the inside order of the mailbox counter is 368-431 U and C
+- non_default_p0; 432-441 U/C/X/T/Attn_default_p0; 442-505 U and C
+- non_default_p1; 506-515 U/C/X/T/Attn_default_p1) */
+ #define HC_REG_STATISTIC_COUNTERS 0x109000
+-/* [RW 16] port 0 attn bit condition monitoring; each bit that is set will
+- lock a change fron 1 to 0 in the corresponding attention signals that
+- comes from the AEU */
+ #define HC_REG_TRAILING_EDGE_0 0x108044
+ #define HC_REG_TRAILING_EDGE_1 0x10804c
+ #define HC_REG_UC_RAM_ADDR_0 0x108028
+ #define HC_REG_UC_RAM_ADDR_1 0x108030
+-/* [RW 16] ustorm address for coalesc now message */
+ #define HC_REG_USTORM_ADDR_FOR_COALESCE 0x108068
+ #define HC_REG_VQID_0 0x108008
+ #define HC_REG_VQID_1 0x10800c
+@@ -883,14 +901,16 @@
+ rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP Latched
+ ump_tx_parity; [31] MCP Latched scpad_parity; */
+ #define MISC_REG_AEU_AFTER_INVERT_4_MCP 0xa458
+-/* [W 11] write to this register results with the clear of the latched
++/* [W 14] write to this register results with the clear of the latched
+ signals; one in d0 clears RBCR latch; one in d1 clears RBCT latch; one in
+ d2 clears RBCN latch; one in d3 clears RBCU latch; one in d4 clears RBCP
+ latch; one in d5 clears GRC Latched timeout attention; one in d6 clears
+ GRC Latched reserved access attention; one in d7 clears Latched
+ rom_parity; one in d8 clears Latched ump_rx_parity; one in d9 clears
+- Latched ump_tx_parity; one in d10 clears Latched scpad_parity; read from
+- this register return zero */
++ Latched ump_tx_parity; one in d10 clears Latched scpad_parity (both
++ ports); one in d11 clears pxpv_misc_mps_attn; one in d12 clears
++ pxp_misc_exp_rom_attn0; one in d13 clears pxp_misc_exp_rom_attn1; read
++ from this register return zero */
+ #define MISC_REG_AEU_CLR_LATCH_SIGNAL 0xa45c
+ /* [RW 32] first 32b for enabling the output for function 0 output0. mapped
+ as follows: [0] NIG attention for function0; [1] NIG attention for
+@@ -907,7 +927,11 @@
+ TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw interrupt; */
+ #define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0 0xa06c
+ #define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1 0xa07c
++#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2 0xa08c
+ #define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_3 0xa09c
++#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_5 0xa0bc
++#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_6 0xa0cc
++#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_7 0xa0dc
+ /* [RW 32] first 32b for enabling the output for function 1 output0. mapped
+ as follows: [0] NIG attention for function0; [1] NIG attention for
+ function1; [2] GPIO1 function 1; [3] GPIO2 function 1; [4] GPIO3 function
+@@ -923,9 +947,13 @@
+ TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw interrupt; */
+ #define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 0xa10c
+ #define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 0xa11c
++#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 0xa12c
+ #define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_3 0xa13c
+-/* [RW 32] first 32b for enabling the output for close the gate nig 0.
+- mapped as follows: [0] NIG attention for function0; [1] NIG attention for
++#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_5 0xa15c
++#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_6 0xa16c
++#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_7 0xa17c
++/* [RW 32] first 32b for enabling the output for close the gate nig. mapped
++ as follows: [0] NIG attention for function0; [1] NIG attention for
+ function1; [2] GPIO1 function 0; [3] GPIO2 function 0; [4] GPIO3 function
+ 0; [5] GPIO4 function 0; [6] GPIO1 function 1; [7] GPIO2 function 1; [8]
+ GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event
+@@ -939,8 +967,8 @@
+ TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw interrupt; */
+ #define MISC_REG_AEU_ENABLE1_NIG_0 0xa0ec
+ #define MISC_REG_AEU_ENABLE1_NIG_1 0xa18c
+-/* [RW 32] first 32b for enabling the output for close the gate pxp 0.
+- mapped as follows: [0] NIG attention for function0; [1] NIG attention for
++/* [RW 32] first 32b for enabling the output for close the gate pxp. mapped
++ as follows: [0] NIG attention for function0; [1] NIG attention for
+ function1; [2] GPIO1 function 0; [3] GPIO2 function 0; [4] GPIO3 function
+ 0; [5] GPIO4 function 0; [6] GPIO1 function 1; [7] GPIO2 function 1; [8]
+ GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event
+@@ -984,34 +1012,34 @@
+ interrupt; */
+ #define MISC_REG_AEU_ENABLE2_FUNC_1_OUT_0 0xa110
+ #define MISC_REG_AEU_ENABLE2_FUNC_1_OUT_1 0xa120
+-/* [RW 32] second 32b for enabling the output for close the gate nig 0.
+- mapped as follows: [0] PBClient Parity error; [1] PBClient Hw interrupt;
+- [2] QM Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5]
+- Timers Hw interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8]
+- XCM Parity error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11]
+- XSEMI Hw interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw
+- interrupt; [14] NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI
+- core Parity error; [17] Vaux PCI core Hw interrupt; [18] Debug Parity
+- error; [19] Debug Hw interrupt; [20] USDM Parity error; [21] USDM Hw
+- interrupt; [22] UCM Parity error; [23] UCM Hw interrupt; [24] USEMI
+- Parity error; [25] USEMI Hw interrupt; [26] UPB Parity error; [27] UPB Hw
+- interrupt; [28] CSDM Parity error; [29] CSDM Hw interrupt; [30] CCM
+- Parity error; [31] CCM Hw interrupt; */
++/* [RW 32] second 32b for enabling the output for close the gate nig. mapped
++ as follows: [0] PBClient Parity error; [1] PBClient Hw interrupt; [2] QM
++ Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw
++ interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity
++ error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw
++ interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14]
++ NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error;
++ [17] Vaux PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw
++ interrupt; [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM
++ Parity error; [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI
++ Hw interrupt; [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM
++ Parity error; [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw
++ interrupt; */
+ #define MISC_REG_AEU_ENABLE2_NIG_0 0xa0f0
+ #define MISC_REG_AEU_ENABLE2_NIG_1 0xa190
+-/* [RW 32] second 32b for enabling the output for close the gate pxp 0.
+- mapped as follows: [0] PBClient Parity error; [1] PBClient Hw interrupt;
+- [2] QM Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5]
+- Timers Hw interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8]
+- XCM Parity error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11]
+- XSEMI Hw interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw
+- interrupt; [14] NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI
+- core Parity error; [17] Vaux PCI core Hw interrupt; [18] Debug Parity
+- error; [19] Debug Hw interrupt; [20] USDM Parity error; [21] USDM Hw
+- interrupt; [22] UCM Parity error; [23] UCM Hw interrupt; [24] USEMI
+- Parity error; [25] USEMI Hw interrupt; [26] UPB Parity error; [27] UPB Hw
+- interrupt; [28] CSDM Parity error; [29] CSDM Hw interrupt; [30] CCM
+- Parity error; [31] CCM Hw interrupt; */
++/* [RW 32] second 32b for enabling the output for close the gate pxp. mapped
++ as follows: [0] PBClient Parity error; [1] PBClient Hw interrupt; [2] QM
++ Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw
++ interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity
++ error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw
++ interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14]
++ NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error;
++ [17] Vaux PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw
++ interrupt; [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM
++ Parity error; [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI
++ Hw interrupt; [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM
++ Parity error; [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw
++ interrupt; */
+ #define MISC_REG_AEU_ENABLE2_PXP_0 0xa100
+ #define MISC_REG_AEU_ENABLE2_PXP_1 0xa1a0
+ /* [RW 32] third 32b for enabling the output for function 0 output0. mapped
+@@ -1044,34 +1072,34 @@
+ attn1; */
+ #define MISC_REG_AEU_ENABLE3_FUNC_1_OUT_0 0xa114
+ #define MISC_REG_AEU_ENABLE3_FUNC_1_OUT_1 0xa124
+-/* [RW 32] third 32b for enabling the output for close the gate nig 0.
+- mapped as follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2]
+- PXP Parity error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity
+- error; [5] PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC
+- Hw interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE
+- Parity error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13]
+- IGU (HC) Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt;
+- [16] pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0;
+- [20] MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0;
+- [23] SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST;
+- [26] SW timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers
+- attn_3 func1; [29] SW timers attn_4 func1; [30] General attn0; [31]
+- General attn1; */
++/* [RW 32] third 32b for enabling the output for close the gate nig. mapped
++ as follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP
++ Parity error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity error;
++ [5] PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC Hw
++ interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE Parity
++ error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13] IGU (HC)
++ Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt; [16]
++ pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0; [20]
++ MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0; [23]
++ SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW
++ timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3
++ func1; [29] SW timers attn_4 func1; [30] General attn0; [31] General
++ attn1; */
+ #define MISC_REG_AEU_ENABLE3_NIG_0 0xa0f4
+ #define MISC_REG_AEU_ENABLE3_NIG_1 0xa194
+-/* [RW 32] third 32b for enabling the output for close the gate pxp 0.
+- mapped as follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2]
+- PXP Parity error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity
+- error; [5] PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC
+- Hw interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE
+- Parity error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13]
+- IGU (HC) Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt;
+- [16] pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0;
+- [20] MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0;
+- [23] SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST;
+- [26] SW timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers
+- attn_3 func1; [29] SW timers attn_4 func1; [30] General attn0; [31]
+- General attn1; */
++/* [RW 32] third 32b for enabling the output for close the gate pxp. mapped
++ as follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP
++ Parity error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity error;
++ [5] PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC Hw
++ interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE Parity
++ error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13] IGU (HC)
++ Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt; [16]
++ pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0; [20]
++ MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0; [23]
++ SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW
++ timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3
++ func1; [29] SW timers attn_4 func1; [30] General attn0; [31] General
++ attn1; */
+ #define MISC_REG_AEU_ENABLE3_PXP_0 0xa104
+ #define MISC_REG_AEU_ENABLE3_PXP_1 0xa1a4
+ /* [RW 32] fourth 32b for enabling the output for function 0 output0.mapped
+@@ -1088,6 +1116,10 @@
+ Latched ump_tx_parity; [31] MCP Latched scpad_parity; */
+ #define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0 0xa078
+ #define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_2 0xa098
++#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_4 0xa0b8
++#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_5 0xa0c8
++#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_6 0xa0d8
++#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_7 0xa0e8
+ /* [RW 32] fourth 32b for enabling the output for function 1 output0.mapped
+ as follows: [0] General attn2; [1] General attn3; [2] General attn4; [3]
+ General attn5; [4] General attn6; [5] General attn7; [6] General attn8;
+@@ -1102,34 +1134,36 @@
+ Latched ump_tx_parity; [31] MCP Latched scpad_parity; */
+ #define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0 0xa118
+ #define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_2 0xa138
+-/* [RW 32] fourth 32b for enabling the output for close the gate nig
+- 0.mapped as follows: [0] General attn2; [1] General attn3; [2] General
+- attn4; [3] General attn5; [4] General attn6; [5] General attn7; [6]
+- General attn8; [7] General attn9; [8] General attn10; [9] General attn11;
+- [10] General attn12; [11] General attn13; [12] General attn14; [13]
+- General attn15; [14] General attn16; [15] General attn17; [16] General
+- attn18; [17] General attn19; [18] General attn20; [19] General attn21;
+- [20] Main power interrupt; [21] RBCR Latched attn; [22] RBCT Latched
+- attn; [23] RBCN Latched attn; [24] RBCU Latched attn; [25] RBCP Latched
+- attn; [26] GRC Latched timeout attention; [27] GRC Latched reserved
+- access attention; [28] MCP Latched rom_parity; [29] MCP Latched
+- ump_rx_parity; [30] MCP Latched ump_tx_parity; [31] MCP Latched
+- scpad_parity; */
++#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_4 0xa158
++#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_5 0xa168
++#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_6 0xa178
++#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_7 0xa188
++/* [RW 32] fourth 32b for enabling the output for close the gate nig.mapped
++ as follows: [0] General attn2; [1] General attn3; [2] General attn4; [3]
++ General attn5; [4] General attn6; [5] General attn7; [6] General attn8;
++ [7] General attn9; [8] General attn10; [9] General attn11; [10] General
++ attn12; [11] General attn13; [12] General attn14; [13] General attn15;
++ [14] General attn16; [15] General attn17; [16] General attn18; [17]
++ General attn19; [18] General attn20; [19] General attn21; [20] Main power
++ interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN
++ Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC
++ Latched timeout attention; [27] GRC Latched reserved access attention;
++ [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP
++ Latched ump_tx_parity; [31] MCP Latched scpad_parity; */
+ #define MISC_REG_AEU_ENABLE4_NIG_0 0xa0f8
+ #define MISC_REG_AEU_ENABLE4_NIG_1 0xa198
+-/* [RW 32] fourth 32b for enabling the output for close the gate pxp
+- 0.mapped as follows: [0] General attn2; [1] General attn3; [2] General
+- attn4; [3] General attn5; [4] General attn6; [5] General attn7; [6]
+- General attn8; [7] General attn9; [8] General attn10; [9] General attn11;
+- [10] General attn12; [11] General attn13; [12] General attn14; [13]
+- General attn15; [14] General attn16; [15] General attn17; [16] General
+- attn18; [17] General attn19; [18] General attn20; [19] General attn21;
+- [20] Main power interrupt; [21] RBCR Latched attn; [22] RBCT Latched
+- attn; [23] RBCN Latched attn; [24] RBCU Latched attn; [25] RBCP Latched
+- attn; [26] GRC Latched timeout attention; [27] GRC Latched reserved
+- access attention; [28] MCP Latched rom_parity; [29] MCP Latched
+- ump_rx_parity; [30] MCP Latched ump_tx_parity; [31] MCP Latched
+- scpad_parity; */
++/* [RW 32] fourth 32b for enabling the output for close the gate pxp.mapped
++ as follows: [0] General attn2; [1] General attn3; [2] General attn4; [3]
++ General attn5; [4] General attn6; [5] General attn7; [6] General attn8;
++ [7] General attn9; [8] General attn10; [9] General attn11; [10] General
++ attn12; [11] General attn13; [12] General attn14; [13] General attn15;
++ [14] General attn16; [15] General attn17; [16] General attn18; [17]
++ General attn19; [18] General attn20; [19] General attn21; [20] Main power
++ interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN
++ Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC
++ Latched timeout attention; [27] GRC Latched reserved access attention;
++ [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP
++ Latched ump_tx_parity; [31] MCP Latched scpad_parity; */
+ #define MISC_REG_AEU_ENABLE4_PXP_0 0xa108
+ #define MISC_REG_AEU_ENABLE4_PXP_1 0xa1a8
+ /* [RW 1] set/clr general attention 0; this will set/clr bit 94 in the aeu
+@@ -1148,6 +1182,7 @@
+ #define MISC_REG_AEU_GENERAL_ATTN_19 0xa04c
+ #define MISC_REG_AEU_GENERAL_ATTN_10 0xa028
+ #define MISC_REG_AEU_GENERAL_ATTN_11 0xa02c
++#define MISC_REG_AEU_GENERAL_ATTN_12 0xa030
+ #define MISC_REG_AEU_GENERAL_ATTN_2 0xa008
+ #define MISC_REG_AEU_GENERAL_ATTN_20 0xa050
+ #define MISC_REG_AEU_GENERAL_ATTN_21 0xa054
+@@ -1158,6 +1193,7 @@
+ #define MISC_REG_AEU_GENERAL_ATTN_7 0xa01c
+ #define MISC_REG_AEU_GENERAL_ATTN_8 0xa020
+ #define MISC_REG_AEU_GENERAL_ATTN_9 0xa024
++#define MISC_REG_AEU_GENERAL_MASK 0xa61c
+ /* [RW 32] first 32b for inverting the input for function 0; for each bit:
+ 0= do not invert; 1= invert; mapped as follows: [0] NIG attention for
+ function0; [1] NIG attention for function1; [2] GPIO1 mcp; [3] GPIO2 mcp;
+@@ -1189,10 +1225,29 @@
+ #define MISC_REG_AEU_INVERTER_2_FUNC_0 0xa230
+ #define MISC_REG_AEU_INVERTER_2_FUNC_1 0xa240
+ /* [RW 10] [7:0] = mask 8 attention output signals toward IGU function0;
+- [9:8] = mask close the gates signals of function 0 toward PXP [8] and NIG
+- [9]. Zero = mask; one = unmask */
++ [9:8] = raserved. Zero = mask; one = unmask */
+ #define MISC_REG_AEU_MASK_ATTN_FUNC_0 0xa060
+ #define MISC_REG_AEU_MASK_ATTN_FUNC_1 0xa064
++/* [RW 1] If set a system kill occurred */
++#define MISC_REG_AEU_SYS_KILL_OCCURRED 0xa610
++/* [RW 32] Represent the status of the input vector to the AEU when a system
++ kill occurred. The register is reset in por reset. Mapped as follows: [0]
++ NIG attention for function0; [1] NIG attention for function1; [2] GPIO1
++ mcp; [3] GPIO2 mcp; [4] GPIO3 mcp; [5] GPIO4 mcp; [6] GPIO1 function 1;
++ [7] GPIO2 function 1; [8] GPIO3 function 1; [9] GPIO4 function 1; [10]
++ PCIE glue/PXP VPD event function0; [11] PCIE glue/PXP VPD event
++ function1; [12] PCIE glue/PXP Expansion ROM event0; [13] PCIE glue/PXP
++ Expansion ROM event1; [14] SPIO4; [15] SPIO5; [16] MSI/X indication for
++ mcp; [17] MSI/X indication for function 1; [18] BRB Parity error; [19]
++ BRB Hw interrupt; [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC
++ Parity error; [23] SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw
++ interrupt; [26] TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI
++ Parity error; [29] TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw
++ interrupt; */
++#define MISC_REG_AEU_SYS_KILL_STATUS_0 0xa600
++#define MISC_REG_AEU_SYS_KILL_STATUS_1 0xa604
++#define MISC_REG_AEU_SYS_KILL_STATUS_2 0xa608
++#define MISC_REG_AEU_SYS_KILL_STATUS_3 0xa60c
+ /* [R 4] This field indicates the type of the device. '0' - 2 Ports; '1' - 1
+ Port. */
+ #define MISC_REG_BOND_ID 0xa400
+@@ -1206,8 +1261,80 @@
+ starts at 0x0 for the A0 tape-out and increments by one for each
+ all-layer tape-out. */
+ #define MISC_REG_CHIP_REV 0xa40c
+-/* [RW 32] The following driver registers(1..6) represent 6 drivers and 32
+- clients. Each client can be controlled by one driver only. One in each
++/* [RW 32] The following driver registers(1...16) represent 16 drivers and
++ 32 clients. Each client can be controlled by one driver only. One in each
++ bit represent that this driver control the appropriate client (Ex: bit 5
++ is set means this driver control client number 5). addr1 = set; addr0 =
++ clear; read from both addresses will give the same result = status. write
++ to address 1 will set a request to control all the clients that their
++ appropriate bit (in the write command) is set. if the client is free (the
++ appropriate bit in all the other drivers is clear) one will be written to
++ that driver register; if the client isn't free the bit will remain zero.
++ if the appropriate bit is set (the driver request to gain control on a
++ client it already controls the ~MISC_REGISTERS_INT_STS.GENERIC_SW
++ interrupt will be asserted). write to address 0 will set a request to
++ free all the clients that their appropriate bit (in the write command) is
++ set. if the appropriate bit is clear (the driver request to free a client
++ it doesn't controls the ~MISC_REGISTERS_INT_STS.GENERIC_SW interrupt will
++ be asserted). */
++#define MISC_REG_DRIVER_CONTROL_10 0xa3e0
++#define MISC_REG_DRIVER_CONTROL_10_SIZE 2
++/* [RW 32] The following driver registers(1...16) represent 16 drivers and
++ 32 clients. Each client can be controlled by one driver only. One in each
++ bit represent that this driver control the appropriate client (Ex: bit 5
++ is set means this driver control client number 5). addr1 = set; addr0 =
++ clear; read from both addresses will give the same result = status. write
++ to address 1 will set a request to control all the clients that their
++ appropriate bit (in the write command) is set. if the client is free (the
++ appropriate bit in all the other drivers is clear) one will be written to
++ that driver register; if the client isn't free the bit will remain zero.
++ if the appropriate bit is set (the driver request to gain control on a
++ client it already controls the ~MISC_REGISTERS_INT_STS.GENERIC_SW
++ interrupt will be asserted). write to address 0 will set a request to
++ free all the clients that their appropriate bit (in the write command) is
++ set. if the appropriate bit is clear (the driver request to free a client
++ it doesn't controls the ~MISC_REGISTERS_INT_STS.GENERIC_SW interrupt will
++ be asserted). */
++#define MISC_REG_DRIVER_CONTROL_11 0xa3e8
++#define MISC_REG_DRIVER_CONTROL_11_SIZE 2
++/* [RW 32] The following driver registers(1...16) represent 16 drivers and
++ 32 clients. Each client can be controlled by one driver only. One in each
++ bit represent that this driver control the appropriate client (Ex: bit 5
++ is set means this driver control client number 5). addr1 = set; addr0 =
++ clear; read from both addresses will give the same result = status. write
++ to address 1 will set a request to control all the clients that their
++ appropriate bit (in the write command) is set. if the client is free (the
++ appropriate bit in all the other drivers is clear) one will be written to
++ that driver register; if the client isn't free the bit will remain zero.
++ if the appropriate bit is set (the driver request to gain control on a
++ client it already controls the ~MISC_REGISTERS_INT_STS.GENERIC_SW
++ interrupt will be asserted). write to address 0 will set a request to
++ free all the clients that their appropriate bit (in the write command) is
++ set. if the appropriate bit is clear (the driver request to free a client
++ it doesn't controls the ~MISC_REGISTERS_INT_STS.GENERIC_SW interrupt will
++ be asserted). */
++#define MISC_REG_DRIVER_CONTROL_12 0xa3f0
++#define MISC_REG_DRIVER_CONTROL_12_SIZE 2
++/* [RW 32] The following driver registers(1...16) represent 16 drivers and
++ 32 clients. Each client can be controlled by one driver only. One in each
++ bit represent that this driver control the appropriate client (Ex: bit 5
++ is set means this driver control client number 5). addr1 = set; addr0 =
++ clear; read from both addresses will give the same result = status. write
++ to address 1 will set a request to control all the clients that their
++ appropriate bit (in the write command) is set. if the client is free (the
++ appropriate bit in all the other drivers is clear) one will be written to
++ that driver register; if the client isn't free the bit will remain zero.
++ if the appropriate bit is set (the driver request to gain control on a
++ client it already controls the ~MISC_REGISTERS_INT_STS.GENERIC_SW
++ interrupt will be asserted). write to address 0 will set a request to
++ free all the clients that their appropriate bit (in the write command) is
++ set. if the appropriate bit is clear (the driver request to free a client
++ it doesn't controls the ~MISC_REGISTERS_INT_STS.GENERIC_SW interrupt will
++ be asserted). */
++#define MISC_REG_DRIVER_CONTROL_13 0xa3f8
++#define MISC_REG_DRIVER_CONTROL_13_SIZE 2
++/* [RW 32] The following driver registers(1...16) represent 16 drivers and
++ 32 clients. Each client can be controlled by one driver only. One in each
+ bit represent that this driver control the appropriate client (Ex: bit 5
+ is set means this driver control client number 5). addr1 = set; addr0 =
+ clear; read from both addresses will give the same result = status. write
+@@ -1223,6 +1350,64 @@
+ it doesn't controls the ~MISC_REGISTERS_INT_STS.GENERIC_SW interrupt will
+ be asserted). */
+ #define MISC_REG_DRIVER_CONTROL_1 0xa510
++#define MISC_REG_DRIVER_CONTROL_14 0xa5e0
++#define MISC_REG_DRIVER_CONTROL_14_SIZE 2
++/* [RW 32] The following driver registers(1...16) represent 16 drivers and
++ 32 clients. Each client can be controlled by one driver only. One in each
++ bit represent that this driver control the appropriate client (Ex: bit 5
++ is set means this driver control client number 5). addr1 = set; addr0 =
++ clear; read from both addresses will give the same result = status. write
++ to address 1 will set a request to control all the clients that their
++ appropriate bit (in the write command) is set. if the client is free (the
++ appropriate bit in all the other drivers is clear) one will be written to
++ that driver register; if the client isn't free the bit will remain zero.
++ if the appropriate bit is set (the driver request to gain control on a
++ client it already controls the ~MISC_REGISTERS_INT_STS.GENERIC_SW
++ interrupt will be asserted). write to address 0 will set a request to
++ free all the clients that their appropriate bit (in the write command) is
++ set. if the appropriate bit is clear (the driver request to free a client
++ it doesn't controls the ~MISC_REGISTERS_INT_STS.GENERIC_SW interrupt will
++ be asserted). */
++#define MISC_REG_DRIVER_CONTROL_15 0xa5e8
++#define MISC_REG_DRIVER_CONTROL_15_SIZE 2
++/* [RW 32] The following driver registers(1...16) represent 16 drivers and
++ 32 clients. Each client can be controlled by one driver only. One in each
++ bit represent that this driver control the appropriate client (Ex: bit 5
++ is set means this driver control client number 5). addr1 = set; addr0 =
++ clear; read from both addresses will give the same result = status. write
++ to address 1 will set a request to control all the clients that their
++ appropriate bit (in the write command) is set. if the client is free (the
++ appropriate bit in all the other drivers is clear) one will be written to
++ that driver register; if the client isn't free the bit will remain zero.
++ if the appropriate bit is set (the driver request to gain control on a
++ client it already controls the ~MISC_REGISTERS_INT_STS.GENERIC_SW
++ interrupt will be asserted). write to address 0 will set a request to
++ free all the clients that their appropriate bit (in the write command) is
++ set. if the appropriate bit is clear (the driver request to free a client
++ it doesn't controls the ~MISC_REGISTERS_INT_STS.GENERIC_SW interrupt will
++ be asserted). */
++#define MISC_REG_DRIVER_CONTROL_16 0xa5f0
++#define MISC_REG_DRIVER_CONTROL_16_SIZE 2
++/* [RW 32] The following driver registers(1...16) represent 16 drivers and
++ 32 clients. Each client can be controlled by one driver only. One in each
++ bit represent that this driver control the appropriate client (Ex: bit 5
++ is set means this driver control client number 5). addr1 = set; addr0 =
++ clear; read from both addresses will give the same result = status. write
++ to address 1 will set a request to control all the clients that their
++ appropriate bit (in the write command) is set. if the client is free (the
++ appropriate bit in all the other drivers is clear) one will be written to
++ that driver register; if the client isn't free the bit will remain zero.
++ if the appropriate bit is set (the driver request to gain control on a
++ client it already controls the ~MISC_REGISTERS_INT_STS.GENERIC_SW
++ interrupt will be asserted). write to address 0 will set a request to
++ free all the clients that their appropriate bit (in the write command) is
++ set. if the appropriate bit is clear (the driver request to free a client
++ it doesn't controls the ~MISC_REGISTERS_INT_STS.GENERIC_SW interrupt will
++ be asserted). */
++#define MISC_REG_DRIVER_CONTROL_7 0xa3c8
++/* [RW 1] e1hmf for WOL. If clr WOL signal o the PXP will be send on bit 0
++ only. */
++#define MISC_REG_E1HMF_MODE 0xa5f8
+ /* [RW 32] GPIO. [31-28] FLOAT port 0; [27-24] FLOAT port 0; When any of
+ these bits is written as a '1'; the corresponding SPIO bit will turn off
+ it's drivers and become an input. This is the reset state of all GPIO
+@@ -1240,6 +1425,18 @@
+ This is the result value of the pin; not the drive value. Writing these
+ bits will have not effect. */
+ #define MISC_REG_GPIO 0xa490
++/* [R 28] this field hold the last information that caused reserved
++ attention. bits [19:0] - address; [22:20] function; [23] reserved;
++ [27:24] the master that caused the attention - according to the following
++ encodeing:1 = pxp; 2 = mcp; 3 = usdm; 4 = tsdm; 5 = xsdm; 6 = csdm; 7 =
++ dbu; 8 = dmae */
++#define MISC_REG_GRC_RSV_ATTN 0xa3c0
++/* [R 28] this field hold the last information that caused timeout
++ attention. bits [19:0] - address; [22:20] function; [23] reserved;
++ [27:24] the master that caused the attention - according to the following
++ encodeing:1 = pxp; 2 = mcp; 3 = usdm; 4 = tsdm; 5 = xsdm; 6 = csdm; 7 =
++ dbu; 8 = dmae */
++#define MISC_REG_GRC_TIMEOUT_ATTN 0xa3c4
+ /* [RW 1] Setting this bit enables a timer in the GRC block to timeout any
+ access that does not finish within
+ ~misc_registers_grc_timout_val.grc_timeout_val cycles. When this bit is
+@@ -1282,6 +1479,11 @@
+ #define MISC_REG_MISC_PRTY_MASK 0xa398
+ /* [R 1] Parity register #0 read */
+ #define MISC_REG_MISC_PRTY_STS 0xa38c
++#define MISC_REG_NIG_WOL_P0 0xa270
++#define MISC_REG_NIG_WOL_P1 0xa274
++/* [R 1] If set indicate that the pcie_rst_b was asserted without perst
++ assertion */
++#define MISC_REG_PCIE_HOT_RESET 0xa618
+ /* [RW 32] 32 LSB of storm PLL first register; reset val = 0x 071d2911.
+ inside order of the bits is: [0] P1 divider[0] (reset value 1); [1] P1
+ divider[1] (reset value 0); [2] P1 divider[2] (reset value 0); [3] P1
+@@ -1303,7 +1505,7 @@
+ #define MISC_REG_PLL_STORM_CTRL_2 0xa298
+ #define MISC_REG_PLL_STORM_CTRL_3 0xa29c
+ #define MISC_REG_PLL_STORM_CTRL_4 0xa2a0
+-/* [RW 32] reset reg#1; rite/read one = the specific block is out of reset;
++/* [RW 32] reset reg#2; rite/read one = the specific block is out of reset;
+ write/read zero = the specific block is in reset; addr 0-wr- the write
+ value will be written to the register; addr 1-set - one will be written
+ to all the bits that have the value of one in the data written (bits that
+@@ -1311,14 +1513,12 @@
+ written to all the bits that have the value of one in the data written
+ (bits that have the value of zero will not be change); addr 3-ignore;
+ read ignore from all addr except addr 00; inside order of the bits is:
+- [0] rst_brb1; [1] rst_prs; [2] rst_src; [3] rst_tsdm; [4] rst_tsem; [5]
+- rst_tcm; [6] rst_rbcr; [7] rst_nig; [8] rst_usdm; [9] rst_ucm; [10]
+- rst_usem; [11] rst_upb; [12] rst_ccm; [13] rst_csem; [14] rst_csdm; [15]
+- rst_rbcu; [16] rst_pbf; [17] rst_qm; [18] rst_tm; [19] rst_dorq; [20]
+- rst_xcm; [21] rst_xsdm; [22] rst_xsem; [23] rst_rbct; [24] rst_cdu; [25]
+- rst_cfc; [26] rst_pxp; [27] rst_pxpv; [28] rst_rbcp; [29] rst_hc; [30]
+- rst_dmae; [31] rst_semi_rtc; */
+-#define MISC_REG_RESET_REG_1 0xa580
++ [0] rst_bmac0; [1] rst_bmac1; [2] rst_emac0; [3] rst_emac1; [4] rst_grc;
++ [5] rst_mcp_n_reset_reg_hard_core; [6] rst_ mcp_n_hard_core_rst_b; [7]
++ rst_ mcp_n_reset_cmn_cpu; [8] rst_ mcp_n_reset_cmn_core; [9] rst_rbcn;
++ [10] rst_dbg; [11] rst_misc_core; [12] rst_dbue (UART); [13]
++ Pci_resetmdio_n; [14] rst_emac0_hard_core; [15] rst_emac1_hard_core; 16]
++ rst_pxp_rq_rd_wr; 31:17] reserved */
+ #define MISC_REG_RESET_REG_2 0xa590
+ /* [RW 20] 20 bit GRC address where the scratch-pad of the MCP that is
+ shared with the driver resides */
+@@ -1345,7 +1545,7 @@
+ select VAUX supply. (This is an output pin only; it is not controlled by
+ the SET and CLR fields; it is controlled by the Main Power SM; the FLOAT
+ field is not applicable for this pin; only the VALUE fields is relevant -
+- it reflects the output value); [3] reserved; [4] spio_4; [5] spio_5; [6]
++ it reflects the output value); [3] port swap [4] spio_4; [5] spio_5; [6]
+ Bit 0 of UMP device ID select; read by UMP firmware; [7] Bit 1 of UMP
+ device ID select; read by UMP firmware. */
+ #define MISC_REG_SPIO 0xa4fc
+@@ -1369,6 +1569,14 @@
+ command bit is written. This bit is set when the SPIO input does not
+ match the current value in #OLD_VALUE (reset value 0). */
+ #define MISC_REG_SPIO_INT 0xa500
++/* [RW 32] reload value for counter 4 if reload; the value will be reload if
++ the counter reached zero and the reload bit
++ (~misc_registers_sw_timer_cfg_4.sw_timer_cfg_4[1] ) is set */
++#define MISC_REG_SW_TIMER_RELOAD_VAL_4 0xa2fc
++/* [RW 32] the value of the counter for sw timers1-8. there are 8 addresses
++ in this register. addres 0 - timer 1; address - timer 2�address 7 -
++ timer 8 */
++#define MISC_REG_SW_TIMER_VAL 0xa5c0
+ /* [RW 1] Set by the MCP to remember if one or more of the drivers is/are
+ loaded; 0-prepare; -unprepare */
+ #define MISC_REG_UNPREPARED 0xa424
+@@ -1394,8 +1602,9 @@
+ #define NIG_REG_BRB1_PAUSE_IN_EN 0x100c8
+ /* [RW 1] output enable for RX BRB1 LP IF */
+ #define NIG_REG_BRB_LB_OUT_EN 0x10100
+-/* [WB_W 72] Debug packet to LP from RBC; Data spelling:[63:0] data; 64]
+- error; [67:65]eop_bvalid; [68]eop; [69]sop; [70]port_id; 71]flush */
++/* [WB_W 82] Debug packet to LP from RBC; Data spelling:[63:0] data; 64]
++ error; [67:65]eop_bvalid; [68]eop; [69]sop; [70]port_id; 71]flush;
++ 72:73]-vnic_num; 81:74]-sideband_info */
+ #define NIG_REG_DEBUG_PACKET_LB 0x10800
+ /* [RW 1] Input enable for TX Debug packet */
+ #define NIG_REG_EGRESS_DEBUG_IN_EN 0x100dc
+@@ -1409,6 +1618,8 @@
+ /* [RW 1] MAC configuration for packets of port0. If 1 - all packet outputs
+ to emac for port0; other way to bmac for port0 */
+ #define NIG_REG_EGRESS_EMAC0_PORT 0x10058
++/* [RW 32] TX_MNG_FIFO in NIG_TX_PORT0; data[31:0] written in FIFO order. */
++#define NIG_REG_EGRESS_MNG0_FIFO 0x1045c
+ /* [RW 1] Input enable for TX PBF user packet port0 IF */
+ #define NIG_REG_EGRESS_PBF0_IN_EN 0x100cc
+ /* [RW 1] Input enable for TX PBF user packet port1 IF */
+@@ -1438,6 +1649,8 @@
+ #define NIG_REG_INGRESS_EOP_LB_FIFO 0x104e4
+ /* [RW 1] led 10g for port 0 */
+ #define NIG_REG_LED_10G_P0 0x10320
++/* [RW 1] led 10g for port 1 */
++#define NIG_REG_LED_10G_P1 0x10324
+ /* [RW 1] Port0: This bit is set to enable the use of the
+ ~nig_registers_led_control_blink_rate_p0.led_control_blink_rate_p0 field
+ defined below. If this bit is cleared; then the blink rate will be about
+@@ -1448,7 +1661,7 @@
+ is reset to 0x080; giving a default blink period of approximately 8Hz. */
+ #define NIG_REG_LED_CONTROL_BLINK_RATE_P0 0x10310
+ /* [RW 1] Port0: If set along with the
+- nig_registers_led_control_override_traffic_p0.led_control_override_traffic_p0
++ ~nig_registers_led_control_override_traffic_p0.led_control_override_traffic_p0
+ bit and ~nig_registers_led_control_traffic_p0.led_control_traffic_p0 LED
+ bit; the Traffic LED will blink with the blink rate specified in
+ ~nig_registers_led_control_blink_rate_p0.led_control_blink_rate_p0 and
+@@ -1470,19 +1683,48 @@
+ /* [RW 4] led mode for port0: 0 MAC; 1-3 PHY1; 4 MAC2; 5-7 PHY4; 8-MAC3;
+ 9-11PHY7; 12 MAC4; 13-15 PHY10; */
+ #define NIG_REG_LED_MODE_P0 0x102f0
++#define NIG_REG_LLH0_ACPI_PAT_0_CRC 0x1015c
++#define NIG_REG_LLH0_ACPI_PAT_6_LEN 0x10154
+ #define NIG_REG_LLH0_BRB1_DRV_MASK 0x10244
++#define NIG_REG_LLH0_BRB1_DRV_MASK_MF 0x16048
+ /* [RW 1] send to BRB1 if no match on any of RMP rules. */
+ #define NIG_REG_LLH0_BRB1_NOT_MCP 0x1025c
++/* [RW 2] Determine the classification participants. 0: no classification.1:
++ classification upon VLAN id. 2: classification upon MAC address. 3:
++ classification upon both VLAN id & MAC addr. */
++#define NIG_REG_LLH0_CLS_TYPE 0x16080
+ /* [RW 32] cm header for llh0 */
+ #define NIG_REG_LLH0_CM_HEADER 0x1007c
++#define NIG_REG_LLH0_DEST_IP_0_1 0x101dc
++#define NIG_REG_LLH0_DEST_MAC_0_0 0x101c0
++/* [RW 16] destination TCP address 1. The LLH will look for this address in
++ all incoming packets. */
++#define NIG_REG_LLH0_DEST_TCP_0 0x10220
++/* [RW 16] destination UDP address 1 The LLH will look for this address in
++ all incoming packets. */
++#define NIG_REG_LLH0_DEST_UDP_0 0x10214
+ #define NIG_REG_LLH0_ERROR_MASK 0x1008c
+ /* [RW 8] event id for llh0 */
+ #define NIG_REG_LLH0_EVENT_ID 0x10084
++#define NIG_REG_LLH0_FUNC_EN 0x160fc
++#define NIG_REG_LLH0_FUNC_VLAN_ID 0x16100
++/* [RW 1] Determine the IP version to look for in
++ ~nig_registers_llh0_dest_ip_0.llh0_dest_ip_0. 0 - IPv6; 1-IPv4 */
++#define NIG_REG_LLH0_IPV4_IPV6_0 0x10208
++/* [RW 1] t bit for llh0 */
++#define NIG_REG_LLH0_T_BIT 0x10074
++/* [RW 12] VLAN ID 1. In case of VLAN packet the LLH will look for this ID. */
++#define NIG_REG_LLH0_VLAN_ID_0 0x1022c
+ /* [RW 8] init credit counter for port0 in LLH */
+ #define NIG_REG_LLH0_XCM_INIT_CREDIT 0x10554
+ #define NIG_REG_LLH0_XCM_MASK 0x10130
++#define NIG_REG_LLH1_BRB1_DRV_MASK 0x10248
+ /* [RW 1] send to BRB1 if no match on any of RMP rules. */
+ #define NIG_REG_LLH1_BRB1_NOT_MCP 0x102dc
++/* [RW 2] Determine the classification participants. 0: no classification.1:
++ classification upon VLAN id. 2: classification upon MAC address. 3:
++ classification upon both VLAN id & MAC addr. */
++#define NIG_REG_LLH1_CLS_TYPE 0x16084
+ /* [RW 32] cm header for llh1 */
+ #define NIG_REG_LLH1_CM_HEADER 0x10080
+ #define NIG_REG_LLH1_ERROR_MASK 0x10090
+@@ -1491,13 +1733,26 @@
+ /* [RW 8] init credit counter for port1 in LLH */
+ #define NIG_REG_LLH1_XCM_INIT_CREDIT 0x10564
+ #define NIG_REG_LLH1_XCM_MASK 0x10134
++/* [RW 1] When this bit is set; the LLH will expect all packets to be with
++ e1hov */
++#define NIG_REG_LLH_E1HOV_MODE 0x160d8
++/* [RW 1] When this bit is set; the LLH will classify the packet before
++ sending it to the BRB or calculating WoL on it. */
++#define NIG_REG_LLH_MF_MODE 0x16024
+ #define NIG_REG_MASK_INTERRUPT_PORT0 0x10330
+ #define NIG_REG_MASK_INTERRUPT_PORT1 0x10334
+ /* [RW 1] Output signal from NIG to EMAC0. When set enables the EMAC0 block. */
+ #define NIG_REG_NIG_EMAC0_EN 0x1003c
++/* [RW 1] Output signal from NIG to EMAC1. When set enables the EMAC1 block. */
++#define NIG_REG_NIG_EMAC1_EN 0x10040
+ /* [RW 1] Output signal from NIG to TX_EMAC0. When set indicates to the
+ EMAC0 to strip the CRC from the ingress packets. */
+ #define NIG_REG_NIG_INGRESS_EMAC0_NO_CRC 0x10044
++/* [R 32] Interrupt register #0 read */
++#define NIG_REG_NIG_INT_STS_0 0x103b0
++#define NIG_REG_NIG_INT_STS_1 0x103c0
++/* [R 32] Parity register #0 read */
++#define NIG_REG_NIG_PRTY_STS 0x103d0
+ /* [RW 1] Input enable for RX PBF LP IF */
+ #define NIG_REG_PBF_LB_IN_EN 0x100b4
+ /* [RW 1] Value of this register will be transmitted to port swap when
+@@ -1514,9 +1769,24 @@
+ /* [R 32] Rx statistics : In user packets discarded due to BRB backpressure
+ for port0 */
+ #define NIG_REG_STAT0_BRB_DISCARD 0x105f0
++/* [R 32] Rx statistics : In user packets truncated due to BRB backpressure
++ for port0 */
++#define NIG_REG_STAT0_BRB_TRUNCATE 0x105f8
++/* [WB_R 36] Tx statistics : Number of packets from emac0 or bmac0 that
++ between 1024 and 1522 bytes for port0 */
++#define NIG_REG_STAT0_EGRESS_MAC_PKT0 0x10750
++/* [WB_R 36] Tx statistics : Number of packets from emac0 or bmac0 that
++ between 1523 bytes and above for port0 */
++#define NIG_REG_STAT0_EGRESS_MAC_PKT1 0x10760
+ /* [R 32] Rx statistics : In user packets discarded due to BRB backpressure
+ for port1 */
+ #define NIG_REG_STAT1_BRB_DISCARD 0x10628
++/* [WB_R 36] Tx statistics : Number of packets from emac1 or bmac1 that
++ between 1024 and 1522 bytes for port1 */
++#define NIG_REG_STAT1_EGRESS_MAC_PKT0 0x107a0
++/* [WB_R 36] Tx statistics : Number of packets from emac1 or bmac1 that
++ between 1523 bytes and above for port1 */
++#define NIG_REG_STAT1_EGRESS_MAC_PKT1 0x107b0
+ /* [WB_R 64] Rx statistics : User octets received for LP */
+ #define NIG_REG_STAT2_BRB_OCTET 0x107e0
+ #define NIG_REG_STATUS_INTERRUPT_PORT0 0x10328
+@@ -1529,8 +1799,12 @@
+ #define NIG_REG_XCM0_OUT_EN 0x100f0
+ /* [RW 1] output enable for RX_XCM1 IF */
+ #define NIG_REG_XCM1_OUT_EN 0x100f4
++/* [RW 1] control to xgxs - remote PHY in-band MDIO */
++#define NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST 0x10348
+ /* [RW 5] control to xgxs - CL45 DEVAD */
+ #define NIG_REG_XGXS0_CTRL_MD_DEVAD 0x1033c
++/* [RW 1] control to xgxs; 0 - clause 45; 1 - clause 22 */
++#define NIG_REG_XGXS0_CTRL_MD_ST 0x10338
+ /* [RW 5] control to xgxs - CL22 PHY_ADD and CL45 PRTAD */
+ #define NIG_REG_XGXS0_CTRL_PHY_ADDR 0x10340
+ /* [R 1] status from xgxs0 that inputs to interrupt logic of link10g. */
+@@ -1626,7 +1900,6 @@
+ #define PRS_REG_CFC_SEARCH_INITIAL_CREDIT 0x4011c
+ /* [RW 24] CID for port 0 if no match */
+ #define PRS_REG_CID_PORT_0 0x400fc
+-#define PRS_REG_CID_PORT_1 0x40100
+ /* [RW 32] The CM header for flush message where 'load existed' bit in CFC
+ load response is reset and packet type is 0. Used in packet start message
+ to TCM. */
+@@ -1635,6 +1908,7 @@
+ #define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_2 0x400e4
+ #define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_3 0x400e8
+ #define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_4 0x400ec
++#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_5 0x400f0
+ /* [RW 32] The CM header for flush message where 'load existed' bit in CFC
+ load response is set and packet type is 0. Used in packet start message
+ to TCM. */
+@@ -1643,6 +1917,7 @@
+ #define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_2 0x400c4
+ #define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_3 0x400c8
+ #define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_4 0x400cc
++#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_5 0x400d0
+ /* [RW 32] The CM header for a match and packet type 1 for loopback port.
+ Used in packet start message to TCM. */
+ #define PRS_REG_CM_HDR_LOOPBACK_TYPE_1 0x4009c
+@@ -1658,11 +1933,15 @@
+ #define PRS_REG_CM_HDR_TYPE_4 0x40088
+ /* [RW 32] The CM header in case there was not a match on the connection */
+ #define PRS_REG_CM_NO_MATCH_HDR 0x400b8
++/* [RW 1] Indicates if in e1hov mode. 0=non-e1hov mode; 1=e1hov mode. */
++#define PRS_REG_E1HOV_MODE 0x401c8
+ /* [RW 8] The 8-bit event ID for a match and packet type 1. Used in packet
+ start message to TCM. */
+ #define PRS_REG_EVENT_ID_1 0x40054
+ #define PRS_REG_EVENT_ID_2 0x40058
+ #define PRS_REG_EVENT_ID_3 0x4005c
++/* [RW 16] The Ethernet type value for FCoE */
++#define PRS_REG_FCOE_TYPE 0x401d0
+ /* [RW 8] Context region for flush packet with packet type 0. Used in CFC
+ load request message. */
+ #define PRS_REG_FLUSH_REGIONS_TYPE_0 0x40004
+@@ -1730,8 +2009,17 @@
+ #define PXP2_REG_HST_DATA_FIFO_STATUS 0x12047c
+ /* [R 7] Debug only: Number of used entries in the header FIFO */
+ #define PXP2_REG_HST_HEADER_FIFO_STATUS 0x120478
++#define PXP2_REG_PGL_ADDR_88_F0 0x120534
++#define PXP2_REG_PGL_ADDR_8C_F0 0x120538
++#define PXP2_REG_PGL_ADDR_90_F0 0x12053c
++#define PXP2_REG_PGL_ADDR_94_F0 0x120540
+ #define PXP2_REG_PGL_CONTROL0 0x120490
+ #define PXP2_REG_PGL_CONTROL1 0x120514
++/* [RW 32] third dword data of expansion rom request. this register is
++ special. reading from it provides a vector outstanding read requests. if
++ a bit is zero it means that a read request on the corresponding tag did
++ not finish yet (not all completions have arrived for it) */
++#define PXP2_REG_PGL_EXP_ROM2 0x120808
+ /* [RW 32] Inbound interrupt table for CSDM: bits[31:16]-mask;
+ its[15:0]-address */
+ #define PXP2_REG_PGL_INT_CSDM_0 0x1204f4
+@@ -1775,8 +2063,7 @@
+ /* [R 1] this bit indicates that a read request was blocked because of
+ bus_master_en was deasserted */
+ #define PXP2_REG_PGL_READ_BLOCKED 0x120568
+-/* [R 6] debug only */
+-#define PXP2_REG_PGL_TXR_CDTS 0x120528
++#define PXP2_REG_PGL_TAGS_LIMIT 0x1205a8
+ /* [R 18] debug only */
+ #define PXP2_REG_PGL_TXW_CDTS 0x12052c
+ /* [R 1] this bit indicates that a write request was blocked because of
+@@ -1828,12 +2115,14 @@
+ #define PXP2_REG_PSWRQ_QM0_L2P 0x120038
+ #define PXP2_REG_PSWRQ_SRC0_L2P 0x120054
+ #define PXP2_REG_PSWRQ_TM0_L2P 0x12001c
+-/* [RW 25] Interrupt mask register #0 read/write */
+-#define PXP2_REG_PXP2_INT_MASK 0x120578
+-/* [R 25] Interrupt register #0 read */
+-#define PXP2_REG_PXP2_INT_STS 0x12056c
+-/* [RC 25] Interrupt register #0 read clear */
+-#define PXP2_REG_PXP2_INT_STS_CLR 0x120570
++#define PXP2_REG_PSWRQ_TSDM0_L2P 0x1200e0
++/* [RW 32] Interrupt mask register #0 read/write */
++#define PXP2_REG_PXP2_INT_MASK_0 0x120578
++/* [R 32] Interrupt register #0 read */
++#define PXP2_REG_PXP2_INT_STS_0 0x12056c
++#define PXP2_REG_PXP2_INT_STS_1 0x120608
++/* [RC 32] Interrupt register #0 read clear */
++#define PXP2_REG_PXP2_INT_STS_CLR_0 0x120570
+ /* [RW 32] Parity mask register #0 read/write */
+ #define PXP2_REG_PXP2_PRTY_MASK_0 0x120588
+ #define PXP2_REG_PXP2_PRTY_MASK_1 0x120598
+@@ -2016,8 +2305,12 @@
+ #define PXP2_REG_RQ_BW_WR_UBOUND29 0x1202a4
+ /* [RW 7] Bandwidth upper bound for VQ30 */
+ #define PXP2_REG_RQ_BW_WR_UBOUND30 0x1202a8
++/* [RW 18] external first_mem_addr field in L2P table for CDU module port 0 */
++#define PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR 0x120008
+ /* [RW 2] Endian mode for cdu */
+ #define PXP2_REG_RQ_CDU_ENDIAN_M 0x1201a0
++#define PXP2_REG_RQ_CDU_FIRST_ILT 0x12061c
++#define PXP2_REG_RQ_CDU_LAST_ILT 0x120620
+ /* [RW 3] page size in L2P table for CDU module; -4k; -8k; -16k; -32k; -64k;
+ -128k */
+ #define PXP2_REG_RQ_CDU_P_SIZE 0x120018
+@@ -2029,18 +2322,30 @@
+ /* [RW 1] When '1'; requests will enter input buffers but wont get out
+ towards the glue */
+ #define PXP2_REG_RQ_DISABLE_INPUTS 0x120330
++/* [RW 1] 1 - SR will be aligned by 64B; 0 - SR will be aligned by 8B */
++#define PXP2_REG_RQ_DRAM_ALIGN 0x1205b0
++/* [RW 1] If 1 ILT failiue will not result in ELT access; An interrupt will
++ be asserted */
++#define PXP2_REG_RQ_ELT_DISABLE 0x12066c
+ /* [RW 2] Endian mode for hc */
+ #define PXP2_REG_RQ_HC_ENDIAN_M 0x1201a8
++/* [RW 1] when '0' ILT logic will work as in A0; otherwise B0; for back
++ compatibility needs; Note that different registers are used per mode */
++#define PXP2_REG_RQ_ILT_MODE 0x1205b4
+ /* [WB 53] Onchip address table */
+ #define PXP2_REG_RQ_ONCHIP_AT 0x122000
++/* [WB 53] Onchip address table - B0 */
++#define PXP2_REG_RQ_ONCHIP_AT_B0 0x128000
+ /* [RW 13] Pending read limiter threshold; in Dwords */
+ #define PXP2_REG_RQ_PDR_LIMIT 0x12033c
+ /* [RW 2] Endian mode for qm */
+ #define PXP2_REG_RQ_QM_ENDIAN_M 0x120194
++#define PXP2_REG_RQ_QM_FIRST_ILT 0x120634
++#define PXP2_REG_RQ_QM_LAST_ILT 0x120638
+ /* [RW 3] page size in L2P table for QM module; -4k; -8k; -16k; -32k; -64k;
+ -128k */
+ #define PXP2_REG_RQ_QM_P_SIZE 0x120050
+-/* [RW 1] 1' indicates that the RBC has finished configurating the PSWRQ */
++/* [RW 1] 1' indicates that the RBC has finished configuring the PSWRQ */
+ #define PXP2_REG_RQ_RBC_DONE 0x1201b0
+ /* [RW 3] Max burst size filed for read requests port 0; 000 - 128B;
+ 001:256B; 010: 512B; 11:1K:100:2K; 01:4K */
+@@ -2050,16 +2355,22 @@
+ #define PXP2_REG_RQ_RD_MBS1 0x120168
+ /* [RW 2] Endian mode for src */
+ #define PXP2_REG_RQ_SRC_ENDIAN_M 0x12019c
++#define PXP2_REG_RQ_SRC_FIRST_ILT 0x12063c
++#define PXP2_REG_RQ_SRC_LAST_ILT 0x120640
+ /* [RW 3] page size in L2P table for SRC module; -4k; -8k; -16k; -32k; -64k;
+ -128k */
+ #define PXP2_REG_RQ_SRC_P_SIZE 0x12006c
+ /* [RW 2] Endian mode for tm */
+ #define PXP2_REG_RQ_TM_ENDIAN_M 0x120198
++#define PXP2_REG_RQ_TM_FIRST_ILT 0x120644
++#define PXP2_REG_RQ_TM_LAST_ILT 0x120648
+ /* [RW 3] page size in L2P table for TM module; -4k; -8k; -16k; -32k; -64k;
+ -128k */
+ #define PXP2_REG_RQ_TM_P_SIZE 0x120034
+ /* [R 5] Number of entries in the ufifo; his fifo has l2p completions */
+ #define PXP2_REG_RQ_UFIFO_NUM_OF_ENTRY 0x12080c
++/* [RW 18] external first_mem_addr field in L2P table for USDM module port 0 */
++#define PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR 0x120094
+ /* [R 8] Number of entries occupied by vq 0 in pswrq memory */
+ #define PXP2_REG_RQ_VQ0_ENTRY_CNT 0x120810
+ /* [R 8] Number of entries occupied by vq 10 in pswrq memory */
+@@ -2130,19 +2441,63 @@
+ /* [RW 3] Max burst size filed for write requests port 1; 000 - 128B;
+ 001:256B; 010: 512B; */
+ #define PXP2_REG_RQ_WR_MBS1 0x120164
+-/* [RW 10] if Number of entries in dmae fifo will be higer than this
++/* [RW 2] 0 - 128B; - 256B; - 512B; - 1024B; when the payload in the
++ buffer reaches this number has_payload will be asserted */
++#define PXP2_REG_WR_CDU_MPS 0x1205f0
++/* [RW 2] 0 - 128B; - 256B; - 512B; - 1024B; when the payload in the
++ buffer reaches this number has_payload will be asserted */
++#define PXP2_REG_WR_CSDM_MPS 0x1205d0
++/* [RW 2] 0 - 128B; - 256B; - 512B; - 1024B; when the payload in the
++ buffer reaches this number has_payload will be asserted */
++#define PXP2_REG_WR_DBG_MPS 0x1205e8
++/* [RW 2] 0 - 128B; - 256B; - 512B; - 1024B; when the payload in the
++ buffer reaches this number has_payload will be asserted */
++#define PXP2_REG_WR_DMAE_MPS 0x1205ec
++/* [RW 10] if Number of entries in dmae fifo will be higher than this
+ threshold then has_payload indication will be asserted; the default value
+ should be equal to > write MBS size! */
+ #define PXP2_REG_WR_DMAE_TH 0x120368
+-/* [RW 10] if Number of entries in usdmdp fifo will be higer than this
++/* [RW 2] 0 - 128B; - 256B; - 512B; - 1024B; when the payload in the
++ buffer reaches this number has_payload will be asserted */
++#define PXP2_REG_WR_HC_MPS 0x1205c8
++/* [RW 2] 0 - 128B; - 256B; - 512B; - 1024B; when the payload in the
++ buffer reaches this number has_payload will be asserted */
++#define PXP2_REG_WR_QM_MPS 0x1205dc
++/* [RW 1] 0 - working in A0 mode; - working in B0 mode */
++#define PXP2_REG_WR_REV_MODE 0x120670
++/* [RW 2] 0 - 128B; - 256B; - 512B; - 1024B; when the payload in the
++ buffer reaches this number has_payload will be asserted */
++#define PXP2_REG_WR_SRC_MPS 0x1205e4
++/* [RW 2] 0 - 128B; - 256B; - 512B; - 1024B; when the payload in the
++ buffer reaches this number has_payload will be asserted */
++#define PXP2_REG_WR_TM_MPS 0x1205e0
++/* [RW 2] 0 - 128B; - 256B; - 512B; - 1024B; when the payload in the
++ buffer reaches this number has_payload will be asserted */
++#define PXP2_REG_WR_TSDM_MPS 0x1205d4
++/* [RW 10] if Number of entries in usdmdp fifo will be higher than this
+ threshold then has_payload indication will be asserted; the default value
+ should be equal to > write MBS size! */
+ #define PXP2_REG_WR_USDMDP_TH 0x120348
++/* [RW 2] 0 - 128B; - 256B; - 512B; - 1024B; when the payload in the
++ buffer reaches this number has_payload will be asserted */
++#define PXP2_REG_WR_USDM_MPS 0x1205cc
++/* [RW 2] 0 - 128B; - 256B; - 512B; - 1024B; when the payload in the
++ buffer reaches this number has_payload will be asserted */
++#define PXP2_REG_WR_XSDM_MPS 0x1205d8
+ /* [R 1] debug only: Indication if PSWHST arbiter is idle */
+ #define PXP_REG_HST_ARB_IS_IDLE 0x103004
+ /* [R 8] debug only: A bit mask for all PSWHST arbiter clients. '1' means
+ this client is waiting for the arbiter. */
+ #define PXP_REG_HST_CLIENTS_WAITING_TO_ARB 0x103008
++/* [R 1] debug only: '1' means this PSWHST is discarding doorbells. This bit
++ should update accoring to 'hst_discard_doorbells' register when the state
++ machine is idle */
++#define PXP_REG_HST_DISCARD_DOORBELLS_STATUS 0x1030a0
++/* [R 6] debug only: A bit mask for all PSWHST internal write clients. '1'
++ means this PSWHST is discarding inputs from this client. Each bit should
++ update accoring to 'hst_discard_internal_writes' register when the state
++ machine is idle. */
++#define PXP_REG_HST_DISCARD_INTERNAL_WRITES_STATUS 0x10309c
+ /* [WB 160] Used for initialization of the inbound interrupts memory */
+ #define PXP_REG_HST_INBOUND_INT 0x103800
+ /* [RW 32] Interrupt mask register #0 read/write */
+@@ -2165,18 +2520,30 @@
+ #define QM_REG_ACTCTRINITVAL_3 0x16804c
+ /* [RW 32] The base logical address (in bytes) of each physical queue. The
+ index I represents the physical queue number. The 12 lsbs are ignore and
+- considered zero so practically there are only 20 bits in this register. */
++ considered zero so practically there are only 20 bits in this register;
++ queues 63-0 */
+ #define QM_REG_BASEADDR 0x168900
++/* [RW 32] The base logical address (in bytes) of each physical queue. The
++ index I represents the physical queue number. The 12 lsbs are ignore and
++ considered zero so practically there are only 20 bits in this register;
++ queues 127-64 */
++#define QM_REG_BASEADDR_EXT_A 0x16e100
+ /* [RW 16] The byte credit cost for each task. This value is for both ports */
+ #define QM_REG_BYTECRDCOST 0x168234
+ /* [RW 16] The initial byte credit value for both ports. */
+ #define QM_REG_BYTECRDINITVAL 0x168238
+ /* [RW 32] A bit per physical queue. If the bit is cleared then the physical
+- queue uses port 0 else it uses port 1. */
++ queue uses port 0 else it uses port 1; queues 31-0 */
+ #define QM_REG_BYTECRDPORT_LSB 0x168228
+ /* [RW 32] A bit per physical queue. If the bit is cleared then the physical
+- queue uses port 0 else it uses port 1. */
++ queue uses port 0 else it uses port 1; queues 95-64 */
++#define QM_REG_BYTECRDPORT_LSB_EXT_A 0x16e520
++/* [RW 32] A bit per physical queue. If the bit is cleared then the physical
++ queue uses port 0 else it uses port 1; queues 63-32 */
+ #define QM_REG_BYTECRDPORT_MSB 0x168224
++/* [RW 32] A bit per physical queue. If the bit is cleared then the physical
++ queue uses port 0 else it uses port 1; queues 127-96 */
++#define QM_REG_BYTECRDPORT_MSB_EXT_A 0x16e51c
+ /* [RW 16] The byte credit value that if above the QM is considered almost
+ full */
+ #define QM_REG_BYTECREDITAFULLTHR 0x168094
+@@ -2203,7 +2570,7 @@
+ #define QM_REG_CMINTVOQMASK_6 0x16820c
+ #define QM_REG_CMINTVOQMASK_7 0x168210
+ /* [RW 20] The number of connections divided by 16 which dictates the size
+- of each queue per port 0 */
++ of each queue which belongs to even function number. */
+ #define QM_REG_CONNNUM_0 0x168020
+ /* [R 6] Keep the fill level of the fifo from write client 4 */
+ #define QM_REG_CQM_WRC_FIFOLVL 0x168018
+@@ -2216,74 +2583,179 @@
+ bypass enable */
+ #define QM_REG_ENBYPVOQMASK 0x16823c
+ /* [RW 32] A bit mask per each physical queue. If a bit is set then the
+- physical queue uses the byte credit */
++ physical queue uses the byte credit; queues 31-0 */
+ #define QM_REG_ENBYTECRD_LSB 0x168220
+ /* [RW 32] A bit mask per each physical queue. If a bit is set then the
+- physical queue uses the byte credit */
++ physical queue uses the byte credit; queues 95-64 */
++#define QM_REG_ENBYTECRD_LSB_EXT_A 0x16e518
++/* [RW 32] A bit mask per each physical queue. If a bit is set then the
++ physical queue uses the byte credit; queues 63-32 */
+ #define QM_REG_ENBYTECRD_MSB 0x16821c
++/* [RW 32] A bit mask per each physical queue. If a bit is set then the
++ physical queue uses the byte credit; queues 127-96 */
++#define QM_REG_ENBYTECRD_MSB_EXT_A 0x16e514
+ /* [RW 4] If cleared then the secondary interface will not be served by the
+ RR arbiter */
+ #define QM_REG_ENSEC 0x1680f0
+-/* [RW 32] A bit vector per each physical queue which selects which function
+- number to use on PCI access for that queue. */
++/* [RW 32] NA */
+ #define QM_REG_FUNCNUMSEL_LSB 0x168230
+-/* [RW 32] A bit vector per each physical queue which selects which function
+- number to use on PCI access for that queue. */
++/* [RW 32] NA */
+ #define QM_REG_FUNCNUMSEL_MSB 0x16822c
+ /* [RW 32] A mask register to mask the Almost empty signals which will not
+- be use for the almost empty indication to the HW block */
++ be use for the almost empty indication to the HW block; queues 31:0 */
+ #define QM_REG_HWAEMPTYMASK_LSB 0x168218
+ /* [RW 32] A mask register to mask the Almost empty signals which will not
+- be use for the almost empty indication to the HW block */
++ be use for the almost empty indication to the HW block; queues 95-64 */
++#define QM_REG_HWAEMPTYMASK_LSB_EXT_A 0x16e510
++/* [RW 32] A mask register to mask the Almost empty signals which will not
++ be use for the almost empty indication to the HW block; queues 63:32 */
+ #define QM_REG_HWAEMPTYMASK_MSB 0x168214
++/* [RW 32] A mask register to mask the Almost empty signals which will not
++ be use for the almost empty indication to the HW block; queues 127-96 */
++#define QM_REG_HWAEMPTYMASK_MSB_EXT_A 0x16e50c
+ /* [RW 4] The number of outstanding request to CFC */
+ #define QM_REG_OUTLDREQ 0x168804
+ /* [RC 1] A flag to indicate that overflow error occurred in one of the
+ queues. */
+ #define QM_REG_OVFERROR 0x16805c
+-/* [RC 6] the Q were the qverflow occurs */
++/* [RC 7] the Q were the qverflow occurs */
+ #define QM_REG_OVFQNUM 0x168058
+-/* [R 32] Pause state for physical queues 31-0 */
++/* [R 16] Pause state for physical queues 15-0 */
+ #define QM_REG_PAUSESTATE0 0x168410
+-/* [R 32] Pause state for physical queues 64-32 */
++/* [R 16] Pause state for physical queues 31-16 */
+ #define QM_REG_PAUSESTATE1 0x168414
++/* [R 16] Pause state for physical queues 47-32 */
++#define QM_REG_PAUSESTATE2 0x16e684
++/* [R 16] Pause state for physical queues 63-48 */
++#define QM_REG_PAUSESTATE3 0x16e688
++/* [R 16] Pause state for physical queues 79-64 */
++#define QM_REG_PAUSESTATE4 0x16e68c
++/* [R 16] Pause state for physical queues 95-80 */
++#define QM_REG_PAUSESTATE5 0x16e690
++/* [R 16] Pause state for physical queues 111-96 */
++#define QM_REG_PAUSESTATE6 0x16e694
++/* [R 16] Pause state for physical queues 127-112 */
++#define QM_REG_PAUSESTATE7 0x16e698
+ /* [RW 2] The PCI attributes field used in the PCI request. */
+ #define QM_REG_PCIREQAT 0x168054
+ /* [R 16] The byte credit of port 0 */
+ #define QM_REG_PORT0BYTECRD 0x168300
+ /* [R 16] The byte credit of port 1 */
+ #define QM_REG_PORT1BYTECRD 0x168304
+-/* [WB 54] Pointer Table Memory; The mapping is as follow: ptrtbl[53:30]
+- read pointer; ptrtbl[29:6] write pointer; ptrtbl[5:4] read bank0;
+- ptrtbl[3:2] read bank 1; ptrtbl[1:0] write bank; */
++/* [RW 3] pci function number of queues 15-0 */
++#define QM_REG_PQ2PCIFUNC_0 0x16e6bc
++#define QM_REG_PQ2PCIFUNC_1 0x16e6c0
++#define QM_REG_PQ2PCIFUNC_2 0x16e6c4
++#define QM_REG_PQ2PCIFUNC_3 0x16e6c8
++#define QM_REG_PQ2PCIFUNC_4 0x16e6cc
++#define QM_REG_PQ2PCIFUNC_5 0x16e6d0
++#define QM_REG_PQ2PCIFUNC_6 0x16e6d4
++#define QM_REG_PQ2PCIFUNC_7 0x16e6d8
++/* [WB 54] Pointer Table Memory for queues 63-0; The mapping is as follow:
++ ptrtbl[53:30] read pointer; ptrtbl[29:6] write pointer; ptrtbl[5:4] read
++ bank0; ptrtbl[3:2] read bank 1; ptrtbl[1:0] write bank; */
+ #define QM_REG_PTRTBL 0x168a00
++/* [WB 54] Pointer Table Memory for queues 127-64; The mapping is as follow:
++ ptrtbl[53:30] read pointer; ptrtbl[29:6] write pointer; ptrtbl[5:4] read
++ bank0; ptrtbl[3:2] read bank 1; ptrtbl[1:0] write bank; */
++#define QM_REG_PTRTBL_EXT_A 0x16e200
+ /* [RW 2] Interrupt mask register #0 read/write */
+ #define QM_REG_QM_INT_MASK 0x168444
+ /* [R 2] Interrupt register #0 read */
+ #define QM_REG_QM_INT_STS 0x168438
+-/* [RW 9] Parity mask register #0 read/write */
++/* [RW 12] Parity mask register #0 read/write */
+ #define QM_REG_QM_PRTY_MASK 0x168454
+-/* [R 9] Parity register #0 read */
++/* [R 12] Parity register #0 read */
+ #define QM_REG_QM_PRTY_STS 0x168448
+ /* [R 32] Current queues in pipeline: Queues from 32 to 63 */
+ #define QM_REG_QSTATUS_HIGH 0x16802c
++/* [R 32] Current queues in pipeline: Queues from 96 to 127 */
++#define QM_REG_QSTATUS_HIGH_EXT_A 0x16e408
+ /* [R 32] Current queues in pipeline: Queues from 0 to 31 */
+ #define QM_REG_QSTATUS_LOW 0x168028
+-/* [R 24] The number of tasks queued for each queue */
++/* [R 32] Current queues in pipeline: Queues from 64 to 95 */
++#define QM_REG_QSTATUS_LOW_EXT_A 0x16e404
++/* [R 24] The number of tasks queued for each queue; queues 63-0 */
+ #define QM_REG_QTASKCTR_0 0x168308
++/* [R 24] The number of tasks queued for each queue; queues 127-64 */
++#define QM_REG_QTASKCTR_EXT_A_0 0x16e584
+ /* [RW 4] Queue tied to VOQ */
+ #define QM_REG_QVOQIDX_0 0x1680f4
+ #define QM_REG_QVOQIDX_10 0x16811c
++#define QM_REG_QVOQIDX_100 0x16e49c
++#define QM_REG_QVOQIDX_101 0x16e4a0
++#define QM_REG_QVOQIDX_102 0x16e4a4
++#define QM_REG_QVOQIDX_103 0x16e4a8
++#define QM_REG_QVOQIDX_104 0x16e4ac
++#define QM_REG_QVOQIDX_105 0x16e4b0
++#define QM_REG_QVOQIDX_106 0x16e4b4
++#define QM_REG_QVOQIDX_107 0x16e4b8
++#define QM_REG_QVOQIDX_108 0x16e4bc
++#define QM_REG_QVOQIDX_109 0x16e4c0
++#define QM_REG_QVOQIDX_100 0x16e49c
++#define QM_REG_QVOQIDX_101 0x16e4a0
++#define QM_REG_QVOQIDX_102 0x16e4a4
++#define QM_REG_QVOQIDX_103 0x16e4a8
++#define QM_REG_QVOQIDX_104 0x16e4ac
++#define QM_REG_QVOQIDX_105 0x16e4b0
++#define QM_REG_QVOQIDX_106 0x16e4b4
++#define QM_REG_QVOQIDX_107 0x16e4b8
++#define QM_REG_QVOQIDX_108 0x16e4bc
++#define QM_REG_QVOQIDX_109 0x16e4c0
+ #define QM_REG_QVOQIDX_11 0x168120
++#define QM_REG_QVOQIDX_110 0x16e4c4
++#define QM_REG_QVOQIDX_111 0x16e4c8
++#define QM_REG_QVOQIDX_112 0x16e4cc
++#define QM_REG_QVOQIDX_113 0x16e4d0
++#define QM_REG_QVOQIDX_114 0x16e4d4
++#define QM_REG_QVOQIDX_115 0x16e4d8
++#define QM_REG_QVOQIDX_116 0x16e4dc
++#define QM_REG_QVOQIDX_117 0x16e4e0
++#define QM_REG_QVOQIDX_118 0x16e4e4
++#define QM_REG_QVOQIDX_119 0x16e4e8
++#define QM_REG_QVOQIDX_110 0x16e4c4
++#define QM_REG_QVOQIDX_111 0x16e4c8
++#define QM_REG_QVOQIDX_112 0x16e4cc
++#define QM_REG_QVOQIDX_113 0x16e4d0
++#define QM_REG_QVOQIDX_114 0x16e4d4
++#define QM_REG_QVOQIDX_115 0x16e4d8
++#define QM_REG_QVOQIDX_116 0x16e4dc
++#define QM_REG_QVOQIDX_117 0x16e4e0
++#define QM_REG_QVOQIDX_118 0x16e4e4
++#define QM_REG_QVOQIDX_119 0x16e4e8
+ #define QM_REG_QVOQIDX_12 0x168124
++#define QM_REG_QVOQIDX_120 0x16e4ec
++#define QM_REG_QVOQIDX_121 0x16e4f0
++#define QM_REG_QVOQIDX_122 0x16e4f4
++#define QM_REG_QVOQIDX_123 0x16e4f8
++#define QM_REG_QVOQIDX_124 0x16e4fc
++#define QM_REG_QVOQIDX_125 0x16e500
++#define QM_REG_QVOQIDX_126 0x16e504
++#define QM_REG_QVOQIDX_127 0x16e508
++#define QM_REG_QVOQIDX_120 0x16e4ec
++#define QM_REG_QVOQIDX_121 0x16e4f0
++#define QM_REG_QVOQIDX_122 0x16e4f4
++#define QM_REG_QVOQIDX_123 0x16e4f8
++#define QM_REG_QVOQIDX_124 0x16e4fc
++#define QM_REG_QVOQIDX_125 0x16e500
++#define QM_REG_QVOQIDX_126 0x16e504
++#define QM_REG_QVOQIDX_127 0x16e508
+ #define QM_REG_QVOQIDX_13 0x168128
+ #define QM_REG_QVOQIDX_14 0x16812c
+ #define QM_REG_QVOQIDX_15 0x168130
+ #define QM_REG_QVOQIDX_16 0x168134
+ #define QM_REG_QVOQIDX_17 0x168138
+ #define QM_REG_QVOQIDX_21 0x168148
++#define QM_REG_QVOQIDX_22 0x16814c
++#define QM_REG_QVOQIDX_23 0x168150
++#define QM_REG_QVOQIDX_24 0x168154
+ #define QM_REG_QVOQIDX_25 0x168158
++#define QM_REG_QVOQIDX_26 0x16815c
++#define QM_REG_QVOQIDX_27 0x168160
++#define QM_REG_QVOQIDX_28 0x168164
+ #define QM_REG_QVOQIDX_29 0x168168
++#define QM_REG_QVOQIDX_30 0x16816c
++#define QM_REG_QVOQIDX_31 0x168170
+ #define QM_REG_QVOQIDX_32 0x168174
+ #define QM_REG_QVOQIDX_33 0x168178
+ #define QM_REG_QVOQIDX_34 0x16817c
+@@ -2328,17 +2800,79 @@
+ #define QM_REG_QVOQIDX_61 0x1681e8
+ #define QM_REG_QVOQIDX_62 0x1681ec
+ #define QM_REG_QVOQIDX_63 0x1681f0
++#define QM_REG_QVOQIDX_64 0x16e40c
++#define QM_REG_QVOQIDX_65 0x16e410
++#define QM_REG_QVOQIDX_66 0x16e414
++#define QM_REG_QVOQIDX_67 0x16e418
++#define QM_REG_QVOQIDX_68 0x16e41c
++#define QM_REG_QVOQIDX_69 0x16e420
+ #define QM_REG_QVOQIDX_60 0x1681e4
+ #define QM_REG_QVOQIDX_61 0x1681e8
+ #define QM_REG_QVOQIDX_62 0x1681ec
+ #define QM_REG_QVOQIDX_63 0x1681f0
++#define QM_REG_QVOQIDX_64 0x16e40c
++#define QM_REG_QVOQIDX_65 0x16e410
++#define QM_REG_QVOQIDX_69 0x16e420
+ #define QM_REG_QVOQIDX_7 0x168110
++#define QM_REG_QVOQIDX_70 0x16e424
++#define QM_REG_QVOQIDX_71 0x16e428
++#define QM_REG_QVOQIDX_72 0x16e42c
++#define QM_REG_QVOQIDX_73 0x16e430
++#define QM_REG_QVOQIDX_74 0x16e434
++#define QM_REG_QVOQIDX_75 0x16e438
++#define QM_REG_QVOQIDX_76 0x16e43c
++#define QM_REG_QVOQIDX_77 0x16e440
++#define QM_REG_QVOQIDX_78 0x16e444
++#define QM_REG_QVOQIDX_79 0x16e448
++#define QM_REG_QVOQIDX_70 0x16e424
++#define QM_REG_QVOQIDX_71 0x16e428
++#define QM_REG_QVOQIDX_72 0x16e42c
++#define QM_REG_QVOQIDX_73 0x16e430
++#define QM_REG_QVOQIDX_74 0x16e434
++#define QM_REG_QVOQIDX_75 0x16e438
++#define QM_REG_QVOQIDX_76 0x16e43c
++#define QM_REG_QVOQIDX_77 0x16e440
++#define QM_REG_QVOQIDX_78 0x16e444
++#define QM_REG_QVOQIDX_79 0x16e448
+ #define QM_REG_QVOQIDX_8 0x168114
++#define QM_REG_QVOQIDX_80 0x16e44c
++#define QM_REG_QVOQIDX_81 0x16e450
++#define QM_REG_QVOQIDX_82 0x16e454
++#define QM_REG_QVOQIDX_83 0x16e458
++#define QM_REG_QVOQIDX_84 0x16e45c
++#define QM_REG_QVOQIDX_85 0x16e460
++#define QM_REG_QVOQIDX_86 0x16e464
++#define QM_REG_QVOQIDX_87 0x16e468
++#define QM_REG_QVOQIDX_88 0x16e46c
++#define QM_REG_QVOQIDX_89 0x16e470
++#define QM_REG_QVOQIDX_80 0x16e44c
++#define QM_REG_QVOQIDX_81 0x16e450
++#define QM_REG_QVOQIDX_85 0x16e460
++#define QM_REG_QVOQIDX_86 0x16e464
++#define QM_REG_QVOQIDX_87 0x16e468
++#define QM_REG_QVOQIDX_88 0x16e46c
++#define QM_REG_QVOQIDX_89 0x16e470
+ #define QM_REG_QVOQIDX_9 0x168118
+-/* [R 24] Remaining pause timeout for port 0 */
+-#define QM_REG_REMAINPAUSETM0 0x168418
+-/* [R 24] Remaining pause timeout for port 1 */
+-#define QM_REG_REMAINPAUSETM1 0x16841c
++#define QM_REG_QVOQIDX_90 0x16e474
++#define QM_REG_QVOQIDX_91 0x16e478
++#define QM_REG_QVOQIDX_92 0x16e47c
++#define QM_REG_QVOQIDX_93 0x16e480
++#define QM_REG_QVOQIDX_94 0x16e484
++#define QM_REG_QVOQIDX_95 0x16e488
++#define QM_REG_QVOQIDX_96 0x16e48c
++#define QM_REG_QVOQIDX_97 0x16e490
++#define QM_REG_QVOQIDX_98 0x16e494
++#define QM_REG_QVOQIDX_99 0x16e498
++#define QM_REG_QVOQIDX_90 0x16e474
++#define QM_REG_QVOQIDX_91 0x16e478
++#define QM_REG_QVOQIDX_92 0x16e47c
++#define QM_REG_QVOQIDX_93 0x16e480
++#define QM_REG_QVOQIDX_94 0x16e484
++#define QM_REG_QVOQIDX_95 0x16e488
++#define QM_REG_QVOQIDX_96 0x16e48c
++#define QM_REG_QVOQIDX_97 0x16e490
++#define QM_REG_QVOQIDX_98 0x16e494
++#define QM_REG_QVOQIDX_99 0x16e498
+ /* [RW 1] Initialization bit command */
+ #define QM_REG_SOFT_RESET 0x168428
+ /* [RW 8] The credit cost per every task in the QM. A value per each VOQ */
+@@ -2372,44 +2906,103 @@
+ #define QM_REG_VOQINITCREDIT_4 0x168070
+ #define QM_REG_VOQINITCREDIT_5 0x168074
+ /* [RW 1] The port of which VOQ belongs */
++#define QM_REG_VOQPORT_0 0x1682a0
+ #define QM_REG_VOQPORT_1 0x1682a4
+ #define QM_REG_VOQPORT_10 0x1682c8
+ #define QM_REG_VOQPORT_11 0x1682cc
+ #define QM_REG_VOQPORT_2 0x1682a8
+-/* [RW 32] The physical queue number associated with each VOQ */
++/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+ #define QM_REG_VOQQMASK_0_LSB 0x168240
+-/* [RW 32] The physical queue number associated with each VOQ */
++/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
++#define QM_REG_VOQQMASK_0_LSB_EXT_A 0x16e524
++/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
+ #define QM_REG_VOQQMASK_0_MSB 0x168244
+-/* [RW 32] The physical queue number associated with each VOQ */
++/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
++#define QM_REG_VOQQMASK_0_MSB_EXT_A 0x16e528
++/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
++#define QM_REG_VOQQMASK_10_LSB 0x168290
++/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
++#define QM_REG_VOQQMASK_10_LSB_EXT_A 0x16e574
++/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
++#define QM_REG_VOQQMASK_10_MSB 0x168294
++/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
++#define QM_REG_VOQQMASK_10_MSB_EXT_A 0x16e578
++/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
++#define QM_REG_VOQQMASK_11_LSB 0x168298
++/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
++#define QM_REG_VOQQMASK_11_LSB_EXT_A 0x16e57c
++/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
++#define QM_REG_VOQQMASK_11_MSB 0x16829c
++/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
++#define QM_REG_VOQQMASK_11_MSB_EXT_A 0x16e580
++/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
++#define QM_REG_VOQQMASK_1_LSB 0x168248
++/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
++#define QM_REG_VOQQMASK_1_LSB_EXT_A 0x16e52c
++/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
+ #define QM_REG_VOQQMASK_1_MSB 0x16824c
+-/* [RW 32] The physical queue number associated with each VOQ */
++/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
++#define QM_REG_VOQQMASK_1_MSB_EXT_A 0x16e530
++/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+ #define QM_REG_VOQQMASK_2_LSB 0x168250
+-/* [RW 32] The physical queue number associated with each VOQ */
++/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
++#define QM_REG_VOQQMASK_2_LSB_EXT_A 0x16e534
++/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
+ #define QM_REG_VOQQMASK_2_MSB 0x168254
+-/* [RW 32] The physical queue number associated with each VOQ */
++/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
++#define QM_REG_VOQQMASK_2_MSB_EXT_A 0x16e538
++/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+ #define QM_REG_VOQQMASK_3_LSB 0x168258
+-/* [RW 32] The physical queue number associated with each VOQ */
++/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
++#define QM_REG_VOQQMASK_3_LSB_EXT_A 0x16e53c
++/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
++#define QM_REG_VOQQMASK_3_MSB_EXT_A 0x16e540
++/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+ #define QM_REG_VOQQMASK_4_LSB 0x168260
+-/* [RW 32] The physical queue number associated with each VOQ */
++/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
++#define QM_REG_VOQQMASK_4_LSB_EXT_A 0x16e544
++/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
+ #define QM_REG_VOQQMASK_4_MSB 0x168264
+-/* [RW 32] The physical queue number associated with each VOQ */
++/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
++#define QM_REG_VOQQMASK_4_MSB_EXT_A 0x16e548
++/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+ #define QM_REG_VOQQMASK_5_LSB 0x168268
+-/* [RW 32] The physical queue number associated with each VOQ */
++/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
++#define QM_REG_VOQQMASK_5_LSB_EXT_A 0x16e54c
++/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
+ #define QM_REG_VOQQMASK_5_MSB 0x16826c
+-/* [RW 32] The physical queue number associated with each VOQ */
++/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
++#define QM_REG_VOQQMASK_5_MSB_EXT_A 0x16e550
++/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+ #define QM_REG_VOQQMASK_6_LSB 0x168270
+-/* [RW 32] The physical queue number associated with each VOQ */
++/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
++#define QM_REG_VOQQMASK_6_LSB_EXT_A 0x16e554
++/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
+ #define QM_REG_VOQQMASK_6_MSB 0x168274
+-/* [RW 32] The physical queue number associated with each VOQ */
++/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
++#define QM_REG_VOQQMASK_6_MSB_EXT_A 0x16e558
++/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+ #define QM_REG_VOQQMASK_7_LSB 0x168278
+-/* [RW 32] The physical queue number associated with each VOQ */
++/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
++#define QM_REG_VOQQMASK_7_LSB_EXT_A 0x16e55c
++/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
+ #define QM_REG_VOQQMASK_7_MSB 0x16827c
+-/* [RW 32] The physical queue number associated with each VOQ */
++/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
++#define QM_REG_VOQQMASK_7_MSB_EXT_A 0x16e560
++/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+ #define QM_REG_VOQQMASK_8_LSB 0x168280
+-/* [RW 32] The physical queue number associated with each VOQ */
++/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
++#define QM_REG_VOQQMASK_8_LSB_EXT_A 0x16e564
++/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
+ #define QM_REG_VOQQMASK_8_MSB 0x168284
+-/* [RW 32] The physical queue number associated with each VOQ */
++/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
++#define QM_REG_VOQQMASK_8_MSB_EXT_A 0x16e568
++/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+ #define QM_REG_VOQQMASK_9_LSB 0x168288
++/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
++#define QM_REG_VOQQMASK_9_LSB_EXT_A 0x16e56c
++/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
++#define QM_REG_VOQQMASK_9_MSB_EXT_A 0x16e570
+ /* [RW 32] Wrr weights */
+ #define QM_REG_WRRWEIGHTS_0 0x16880c
+ #define QM_REG_WRRWEIGHTS_1 0x168810
+@@ -2431,14 +3024,78 @@
+ #define QM_REG_WRRWEIGHTS_15 0x168828
+ #define QM_REG_WRRWEIGHTS_15_SIZE 1
+ /* [RW 32] Wrr weights */
++#define QM_REG_WRRWEIGHTS_16 0x16e000
++#define QM_REG_WRRWEIGHTS_16_SIZE 1
++/* [RW 32] Wrr weights */
++#define QM_REG_WRRWEIGHTS_17 0x16e004
++#define QM_REG_WRRWEIGHTS_17_SIZE 1
++/* [RW 32] Wrr weights */
++#define QM_REG_WRRWEIGHTS_18 0x16e008
++#define QM_REG_WRRWEIGHTS_18_SIZE 1
++/* [RW 32] Wrr weights */
++#define QM_REG_WRRWEIGHTS_19 0x16e00c
++#define QM_REG_WRRWEIGHTS_19_SIZE 1
++/* [RW 32] Wrr weights */
+ #define QM_REG_WRRWEIGHTS_10 0x168814
+ #define QM_REG_WRRWEIGHTS_11 0x168818
+ #define QM_REG_WRRWEIGHTS_12 0x16881c
+ #define QM_REG_WRRWEIGHTS_13 0x168820
+ #define QM_REG_WRRWEIGHTS_14 0x168824
+ #define QM_REG_WRRWEIGHTS_15 0x168828
++#define QM_REG_WRRWEIGHTS_16 0x16e000
++#define QM_REG_WRRWEIGHTS_17 0x16e004
++#define QM_REG_WRRWEIGHTS_18 0x16e008
++#define QM_REG_WRRWEIGHTS_19 0x16e00c
+ #define QM_REG_WRRWEIGHTS_2 0x16882c
++#define QM_REG_WRRWEIGHTS_20 0x16e010
++#define QM_REG_WRRWEIGHTS_20_SIZE 1
++/* [RW 32] Wrr weights */
++#define QM_REG_WRRWEIGHTS_21 0x16e014
++#define QM_REG_WRRWEIGHTS_21_SIZE 1
++/* [RW 32] Wrr weights */
++#define QM_REG_WRRWEIGHTS_22 0x16e018
++#define QM_REG_WRRWEIGHTS_22_SIZE 1
++/* [RW 32] Wrr weights */
++#define QM_REG_WRRWEIGHTS_23 0x16e01c
++#define QM_REG_WRRWEIGHTS_23_SIZE 1
++/* [RW 32] Wrr weights */
++#define QM_REG_WRRWEIGHTS_24 0x16e020
++#define QM_REG_WRRWEIGHTS_24_SIZE 1
++/* [RW 32] Wrr weights */
++#define QM_REG_WRRWEIGHTS_25 0x16e024
++#define QM_REG_WRRWEIGHTS_25_SIZE 1
++/* [RW 32] Wrr weights */
++#define QM_REG_WRRWEIGHTS_26 0x16e028
++#define QM_REG_WRRWEIGHTS_26_SIZE 1
++/* [RW 32] Wrr weights */
++#define QM_REG_WRRWEIGHTS_27 0x16e02c
++#define QM_REG_WRRWEIGHTS_27_SIZE 1
++/* [RW 32] Wrr weights */
++#define QM_REG_WRRWEIGHTS_28 0x16e030
++#define QM_REG_WRRWEIGHTS_28_SIZE 1
++/* [RW 32] Wrr weights */
++#define QM_REG_WRRWEIGHTS_29 0x16e034
++#define QM_REG_WRRWEIGHTS_29_SIZE 1
++/* [RW 32] Wrr weights */
++#define QM_REG_WRRWEIGHTS_20 0x16e010
++#define QM_REG_WRRWEIGHTS_21 0x16e014
++#define QM_REG_WRRWEIGHTS_22 0x16e018
++#define QM_REG_WRRWEIGHTS_23 0x16e01c
++#define QM_REG_WRRWEIGHTS_24 0x16e020
++#define QM_REG_WRRWEIGHTS_25 0x16e024
++#define QM_REG_WRRWEIGHTS_26 0x16e028
++#define QM_REG_WRRWEIGHTS_27 0x16e02c
++#define QM_REG_WRRWEIGHTS_28 0x16e030
++#define QM_REG_WRRWEIGHTS_29 0x16e034
+ #define QM_REG_WRRWEIGHTS_3 0x168830
++#define QM_REG_WRRWEIGHTS_30 0x16e038
++#define QM_REG_WRRWEIGHTS_30_SIZE 1
++/* [RW 32] Wrr weights */
++#define QM_REG_WRRWEIGHTS_31 0x16e03c
++#define QM_REG_WRRWEIGHTS_31_SIZE 1
++/* [RW 32] Wrr weights */
++#define QM_REG_WRRWEIGHTS_30 0x16e038
++#define QM_REG_WRRWEIGHTS_31 0x16e03c
+ #define QM_REG_WRRWEIGHTS_4 0x168834
+ #define QM_REG_WRRWEIGHTS_5 0x168838
+ #define QM_REG_WRRWEIGHTS_6 0x16883c
+@@ -2447,6 +3104,70 @@
+ #define QM_REG_WRRWEIGHTS_9 0x168848
+ /* [R 6] Keep the fill level of the fifo from write client 1 */
+ #define QM_REG_XQM_WRC_FIFOLVL 0x168000
++#define BRB1_BRB1_INT_STS_REG_ADDRESS_ERROR (0x1<<0)
++#define BRB1_BRB1_INT_STS_REG_ADDRESS_ERROR_SIZE 0
++#define BRB1_BRB1_INT_STS_CLR_REG_ADDRESS_ERROR (0x1<<0)
++#define BRB1_BRB1_INT_STS_CLR_REG_ADDRESS_ERROR_SIZE 0
++#define BRB1_BRB1_INT_STS_WR_REG_ADDRESS_ERROR (0x1<<0)
++#define BRB1_BRB1_INT_STS_WR_REG_ADDRESS_ERROR_SIZE 0
++#define BRB1_BRB1_INT_MASK_REG_ADDRESS_ERROR (0x1<<0)
++#define BRB1_BRB1_INT_MASK_REG_ADDRESS_ERROR_SIZE 0
++#define CCM_CCM_INT_STS_REG_ADDRESS_ERROR (0x1<<0)
++#define CCM_CCM_INT_STS_REG_ADDRESS_ERROR_SIZE 0
++#define CCM_CCM_INT_STS_CLR_REG_ADDRESS_ERROR (0x1<<0)
++#define CCM_CCM_INT_STS_CLR_REG_ADDRESS_ERROR_SIZE 0
++#define CCM_CCM_INT_STS_WR_REG_ADDRESS_ERROR (0x1<<0)
++#define CCM_CCM_INT_STS_WR_REG_ADDRESS_ERROR_SIZE 0
++#define CCM_CCM_INT_MASK_REG_ADDRESS_ERROR (0x1<<0)
++#define CCM_CCM_INT_MASK_REG_ADDRESS_ERROR_SIZE 0
++#define CDU_CDU_INT_STS_REG_ADDRESS_ERROR (0x1<<0)
++#define CDU_CDU_INT_STS_REG_ADDRESS_ERROR_SIZE 0
++#define CDU_CDU_INT_STS_CLR_REG_ADDRESS_ERROR (0x1<<0)
++#define CDU_CDU_INT_STS_CLR_REG_ADDRESS_ERROR_SIZE 0
++#define CDU_CDU_INT_STS_WR_REG_ADDRESS_ERROR (0x1<<0)
++#define CDU_CDU_INT_STS_WR_REG_ADDRESS_ERROR_SIZE 0
++#define CDU_CDU_INT_MASK_REG_ADDRESS_ERROR (0x1<<0)
++#define CDU_CDU_INT_MASK_REG_ADDRESS_ERROR_SIZE 0
++#define CFC_CFC_INT_STS_REG_ADDRESS_ERROR (0x1<<0)
++#define CFC_CFC_INT_STS_REG_ADDRESS_ERROR_SIZE 0
++#define CFC_CFC_INT_STS_CLR_REG_ADDRESS_ERROR (0x1<<0)
++#define CFC_CFC_INT_STS_CLR_REG_ADDRESS_ERROR_SIZE 0
++#define CFC_CFC_INT_STS_WR_REG_ADDRESS_ERROR (0x1<<0)
++#define CFC_CFC_INT_STS_WR_REG_ADDRESS_ERROR_SIZE 0
++#define CFC_CFC_INT_MASK_REG_ADDRESS_ERROR (0x1<<0)
++#define CFC_CFC_INT_MASK_REG_ADDRESS_ERROR_SIZE 0
++#define CSDM_CSDM_INT_STS_0_REG_ADDRESS_ERROR (0x1<<0)
++#define CSDM_CSDM_INT_STS_0_REG_ADDRESS_ERROR_SIZE 0
++#define CSDM_CSDM_INT_STS_CLR_0_REG_ADDRESS_ERROR (0x1<<0)
++#define CSDM_CSDM_INT_STS_CLR_0_REG_ADDRESS_ERROR_SIZE 0
++#define CSDM_CSDM_INT_STS_WR_0_REG_ADDRESS_ERROR (0x1<<0)
++#define CSDM_CSDM_INT_STS_WR_0_REG_ADDRESS_ERROR_SIZE 0
++#define CSDM_CSDM_INT_MASK_0_REG_ADDRESS_ERROR (0x1<<0)
++#define CSDM_CSDM_INT_MASK_0_REG_ADDRESS_ERROR_SIZE 0
++#define CSEM_CSEM_INT_STS_0_REG_ADDRESS_ERROR (0x1<<0)
++#define CSEM_CSEM_INT_STS_0_REG_ADDRESS_ERROR_SIZE 0
++#define CSEM_CSEM_INT_STS_CLR_0_REG_ADDRESS_ERROR (0x1<<0)
++#define CSEM_CSEM_INT_STS_CLR_0_REG_ADDRESS_ERROR_SIZE 0
++#define CSEM_CSEM_INT_STS_WR_0_REG_ADDRESS_ERROR (0x1<<0)
++#define CSEM_CSEM_INT_STS_WR_0_REG_ADDRESS_ERROR_SIZE 0
++#define CSEM_CSEM_INT_MASK_0_REG_ADDRESS_ERROR (0x1<<0)
++#define CSEM_CSEM_INT_MASK_0_REG_ADDRESS_ERROR_SIZE 0
++#define DBG_DBG_INT_STS_REG_ADDRESS_ERROR (0x1<<0)
++#define DBG_DBG_INT_STS_REG_ADDRESS_ERROR_SIZE 0
++#define DBG_DBG_INT_STS_CLR_REG_ADDRESS_ERROR (0x1<<0)
++#define DBG_DBG_INT_STS_CLR_REG_ADDRESS_ERROR_SIZE 0
++#define DBG_DBG_INT_STS_WR_REG_ADDRESS_ERROR (0x1<<0)
++#define DBG_DBG_INT_STS_WR_REG_ADDRESS_ERROR_SIZE 0
++#define DBG_DBG_INT_MASK_REG_ADDRESS_ERROR (0x1<<0)
++#define DBG_DBG_INT_MASK_REG_ADDRESS_ERROR_SIZE 0
++#define DMAE_DMAE_INT_STS_REG_ADDRESS_ERROR (0x1<<0)
++#define DMAE_DMAE_INT_STS_REG_ADDRESS_ERROR_SIZE 0
++#define DMAE_DMAE_INT_STS_CLR_REG_ADDRESS_ERROR (0x1<<0)
++#define DMAE_DMAE_INT_STS_CLR_REG_ADDRESS_ERROR_SIZE 0
++#define DMAE_DMAE_INT_STS_WR_REG_ADDRESS_ERROR (0x1<<0)
++#define DMAE_DMAE_INT_STS_WR_REG_ADDRESS_ERROR_SIZE 0
++#define DMAE_DMAE_INT_MASK_REG_ADDRESS_ERROR (0x1<<0)
++#define DMAE_DMAE_INT_MASK_REG_ADDRESS_ERROR_SIZE 0
+ #define DORQ_DORQ_INT_STS_REG_ADDRESS_ERROR (0x1<<0)
+ #define DORQ_DORQ_INT_STS_REG_ADDRESS_ERROR_SIZE 0
+ #define DORQ_DORQ_INT_STS_CLR_REG_ADDRESS_ERROR (0x1<<0)
+@@ -2455,6 +3176,22 @@
+ #define DORQ_DORQ_INT_STS_WR_REG_ADDRESS_ERROR_SIZE 0
+ #define DORQ_DORQ_INT_MASK_REG_ADDRESS_ERROR (0x1<<0)
+ #define DORQ_DORQ_INT_MASK_REG_ADDRESS_ERROR_SIZE 0
++#define HC_HC_INT_STS_REG_ADDRESS_ERROR (0x1<<0)
++#define HC_HC_INT_STS_REG_ADDRESS_ERROR_SIZE 0
++#define HC_HC_INT_STS_CLR_REG_ADDRESS_ERROR (0x1<<0)
++#define HC_HC_INT_STS_CLR_REG_ADDRESS_ERROR_SIZE 0
++#define HC_HC_INT_STS_WR_REG_ADDRESS_ERROR (0x1<<0)
++#define HC_HC_INT_STS_WR_REG_ADDRESS_ERROR_SIZE 0
++#define HC_HC_INT_MASK_REG_ADDRESS_ERROR (0x1<<0)
++#define HC_HC_INT_MASK_REG_ADDRESS_ERROR_SIZE 0
++#define MISC_MISC_INT_STS_REG_ADDRESS_ERROR (0x1<<0)
++#define MISC_MISC_INT_STS_REG_ADDRESS_ERROR_SIZE 0
++#define MISC_MISC_INT_STS_CLR_REG_ADDRESS_ERROR (0x1<<0)
++#define MISC_MISC_INT_STS_CLR_REG_ADDRESS_ERROR_SIZE 0
++#define MISC_MISC_INT_STS_WR_REG_ADDRESS_ERROR (0x1<<0)
++#define MISC_MISC_INT_STS_WR_REG_ADDRESS_ERROR_SIZE 0
++#define MISC_MISC_INT_MASK_REG_ADDRESS_ERROR (0x1<<0)
++#define MISC_MISC_INT_MASK_REG_ADDRESS_ERROR_SIZE 0
+ #define NIG_NIG_INT_STS_0_REG_ADDRESS_ERROR (0x1<<0)
+ #define NIG_NIG_INT_STS_0_REG_ADDRESS_ERROR_SIZE 0
+ #define NIG_NIG_INT_STS_CLR_0_REG_ADDRESS_ERROR (0x1<<0)
+@@ -2463,6 +3200,70 @@
+ #define NIG_NIG_INT_STS_WR_0_REG_ADDRESS_ERROR_SIZE 0
+ #define NIG_NIG_INT_MASK_0_REG_ADDRESS_ERROR (0x1<<0)
+ #define NIG_NIG_INT_MASK_0_REG_ADDRESS_ERROR_SIZE 0
++#define PBF_PBF_INT_STS_REG_ADDRESS_ERROR (0x1<<0)
++#define PBF_PBF_INT_STS_REG_ADDRESS_ERROR_SIZE 0
++#define PBF_PBF_INT_STS_CLR_REG_ADDRESS_ERROR (0x1<<0)
++#define PBF_PBF_INT_STS_CLR_REG_ADDRESS_ERROR_SIZE 0
++#define PBF_PBF_INT_STS_WR_REG_ADDRESS_ERROR (0x1<<0)
++#define PBF_PBF_INT_STS_WR_REG_ADDRESS_ERROR_SIZE 0
++#define PBF_PBF_INT_MASK_REG_ADDRESS_ERROR (0x1<<0)
++#define PBF_PBF_INT_MASK_REG_ADDRESS_ERROR_SIZE 0
++#define PB_PB_INT_STS_REG_ADDRESS_ERROR (0x1<<0)
++#define PB_PB_INT_STS_REG_ADDRESS_ERROR_SIZE 0
++#define PB_PB_INT_STS_CLR_REG_ADDRESS_ERROR (0x1<<0)
++#define PB_PB_INT_STS_CLR_REG_ADDRESS_ERROR_SIZE 0
++#define PB_PB_INT_STS_WR_REG_ADDRESS_ERROR (0x1<<0)
++#define PB_PB_INT_STS_WR_REG_ADDRESS_ERROR_SIZE 0
++#define PB_PB_INT_MASK_REG_ADDRESS_ERROR (0x1<<0)
++#define PB_PB_INT_MASK_REG_ADDRESS_ERROR_SIZE 0
++#define PRS_PRS_INT_STS_REG_ADDRESS_ERROR (0x1<<0)
++#define PRS_PRS_INT_STS_REG_ADDRESS_ERROR_SIZE 0
++#define PRS_PRS_INT_STS_CLR_REG_ADDRESS_ERROR (0x1<<0)
++#define PRS_PRS_INT_STS_CLR_REG_ADDRESS_ERROR_SIZE 0
++#define PRS_PRS_INT_STS_WR_REG_ADDRESS_ERROR (0x1<<0)
++#define PRS_PRS_INT_STS_WR_REG_ADDRESS_ERROR_SIZE 0
++#define PRS_PRS_INT_MASK_REG_ADDRESS_ERROR (0x1<<0)
++#define PRS_PRS_INT_MASK_REG_ADDRESS_ERROR_SIZE 0
++#define PXP2_PXP2_INT_STS_0_REG_ADDRESS_ERROR (0x1<<0)
++#define PXP2_PXP2_INT_STS_0_REG_ADDRESS_ERROR_SIZE 0
++#define PXP2_PXP2_INT_STS_CLR_0_REG_ADDRESS_ERROR (0x1<<0)
++#define PXP2_PXP2_INT_STS_CLR_0_REG_ADDRESS_ERROR_SIZE 0
++#define PXP2_PXP2_INT_STS_WR_0_REG_ADDRESS_ERROR (0x1<<0)
++#define PXP2_PXP2_INT_STS_WR_0_REG_ADDRESS_ERROR_SIZE 0
++#define PXP2_PXP2_INT_MASK_0_REG_ADDRESS_ERROR (0x1<<0)
++#define PXP2_PXP2_INT_MASK_0_REG_ADDRESS_ERROR_SIZE 0
++#define PXP_PXP_INT_STS_0_REG_ADDRESS_ERROR (0x1<<0)
++#define PXP_PXP_INT_STS_0_REG_ADDRESS_ERROR_SIZE 0
++#define PXP_PXP_INT_STS_CLR_0_REG_ADDRESS_ERROR (0x1<<0)
++#define PXP_PXP_INT_STS_CLR_0_REG_ADDRESS_ERROR_SIZE 0
++#define PXP_PXP_INT_STS_WR_0_REG_ADDRESS_ERROR (0x1<<0)
++#define PXP_PXP_INT_STS_WR_0_REG_ADDRESS_ERROR_SIZE 0
++#define PXP_PXP_INT_MASK_0_REG_ADDRESS_ERROR (0x1<<0)
++#define PXP_PXP_INT_MASK_0_REG_ADDRESS_ERROR_SIZE 0
++#define QM_QM_INT_STS_REG_ADDRESS_ERROR (0x1<<0)
++#define QM_QM_INT_STS_REG_ADDRESS_ERROR_SIZE 0
++#define QM_QM_INT_STS_CLR_REG_ADDRESS_ERROR (0x1<<0)
++#define QM_QM_INT_STS_CLR_REG_ADDRESS_ERROR_SIZE 0
++#define QM_QM_INT_STS_WR_REG_ADDRESS_ERROR (0x1<<0)
++#define QM_QM_INT_STS_WR_REG_ADDRESS_ERROR_SIZE 0
++#define QM_QM_INT_MASK_REG_ADDRESS_ERROR (0x1<<0)
++#define QM_QM_INT_MASK_REG_ADDRESS_ERROR_SIZE 0
++#define SEM_FAST_SEM_FAST_INT_STS_REG_ADDRESS_ERROR (0x1<<0)
++#define SEM_FAST_SEM_FAST_INT_STS_REG_ADDRESS_ERROR_SIZE 0
++#define SEM_FAST_SEM_FAST_INT_STS_CLR_REG_ADDRESS_ERROR (0x1<<0)
++#define SEM_FAST_SEM_FAST_INT_STS_CLR_REG_ADDRESS_ERROR_SIZE 0
++#define SEM_FAST_SEM_FAST_INT_STS_WR_REG_ADDRESS_ERROR (0x1<<0)
++#define SEM_FAST_SEM_FAST_INT_STS_WR_REG_ADDRESS_ERROR_SIZE 0
++#define SEM_FAST_SEM_FAST_INT_MASK_REG_ADDRESS_ERROR (0x1<<0)
++#define SEM_FAST_SEM_FAST_INT_MASK_REG_ADDRESS_ERROR_SIZE 0
++#define SRC_SRC_INT_STS_REG_ADDRESS_ERROR (0x1<<0)
++#define SRC_SRC_INT_STS_REG_ADDRESS_ERROR_SIZE 0
++#define SRC_SRC_INT_STS_CLR_REG_ADDRESS_ERROR (0x1<<0)
++#define SRC_SRC_INT_STS_CLR_REG_ADDRESS_ERROR_SIZE 0
++#define SRC_SRC_INT_STS_WR_REG_ADDRESS_ERROR (0x1<<0)
++#define SRC_SRC_INT_STS_WR_REG_ADDRESS_ERROR_SIZE 0
++#define SRC_SRC_INT_MASK_REG_ADDRESS_ERROR (0x1<<0)
++#define SRC_SRC_INT_MASK_REG_ADDRESS_ERROR_SIZE 0
+ #define TCM_TCM_INT_STS_REG_ADDRESS_ERROR (0x1<<0)
+ #define TCM_TCM_INT_STS_REG_ADDRESS_ERROR_SIZE 0
+ #define TCM_TCM_INT_STS_CLR_REG_ADDRESS_ERROR (0x1<<0)
+@@ -2471,18 +3272,98 @@
+ #define TCM_TCM_INT_STS_WR_REG_ADDRESS_ERROR_SIZE 0
+ #define TCM_TCM_INT_MASK_REG_ADDRESS_ERROR (0x1<<0)
+ #define TCM_TCM_INT_MASK_REG_ADDRESS_ERROR_SIZE 0
++#define TM_TM_INT_STS_REG_ADDRESS_ERROR (0x1<<0)
++#define TM_TM_INT_STS_REG_ADDRESS_ERROR_SIZE 0
++#define TM_TM_INT_STS_CLR_REG_ADDRESS_ERROR (0x1<<0)
++#define TM_TM_INT_STS_CLR_REG_ADDRESS_ERROR_SIZE 0
++#define TM_TM_INT_STS_WR_REG_ADDRESS_ERROR (0x1<<0)
++#define TM_TM_INT_STS_WR_REG_ADDRESS_ERROR_SIZE 0
++#define TM_TM_INT_MASK_REG_ADDRESS_ERROR (0x1<<0)
++#define TM_TM_INT_MASK_REG_ADDRESS_ERROR_SIZE 0
++#define TSDM_TSDM_INT_STS_0_REG_ADDRESS_ERROR (0x1<<0)
++#define TSDM_TSDM_INT_STS_0_REG_ADDRESS_ERROR_SIZE 0
++#define TSDM_TSDM_INT_STS_CLR_0_REG_ADDRESS_ERROR (0x1<<0)
++#define TSDM_TSDM_INT_STS_CLR_0_REG_ADDRESS_ERROR_SIZE 0
++#define TSDM_TSDM_INT_STS_WR_0_REG_ADDRESS_ERROR (0x1<<0)
++#define TSDM_TSDM_INT_STS_WR_0_REG_ADDRESS_ERROR_SIZE 0
++#define TSDM_TSDM_INT_MASK_0_REG_ADDRESS_ERROR (0x1<<0)
++#define TSDM_TSDM_INT_MASK_0_REG_ADDRESS_ERROR_SIZE 0
++#define TSEM_TSEM_INT_STS_0_REG_ADDRESS_ERROR (0x1<<0)
++#define TSEM_TSEM_INT_STS_0_REG_ADDRESS_ERROR_SIZE 0
++#define TSEM_TSEM_INT_STS_CLR_0_REG_ADDRESS_ERROR (0x1<<0)
++#define TSEM_TSEM_INT_STS_CLR_0_REG_ADDRESS_ERROR_SIZE 0
++#define TSEM_TSEM_INT_STS_WR_0_REG_ADDRESS_ERROR (0x1<<0)
++#define TSEM_TSEM_INT_STS_WR_0_REG_ADDRESS_ERROR_SIZE 0
++#define TSEM_TSEM_INT_MASK_0_REG_ADDRESS_ERROR (0x1<<0)
++#define TSEM_TSEM_INT_MASK_0_REG_ADDRESS_ERROR_SIZE 0
++#define UCM_UCM_INT_STS_REG_ADDRESS_ERROR (0x1<<0)
++#define UCM_UCM_INT_STS_REG_ADDRESS_ERROR_SIZE 0
++#define UCM_UCM_INT_STS_CLR_REG_ADDRESS_ERROR (0x1<<0)
++#define UCM_UCM_INT_STS_CLR_REG_ADDRESS_ERROR_SIZE 0
++#define UCM_UCM_INT_STS_WR_REG_ADDRESS_ERROR (0x1<<0)
++#define UCM_UCM_INT_STS_WR_REG_ADDRESS_ERROR_SIZE 0
++#define UCM_UCM_INT_MASK_REG_ADDRESS_ERROR (0x1<<0)
++#define UCM_UCM_INT_MASK_REG_ADDRESS_ERROR_SIZE 0
++#define USDM_USDM_INT_STS_0_REG_ADDRESS_ERROR (0x1<<0)
++#define USDM_USDM_INT_STS_0_REG_ADDRESS_ERROR_SIZE 0
++#define USDM_USDM_INT_STS_CLR_0_REG_ADDRESS_ERROR (0x1<<0)
++#define USDM_USDM_INT_STS_CLR_0_REG_ADDRESS_ERROR_SIZE 0
++#define USDM_USDM_INT_STS_WR_0_REG_ADDRESS_ERROR (0x1<<0)
++#define USDM_USDM_INT_STS_WR_0_REG_ADDRESS_ERROR_SIZE 0
++#define USDM_USDM_INT_MASK_0_REG_ADDRESS_ERROR (0x1<<0)
++#define USDM_USDM_INT_MASK_0_REG_ADDRESS_ERROR_SIZE 0
++#define USEM_USEM_INT_STS_0_REG_ADDRESS_ERROR (0x1<<0)
++#define USEM_USEM_INT_STS_0_REG_ADDRESS_ERROR_SIZE 0
++#define USEM_USEM_INT_STS_CLR_0_REG_ADDRESS_ERROR (0x1<<0)
++#define USEM_USEM_INT_STS_CLR_0_REG_ADDRESS_ERROR_SIZE 0
++#define USEM_USEM_INT_STS_WR_0_REG_ADDRESS_ERROR (0x1<<0)
++#define USEM_USEM_INT_STS_WR_0_REG_ADDRESS_ERROR_SIZE 0
++#define USEM_USEM_INT_MASK_0_REG_ADDRESS_ERROR (0x1<<0)
++#define USEM_USEM_INT_MASK_0_REG_ADDRESS_ERROR_SIZE 0
++#define XCM_XCM_INT_STS_REG_ADDRESS_ERROR (0x1<<0)
++#define XCM_XCM_INT_STS_REG_ADDRESS_ERROR_SIZE 0
++#define XCM_XCM_INT_STS_CLR_REG_ADDRESS_ERROR (0x1<<0)
++#define XCM_XCM_INT_STS_CLR_REG_ADDRESS_ERROR_SIZE 0
++#define XCM_XCM_INT_STS_WR_REG_ADDRESS_ERROR (0x1<<0)
++#define XCM_XCM_INT_STS_WR_REG_ADDRESS_ERROR_SIZE 0
++#define XCM_XCM_INT_MASK_REG_ADDRESS_ERROR (0x1<<0)
++#define XCM_XCM_INT_MASK_REG_ADDRESS_ERROR_SIZE 0
++#define XSDM_XSDM_INT_STS_0_REG_ADDRESS_ERROR (0x1<<0)
++#define XSDM_XSDM_INT_STS_0_REG_ADDRESS_ERROR_SIZE 0
++#define XSDM_XSDM_INT_STS_CLR_0_REG_ADDRESS_ERROR (0x1<<0)
++#define XSDM_XSDM_INT_STS_CLR_0_REG_ADDRESS_ERROR_SIZE 0
++#define XSDM_XSDM_INT_STS_WR_0_REG_ADDRESS_ERROR (0x1<<0)
++#define XSDM_XSDM_INT_STS_WR_0_REG_ADDRESS_ERROR_SIZE 0
++#define XSDM_XSDM_INT_MASK_0_REG_ADDRESS_ERROR (0x1<<0)
++#define XSDM_XSDM_INT_MASK_0_REG_ADDRESS_ERROR_SIZE 0
++#define XSEM_XSEM_INT_STS_0_REG_ADDRESS_ERROR (0x1<<0)
++#define XSEM_XSEM_INT_STS_0_REG_ADDRESS_ERROR_SIZE 0
++#define XSEM_XSEM_INT_STS_CLR_0_REG_ADDRESS_ERROR (0x1<<0)
++#define XSEM_XSEM_INT_STS_CLR_0_REG_ADDRESS_ERROR_SIZE 0
++#define XSEM_XSEM_INT_STS_WR_0_REG_ADDRESS_ERROR (0x1<<0)
++#define XSEM_XSEM_INT_STS_WR_0_REG_ADDRESS_ERROR_SIZE 0
++#define XSEM_XSEM_INT_MASK_0_REG_ADDRESS_ERROR (0x1<<0)
++#define XSEM_XSEM_INT_MASK_0_REG_ADDRESS_ERROR_SIZE 0
+ #define CFC_DEBUG1_REG_WRITE_AC (0x1<<4)
+ #define CFC_DEBUG1_REG_WRITE_AC_SIZE 4
+-/* [R 1] debug only: This bit indicates wheter indicates that external
++/* [R 1] debug only: This bit indicates whether indicates that external
+ buffer was wrapped (oldest data was thrown); Relevant only when
+ ~dbg_registers_debug_target=2 (PCI) & ~dbg_registers_full_mode=1 (wrap); */
+ #define DBG_REG_WRAP_ON_EXT_BUFFER 0xc124
+ #define DBG_REG_WRAP_ON_EXT_BUFFER_SIZE 1
+-/* [R 1] debug only: This bit indicates wheter the internal buffer was
++/* [R 1] debug only: This bit indicates whether the internal buffer was
+ wrapped (oldest data was thrown) Relevant only when
+ ~dbg_registers_debug_target=0 (internal buffer) */
+ #define DBG_REG_WRAP_ON_INT_BUFFER 0xc128
+ #define DBG_REG_WRAP_ON_INT_BUFFER_SIZE 1
++#define QM_QM_PRTY_STS_REG_WRBUFF (0x1<<8)
++#define QM_QM_PRTY_STS_REG_WRBUFF_SIZE 8
++#define QM_QM_PRTY_STS_CLR_REG_WRBUFF (0x1<<8)
++#define QM_QM_PRTY_STS_CLR_REG_WRBUFF_SIZE 8
++#define QM_QM_PRTY_STS_WR_REG_WRBUFF (0x1<<8)
++#define QM_QM_PRTY_STS_WR_REG_WRBUFF_SIZE 8
++#define QM_QM_PRTY_MASK_REG_WRBUFF (0x1<<8)
++#define QM_QM_PRTY_MASK_REG_WRBUFF_SIZE 8
+ /* [RW 32] Wrr weights */
+ #define QM_REG_WRRWEIGHTS_0 0x16880c
+ #define QM_REG_WRRWEIGHTS_0_SIZE 1
+@@ -2531,20 +3412,77 @@
+ /* [RW 32] Wrr weights */
+ #define QM_REG_WRRWEIGHTS_9 0x168848
+ #define QM_REG_WRRWEIGHTS_9_SIZE 1
+-/* [RW 22] Number of free element in the free list of T2 entries - port 0. */
++/* [RW 32] Wrr weights */
++#define QM_REG_WRRWEIGHTS_16 0x16e000
++#define QM_REG_WRRWEIGHTS_16_SIZE 1
++/* [RW 32] Wrr weights */
++#define QM_REG_WRRWEIGHTS_17 0x16e004
++#define QM_REG_WRRWEIGHTS_17_SIZE 1
++/* [RW 32] Wrr weights */
++#define QM_REG_WRRWEIGHTS_18 0x16e008
++#define QM_REG_WRRWEIGHTS_18_SIZE 1
++/* [RW 32] Wrr weights */
++#define QM_REG_WRRWEIGHTS_19 0x16e00c
++#define QM_REG_WRRWEIGHTS_19_SIZE 1
++/* [RW 32] Wrr weights */
++#define QM_REG_WRRWEIGHTS_20 0x16e010
++#define QM_REG_WRRWEIGHTS_20_SIZE 1
++/* [RW 32] Wrr weights */
++#define QM_REG_WRRWEIGHTS_21 0x16e014
++#define QM_REG_WRRWEIGHTS_21_SIZE 1
++/* [RW 32] Wrr weights */
++#define QM_REG_WRRWEIGHTS_22 0x16e018
++#define QM_REG_WRRWEIGHTS_22_SIZE 1
++/* [RW 32] Wrr weights */
++#define QM_REG_WRRWEIGHTS_23 0x16e01c
++#define QM_REG_WRRWEIGHTS_23_SIZE 1
++/* [RW 32] Wrr weights */
++#define QM_REG_WRRWEIGHTS_24 0x16e020
++#define QM_REG_WRRWEIGHTS_24_SIZE 1
++/* [RW 32] Wrr weights */
++#define QM_REG_WRRWEIGHTS_25 0x16e024
++#define QM_REG_WRRWEIGHTS_25_SIZE 1
++/* [RW 32] Wrr weights */
++#define QM_REG_WRRWEIGHTS_26 0x16e028
++#define QM_REG_WRRWEIGHTS_26_SIZE 1
++/* [RW 32] Wrr weights */
++#define QM_REG_WRRWEIGHTS_27 0x16e02c
++#define QM_REG_WRRWEIGHTS_27_SIZE 1
++/* [RW 32] Wrr weights */
++#define QM_REG_WRRWEIGHTS_28 0x16e030
++#define QM_REG_WRRWEIGHTS_28_SIZE 1
++/* [RW 32] Wrr weights */
++#define QM_REG_WRRWEIGHTS_29 0x16e034
++#define QM_REG_WRRWEIGHTS_29_SIZE 1
++/* [RW 32] Wrr weights */
++#define QM_REG_WRRWEIGHTS_30 0x16e038
++#define QM_REG_WRRWEIGHTS_30_SIZE 1
++/* [RW 32] Wrr weights */
++#define QM_REG_WRRWEIGHTS_31 0x16e03c
++#define QM_REG_WRRWEIGHTS_31_SIZE 1
+ #define SRC_REG_COUNTFREE0 0x40500
+-/* [WB 64] First free element in the free list of T2 entries - port 0. */
++/* [RW 1] If clr the searcher is compatible to E1 A0 - support only two
++ ports. If set the searcher support 8 functions. */
++#define SRC_REG_E1HMF_ENABLE 0x404cc
+ #define SRC_REG_FIRSTFREE0 0x40510
+ #define SRC_REG_KEYRSS0_0 0x40408
++#define SRC_REG_KEYRSS0_7 0x40424
+ #define SRC_REG_KEYRSS1_9 0x40454
+-/* [WB 64] Last free element in the free list of T2 entries - port 0. */
++#define SRC_REG_KEYSEARCH_0 0x40458
++#define SRC_REG_KEYSEARCH_1 0x4045c
++#define SRC_REG_KEYSEARCH_2 0x40460
++#define SRC_REG_KEYSEARCH_3 0x40464
++#define SRC_REG_KEYSEARCH_4 0x40468
++#define SRC_REG_KEYSEARCH_5 0x4046c
++#define SRC_REG_KEYSEARCH_6 0x40470
++#define SRC_REG_KEYSEARCH_7 0x40474
++#define SRC_REG_KEYSEARCH_8 0x40478
++#define SRC_REG_KEYSEARCH_9 0x4047c
+ #define SRC_REG_LASTFREE0 0x40530
+-/* [RW 5] The number of hash bits used for the search (h); Values can be 8
+- to 24. */
+ #define SRC_REG_NUMBER_HASH_BITS0 0x40400
+ /* [RW 1] Reset internal state machines. */
+ #define SRC_REG_SOFT_RST 0x4049c
+-/* [R 1] Interrupt register #0 read */
++/* [R 3] Interrupt register #0 read */
+ #define SRC_REG_SRC_INT_STS 0x404ac
+ /* [RW 3] Parity mask register #0 read/write */
+ #define SRC_REG_SRC_PRTY_MASK 0x404c8
+@@ -2583,6 +3521,10 @@
+ /* [RC 1] Message length mismatch (relative to last indication) at the In#9
+ interface. */
+ #define TCM_REG_CSEM_LENGTH_MIS 0x50174
++/* [RW 3] The weight of the input csem in the WRR mechanism. 0 stands for
++ weight 8 (the most prioritised); 1 stands for weight 1(least
++ prioritised); 2 stands for weight 2; tc. */
++#define TCM_REG_CSEM_WEIGHT 0x500bc
+ /* [RW 8] The Event ID in case of ErrorFlg is set in the input message. */
+ #define TCM_REG_ERR_EVNT_ID 0x500a0
+ /* [RW 28] The CM erroneous header for QM and Timers formatting. */
+@@ -2626,6 +3568,7 @@
+ #define TCM_REG_N_SM_CTX_LD_2 0x50058
+ #define TCM_REG_N_SM_CTX_LD_3 0x5005c
+ #define TCM_REG_N_SM_CTX_LD_4 0x50060
++#define TCM_REG_N_SM_CTX_LD_5 0x50064
+ /* [RW 1] Input pbf Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+@@ -2637,11 +3580,14 @@
+ weight 8 (the most prioritised); 1 stands for weight 1(least
+ prioritised); 2 stands for weight 2; tc. */
+ #define TCM_REG_PBF_WEIGHT 0x500b4
+-/* [RW 6] The physical queue number 0 per port index. */
+ #define TCM_REG_PHYS_QNUM0_0 0x500e0
+ #define TCM_REG_PHYS_QNUM0_1 0x500e4
+-/* [RW 6] The physical queue number 1 per port index. */
+ #define TCM_REG_PHYS_QNUM1_0 0x500e8
++#define TCM_REG_PHYS_QNUM1_1 0x500ec
++#define TCM_REG_PHYS_QNUM2_0 0x500f0
++#define TCM_REG_PHYS_QNUM2_1 0x500f4
++#define TCM_REG_PHYS_QNUM3_0 0x500f8
++#define TCM_REG_PHYS_QNUM3_1 0x500fc
+ /* [RW 1] Input prs Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+@@ -2662,6 +3608,10 @@
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+ #define TCM_REG_STORM_TCM_IFEN 0x50010
++/* [RW 3] The weight of the STORM input in the WRR mechanism. 0 stands for
++ weight 8 (the most prioritised); 1 stands for weight 1(least
++ prioritised); 2 stands for weight 2; tc. */
++#define TCM_REG_STORM_WEIGHT 0x500ac
+ /* [RW 1] CM - CFC Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+@@ -2670,6 +3620,8 @@
+ #define TCM_REG_TCM_INT_MASK 0x501dc
+ /* [R 11] Interrupt register #0 read */
+ #define TCM_REG_TCM_INT_STS 0x501d0
++/* [R 27] Parity register #0 read */
++#define TCM_REG_TCM_PRTY_STS 0x501e0
+ /* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS
+ REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
+ Is used to determine the number of the AG context REG-pairs written back;
+@@ -2695,10 +3647,22 @@
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+ #define TCM_REG_TM_TCM_IFEN 0x5001c
++/* [RW 3] The weight of the Timers input in the WRR mechanism. 0 stands for
++ weight 8 (the most prioritised); 1 stands for weight 1(least
++ prioritised); 2 stands for weight 2; tc. */
++#define TCM_REG_TM_WEIGHT 0x500d0
+ /* [RW 6] QM output initial credit. Max credit available - 32.Write writes
+ the initial credit value; read returns the current value of the credit
+ counter. Must be initialized to 32 at start-up. */
+ #define TCM_REG_TQM_INIT_CRD 0x5021c
++/* [RW 3] The weight of the QM (primary) input in the WRR mechanism. 0
++ stands for weight 8 (the most prioritised); 1 stands for weight 1(least
++ prioritised); 2 stands for weight 2; tc. */
++#define TCM_REG_TQM_P_WEIGHT 0x500c8
++/* [RW 3] The weight of the QM (secondary) input in the WRR mechanism. 0
++ stands for weight 8 (the most prioritised); 1 stands for weight 1(least
++ prioritised); 2 stands for weight 2; tc. */
++#define TCM_REG_TQM_S_WEIGHT 0x500cc
+ /* [RW 28] The CM header value for QM request (primary). */
+ #define TCM_REG_TQM_TCM_HDR_P 0x50090
+ /* [RW 28] The CM header value for QM request (secondary). */
+@@ -2725,10 +3689,15 @@
+ /* [RC 1] Message length mismatch (relative to last indication) at the In#8
+ interface. */
+ #define TCM_REG_USEM_LENGTH_MIS 0x50170
++/* [RW 3] The weight of the input usem in the WRR mechanism. 0 stands for
++ weight 8 (the most prioritised); 1 stands for weight 1(least
++ prioritised); 2 stands for weight 2; tc. */
++#define TCM_REG_USEM_WEIGHT 0x500b8
+ /* [RW 21] Indirect access to the descriptor table of the XX protection
+ mechanism. The fields are: [5:0] - length of the message; 15:6] - message
+ pointer; 20:16] - next pointer. */
+ #define TCM_REG_XX_DESCR_TABLE 0x50280
++#define TCM_REG_XX_DESCR_TABLE_SIZE 32
+ /* [R 6] Use to read the value of XX protection Free counter. */
+ #define TCM_REG_XX_FREE 0x50178
+ /* [RW 6] Initial value for the credit counter; responsible for fulfilling
+@@ -2773,6 +3742,7 @@
+ #define TM_REG_EN_CL1_INPUT 0x16400c
+ /* [RW 1] Enable client2 input. */
+ #define TM_REG_EN_CL2_INPUT 0x164010
++#define TM_REG_EN_LINEAR0_TIMER 0x164014
+ /* [RW 1] Enable real time counter. */
+ #define TM_REG_EN_REAL_TIME_CNT 0x1640d8
+ /* [RW 1] Enable for Timers state machines. */
+@@ -2780,14 +3750,22 @@
+ /* [RW 4] Load value for expiration credit cnt. CFC max number of
+ outstanding load requests for timers (expiration) context loading. */
+ #define TM_REG_EXP_CRDCNT_VAL 0x164238
+-/* [RW 18] Linear0 Max active cid. */
++/* [RW 32] Linear0 logic address. */
++#define TM_REG_LIN0_LOGIC_ADDR 0x164240
++/* [RW 18] Linear0 Max active cid (in banks of 32 entries). */
+ #define TM_REG_LIN0_MAX_ACTIVE_CID 0x164048
+ /* [WB 64] Linear0 phy address. */
+ #define TM_REG_LIN0_PHY_ADDR 0x164270
++/* [RW 1] Linear0 physical address valid. */
++#define TM_REG_LIN0_PHY_ADDR_VALID 0x164248
+ /* [RW 24] Linear0 array scan timeout. */
+ #define TM_REG_LIN0_SCAN_TIME 0x16403c
++/* [RW 32] Linear1 logic address. */
++#define TM_REG_LIN1_LOGIC_ADDR 0x164250
+ /* [WB 64] Linear1 phy address. */
+ #define TM_REG_LIN1_PHY_ADDR 0x164280
++/* [RW 1] Linear1 physical address valid. */
++#define TM_REG_LIN1_PHY_ADDR_VALID 0x164258
+ /* [RW 6] Linear timer set_clear fifo threshold. */
+ #define TM_REG_LIN_SETCLR_FIFO_ALFULL_THR 0x164070
+ /* [RW 2] Load value for pci arbiter credit cnt. */
+@@ -2804,6 +3782,45 @@
+ #define TM_REG_TM_INT_STS 0x1640f0
+ /* [RW 8] The event id for aggregated interrupt 0 */
+ #define TSDM_REG_AGG_INT_EVENT_0 0x42038
++#define TSDM_REG_AGG_INT_EVENT_1 0x4203c
++#define TSDM_REG_AGG_INT_EVENT_10 0x42060
++#define TSDM_REG_AGG_INT_EVENT_11 0x42064
++#define TSDM_REG_AGG_INT_EVENT_12 0x42068
++#define TSDM_REG_AGG_INT_EVENT_13 0x4206c
++#define TSDM_REG_AGG_INT_EVENT_14 0x42070
++#define TSDM_REG_AGG_INT_EVENT_15 0x42074
++#define TSDM_REG_AGG_INT_EVENT_16 0x42078
++#define TSDM_REG_AGG_INT_EVENT_17 0x4207c
++#define TSDM_REG_AGG_INT_EVENT_18 0x42080
++#define TSDM_REG_AGG_INT_EVENT_19 0x42084
++#define TSDM_REG_AGG_INT_EVENT_2 0x42040
++#define TSDM_REG_AGG_INT_EVENT_20 0x42088
++#define TSDM_REG_AGG_INT_EVENT_21 0x4208c
++#define TSDM_REG_AGG_INT_EVENT_22 0x42090
++#define TSDM_REG_AGG_INT_EVENT_23 0x42094
++#define TSDM_REG_AGG_INT_EVENT_24 0x42098
++#define TSDM_REG_AGG_INT_EVENT_25 0x4209c
++#define TSDM_REG_AGG_INT_EVENT_26 0x420a0
++#define TSDM_REG_AGG_INT_EVENT_27 0x420a4
++#define TSDM_REG_AGG_INT_EVENT_28 0x420a8
++#define TSDM_REG_AGG_INT_EVENT_29 0x420ac
++#define TSDM_REG_AGG_INT_EVENT_3 0x42044
++#define TSDM_REG_AGG_INT_EVENT_30 0x420b0
++#define TSDM_REG_AGG_INT_EVENT_31 0x420b4
++#define TSDM_REG_AGG_INT_EVENT_4 0x42048
++/* [RW 1] The T bit for aggregated interrupt 0 */
++#define TSDM_REG_AGG_INT_T_0 0x420b8
++#define TSDM_REG_AGG_INT_T_1 0x420bc
++#define TSDM_REG_AGG_INT_T_10 0x420e0
++#define TSDM_REG_AGG_INT_T_11 0x420e4
++#define TSDM_REG_AGG_INT_T_12 0x420e8
++#define TSDM_REG_AGG_INT_T_13 0x420ec
++#define TSDM_REG_AGG_INT_T_14 0x420f0
++#define TSDM_REG_AGG_INT_T_15 0x420f4
++#define TSDM_REG_AGG_INT_T_16 0x420f8
++#define TSDM_REG_AGG_INT_T_17 0x420fc
++#define TSDM_REG_AGG_INT_T_18 0x42100
++#define TSDM_REG_AGG_INT_T_19 0x42104
+ /* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */
+ #define TSDM_REG_CFC_RSP_START_ADDR 0x42008
+ /* [RW 16] The maximum value of the competion counter #0 */
+@@ -2868,6 +3885,9 @@
+ /* [RW 32] Interrupt mask register #0 read/write */
+ #define TSDM_REG_TSDM_INT_MASK_0 0x4229c
+ #define TSDM_REG_TSDM_INT_MASK_1 0x422ac
++/* [R 32] Interrupt register #0 read */
++#define TSDM_REG_TSDM_INT_STS_0 0x42290
++#define TSDM_REG_TSDM_INT_STS_1 0x422a0
+ /* [RW 11] Parity mask register #0 read/write */
+ #define TSDM_REG_TSDM_PRTY_MASK 0x422bc
+ /* [R 11] Parity register #0 read */
+@@ -2908,9 +3928,8 @@
+ #define TSEM_REG_ENABLE_OUT 0x1800a8
+ /* [RW 32] This address space contains all registers and memories that are
+ placed in SEM_FAST block. The SEM_FAST registers are described in
+- appendix B. In order to access the SEM_FAST registers the base address
+- TSEM_REGISTERS_FAST_MEMORY (Offset: 0x1a0000) should be added to each
+- SEM_FAST register offset. */
++ appendix B. In order to access the sem_fast registers the base address
++ ~fast_memory.fast_memory should be added to eachsem_fast register offset. */
+ #define TSEM_REG_FAST_MEMORY 0x1a0000
+ /* [RW 1] Disables input messages from FIC0 May be updated during run_time
+ by the microcode */
+@@ -2993,6 +4012,9 @@
+ /* [RW 32] Interrupt mask register #0 read/write */
+ #define TSEM_REG_TSEM_INT_MASK_0 0x180100
+ #define TSEM_REG_TSEM_INT_MASK_1 0x180110
++/* [R 32] Interrupt register #0 read */
++#define TSEM_REG_TSEM_INT_STS_0 0x1800f4
++#define TSEM_REG_TSEM_INT_STS_1 0x180104
+ /* [RW 32] Parity mask register #0 read/write */
+ #define TSEM_REG_TSEM_PRTY_MASK_0 0x180120
+ #define TSEM_REG_TSEM_PRTY_MASK_1 0x180130
+@@ -3043,6 +4065,10 @@
+ /* [RC 1] Set when the message length mismatch (relative to last indication)
+ at the dorq interface is detected. */
+ #define UCM_REG_DORQ_LENGTH_MIS 0xe0168
++/* [RW 3] The weight of the input dorq in the WRR mechanism. 0 stands for
++ weight 8 (the most prioritised); 1 stands for weight 1(least
++ prioritised); 2 stands for weight 2; tc. */
++#define UCM_REG_DORQ_WEIGHT 0xe00c0
+ /* [RW 8] The Event ID in case ErrorFlg input message bit is set. */
+ #define UCM_REG_ERR_EVNT_ID 0xe00a4
+ /* [RW 28] The CM erroneous header for QM and Timers formatting. */
+@@ -3088,12 +4114,15 @@
+ #define UCM_REG_N_SM_CTX_LD_2 0xe005c
+ #define UCM_REG_N_SM_CTX_LD_3 0xe0060
+ #define UCM_REG_N_SM_CTX_LD_4 0xe0064
+-/* [RW 6] The physical queue number 0 per port index (CID[23]) */
++#define UCM_REG_N_SM_CTX_LD_5 0xe0068
+ #define UCM_REG_PHYS_QNUM0_0 0xe0110
+ #define UCM_REG_PHYS_QNUM0_1 0xe0114
+-/* [RW 6] The physical queue number 1 per port index (CID[23]) */
+ #define UCM_REG_PHYS_QNUM1_0 0xe0118
+ #define UCM_REG_PHYS_QNUM1_1 0xe011c
++#define UCM_REG_PHYS_QNUM2_0 0xe0120
++#define UCM_REG_PHYS_QNUM2_1 0xe0124
++#define UCM_REG_PHYS_QNUM3_0 0xe0128
++#define UCM_REG_PHYS_QNUM3_1 0xe012c
+ /* [RW 8] The Event ID for Timers formatting in case of stop done. */
+ #define UCM_REG_STOP_EVNT_ID 0xe00ac
+ /* [RC 1] Set when the message length mismatch (relative to last indication)
+@@ -3103,6 +4132,10 @@
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+ #define UCM_REG_STORM_UCM_IFEN 0xe0010
++/* [RW 3] The weight of the STORM input in the WRR mechanism. 0 stands for
++ weight 8 (the most prioritised); 1 stands for weight 1(least
++ prioritised); 2 stands for weight 2; tc. */
++#define UCM_REG_STORM_WEIGHT 0xe00b0
+ /* [RW 4] Timers output initial credit. Max credit available - 15.Write
+ writes the initial credit value; read returns the current value of the
+ credit counter. Must be initialized to 4 at start-up. */
+@@ -3113,6 +4146,10 @@
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+ #define UCM_REG_TM_UCM_IFEN 0xe001c
++/* [RW 3] The weight of the Timers input in the WRR mechanism. 0 stands for
++ weight 8 (the most prioritised); 1 stands for weight 1(least
++ prioritised); 2 stands for weight 2; tc. */
++#define UCM_REG_TM_WEIGHT 0xe00d4
+ /* [RW 1] Input tsem Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+@@ -3132,6 +4169,8 @@
+ #define UCM_REG_UCM_INT_MASK 0xe01d4
+ /* [R 11] Interrupt register #0 read */
+ #define UCM_REG_UCM_INT_STS 0xe01c8
++/* [R 27] Parity register #0 read */
++#define UCM_REG_UCM_PRTY_STS 0xe01d8
+ /* [RW 2] The size of AG context region 0 in REG-pairs. Designates the MS
+ REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
+ Is used to determine the number of the AG context REG-pairs written back;
+@@ -3163,6 +4202,10 @@
+ stands for weight 8 (the most prioritised); 1 stands for weight 1(least
+ prioritised); 2 stands for weight 2; tc. */
+ #define UCM_REG_UQM_P_WEIGHT 0xe00cc
++/* [RW 3] The weight of the QM (secondary) input in the WRR mechanism. 0
++ stands for weight 8 (the most prioritised); 1 stands for weight 1(least
++ prioritised); 2 stands for weight 2; tc. */
++#define UCM_REG_UQM_S_WEIGHT 0xe00d0
+ /* [RW 28] The CM header value for QM request (primary). */
+ #define UCM_REG_UQM_UCM_HDR_P 0xe0094
+ /* [RW 28] The CM header value for QM request (secondary). */
+@@ -3178,6 +4221,10 @@
+ /* [RC 1] Set when the message length mismatch (relative to last indication)
+ at the SDM interface is detected. */
+ #define UCM_REG_USDM_LENGTH_MIS 0xe0158
++/* [RW 3] The weight of the SDM input in the WRR mechanism. 0 stands for
++ weight 8 (the most prioritised); 1 stands for weight 1(least
++ prioritised); 2 stands for weight 2; tc. */
++#define UCM_REG_USDM_WEIGHT 0xe00c8
+ /* [RW 1] Input xsem Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+@@ -3185,10 +4232,15 @@
+ /* [RC 1] Set when the message length mismatch (relative to last indication)
+ at the xsem interface isdetected. */
+ #define UCM_REG_XSEM_LENGTH_MIS 0xe0164
++/* [RW 3] The weight of the input xsem in the WRR mechanism. 0 stands for
++ weight 8 (the most prioritised); 1 stands for weight 1(least
++ prioritised); 2 stands for weight 2; tc. */
++#define UCM_REG_XSEM_WEIGHT 0xe00bc
+ /* [RW 20] Indirect access to the descriptor table of the XX protection
+ mechanism. The fields are:[5:0] - message length; 14:6] - message
+ pointer; 19:15] - next pointer. */
+ #define UCM_REG_XX_DESCR_TABLE 0xe0280
++#define UCM_REG_XX_DESCR_TABLE_SIZE 32
+ /* [R 6] Use to read the XX protection Free counter. */
+ #define UCM_REG_XX_FREE 0xe016c
+ /* [RW 6] Initial value for the credit counter; responsible for fulfilling
+@@ -3218,6 +4270,22 @@
+ #define USDM_REG_AGG_INT_EVENT_17 0xc407c
+ #define USDM_REG_AGG_INT_EVENT_18 0xc4080
+ #define USDM_REG_AGG_INT_EVENT_19 0xc4084
++#define USDM_REG_AGG_INT_EVENT_2 0xc4040
++#define USDM_REG_AGG_INT_EVENT_20 0xc4088
++#define USDM_REG_AGG_INT_EVENT_21 0xc408c
++#define USDM_REG_AGG_INT_EVENT_22 0xc4090
++#define USDM_REG_AGG_INT_EVENT_23 0xc4094
++#define USDM_REG_AGG_INT_EVENT_24 0xc4098
++#define USDM_REG_AGG_INT_EVENT_25 0xc409c
++#define USDM_REG_AGG_INT_EVENT_26 0xc40a0
++#define USDM_REG_AGG_INT_EVENT_27 0xc40a4
++#define USDM_REG_AGG_INT_EVENT_28 0xc40a8
++#define USDM_REG_AGG_INT_EVENT_29 0xc40ac
++#define USDM_REG_AGG_INT_EVENT_3 0xc4044
++#define USDM_REG_AGG_INT_EVENT_30 0xc40b0
++#define USDM_REG_AGG_INT_EVENT_31 0xc40b4
++#define USDM_REG_AGG_INT_EVENT_4 0xc4048
++#define USDM_REG_AGG_INT_EVENT_5 0xc404c
+ /* [RW 1] For each aggregated interrupt index whether the mode is normal (0)
+ or auto-mask-mode (1) */
+ #define USDM_REG_AGG_INT_MODE_0 0xc41b8
+@@ -3232,6 +4300,8 @@
+ #define USDM_REG_AGG_INT_MODE_17 0xc41fc
+ #define USDM_REG_AGG_INT_MODE_18 0xc4200
+ #define USDM_REG_AGG_INT_MODE_19 0xc4204
++#define USDM_REG_AGG_INT_MODE_4 0xc41c8
++#define USDM_REG_AGG_INT_MODE_5 0xc41cc
+ /* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */
+ #define USDM_REG_CFC_RSP_START_ADDR 0xc4008
+ /* [RW 16] The maximum value of the competion counter #0 */
+@@ -3298,6 +4368,9 @@
+ /* [RW 32] Interrupt mask register #0 read/write */
+ #define USDM_REG_USDM_INT_MASK_0 0xc42a0
+ #define USDM_REG_USDM_INT_MASK_1 0xc42b0
++/* [R 32] Interrupt register #0 read */
++#define USDM_REG_USDM_INT_STS_0 0xc4294
++#define USDM_REG_USDM_INT_STS_1 0xc42a4
+ /* [RW 11] Parity mask register #0 read/write */
+ #define USDM_REG_USDM_PRTY_MASK 0xc42c0
+ /* [R 11] Parity register #0 read */
+@@ -3338,9 +4411,8 @@
+ #define USEM_REG_ENABLE_OUT 0x3000a8
+ /* [RW 32] This address space contains all registers and memories that are
+ placed in SEM_FAST block. The SEM_FAST registers are described in
+- appendix B. In order to access the SEM_FAST registers... the base address
+- USEM_REGISTERS_FAST_MEMORY (Offset: 0x320000) should be added to each
+- SEM_FAST register offset. */
++ appendix B. In order to access the sem_fast registers the base address
++ ~fast_memory.fast_memory should be added to eachsem_fast register offset. */
+ #define USEM_REG_FAST_MEMORY 0x320000
+ /* [RW 1] Disables input messages from FIC0 May be updated during run_time
+ by the microcode */
+@@ -3423,6 +4495,9 @@
+ /* [RW 32] Interrupt mask register #0 read/write */
+ #define USEM_REG_USEM_INT_MASK_0 0x300110
+ #define USEM_REG_USEM_INT_MASK_1 0x300120
++/* [R 32] Interrupt register #0 read */
++#define USEM_REG_USEM_INT_STS_0 0x300104
++#define USEM_REG_USEM_INT_STS_1 0x300114
+ /* [RW 32] Parity mask register #0 read/write */
+ #define USEM_REG_USEM_PRTY_MASK_0 0x300130
+ #define USEM_REG_USEM_PRTY_MASK_1 0x300140
+@@ -3477,6 +4552,10 @@
+ /* [RC 1] Set at message length mismatch (relative to last indication) at
+ the dorq interface. */
+ #define XCM_REG_DORQ_LENGTH_MIS 0x20230
++/* [RW 3] The weight of the input dorq in the WRR mechanism. 0 stands for
++ weight 8 (the most prioritised); 1 stands for weight 1(least
++ prioritised); 2 stands for weight 2; tc. */
++#define XCM_REG_DORQ_WEIGHT 0x200cc
+ /* [RW 8] The Event ID in case the ErrorFlg input message bit is set. */
+ #define XCM_REG_ERR_EVNT_ID 0x200b0
+ /* [RW 28] The CM erroneous header for QM and Timers formatting. */
+@@ -3491,11 +4570,8 @@
+ writes the initial credit value; read returns the current value of the
+ credit counter. Must be initialized to 64 at start-up. */
+ #define XCM_REG_FIC1_INIT_CRD 0x20410
+-/* [RW 8] The maximum delayed ACK counter value.Must be at least 2. Per port
+- value. */
+ #define XCM_REG_GLB_DEL_ACK_MAX_CNT_0 0x20118
+ #define XCM_REG_GLB_DEL_ACK_MAX_CNT_1 0x2011c
+-/* [RW 28] The delayed ACK timeout in ticks. Per port value. */
+ #define XCM_REG_GLB_DEL_ACK_TMR_VAL_0 0x20108
+ #define XCM_REG_GLB_DEL_ACK_TMR_VAL_1 0x2010c
+ /* [RW 1] Arbitratiojn between Input Arbiter groups: 0 - fair Round-Robin; 1
+@@ -3518,6 +4594,10 @@
+ /* [RC 1] Set at message length mismatch (relative to last indication) at
+ the nig0 interface. */
+ #define XCM_REG_NIG0_LENGTH_MIS 0x20238
++/* [RW 3] The weight of the input nig0 in the WRR mechanism. 0 stands for
++ weight 8 (the most prioritised); 1 stands for weight 1(least
++ prioritised); 2 stands for weight 2; tc. */
++#define XCM_REG_NIG0_WEIGHT 0x200d4
+ /* [RW 1] Input nig1 Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+@@ -3545,6 +4625,7 @@
+ #define XCM_REG_N_SM_CTX_LD_2 0x20068
+ #define XCM_REG_N_SM_CTX_LD_3 0x2006c
+ #define XCM_REG_N_SM_CTX_LD_4 0x20070
++#define XCM_REG_N_SM_CTX_LD_5 0x20074
+ /* [RW 1] Input pbf Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+@@ -3556,6 +4637,8 @@
+ weight 8 (the most prioritised); 1 stands for weight 1(least
+ prioritised); 2 stands for weight 2; tc. */
+ #define XCM_REG_PBF_WEIGHT 0x200d0
++#define XCM_REG_PHYS_QNUM3_0 0x20100
++#define XCM_REG_PHYS_QNUM3_1 0x20104
+ /* [RW 8] The Event ID for Timers formatting in case of stop done. */
+ #define XCM_REG_STOP_EVNT_ID 0x200b8
+ /* [RC 1] Set at message length mismatch (relative to last indication) at
+@@ -3573,6 +4656,10 @@
+ writes the initial credit value; read returns the current value of the
+ credit counter. Must be initialized to 4 at start-up. */
+ #define XCM_REG_TM_INIT_CRD 0x2041c
++/* [RW 3] The weight of the Timers input in the WRR mechanism. 0 stands for
++ weight 8 (the most prioritised); 1 stands for weight 1(least
++ prioritised); 2 stands for weight 2; tc. */
++#define XCM_REG_TM_WEIGHT 0x200ec
+ /* [RW 28] The CM header for Timers expiration command. */
+ #define XCM_REG_TM_XCM_HDR 0x200a8
+ /* [RW 1] Timers - CM Interface enable. If 0 - the valid input is
+@@ -3603,53 +4690,17 @@
+ weight 8 (the most prioritised); 1 stands for weight 1(least
+ prioritised); 2 stands for weight 2; tc. */
+ #define XCM_REG_USEM_WEIGHT 0x200c8
+-/* [RW 2] DA counter command; used in case of window update doorbell.The
+- first index stands for the value DaEnable of that connection. The second
+- index stands for port number. */
+ #define XCM_REG_WU_DA_CNT_CMD00 0x201d4
+-/* [RW 2] DA counter command; used in case of window update doorbell.The
+- first index stands for the value DaEnable of that connection. The second
+- index stands for port number. */
+ #define XCM_REG_WU_DA_CNT_CMD01 0x201d8
+-/* [RW 2] DA counter command; used in case of window update doorbell.The
+- first index stands for the value DaEnable of that connection. The second
+- index stands for port number. */
+ #define XCM_REG_WU_DA_CNT_CMD10 0x201dc
+-/* [RW 2] DA counter command; used in case of window update doorbell.The
+- first index stands for the value DaEnable of that connection. The second
+- index stands for port number. */
+ #define XCM_REG_WU_DA_CNT_CMD11 0x201e0
+-/* [RW 8] DA counter update value used in case of window update doorbell.The
+- first index stands for the value DaEnable of that connection. The second
+- index stands for port number. */
+ #define XCM_REG_WU_DA_CNT_UPD_VAL00 0x201e4
+-/* [RW 8] DA counter update value; used in case of window update
+- doorbell.The first index stands for the value DaEnable of that
+- connection. The second index stands for port number. */
+ #define XCM_REG_WU_DA_CNT_UPD_VAL01 0x201e8
+-/* [RW 8] DA counter update value; used in case of window update
+- doorbell.The first index stands for the value DaEnable of that
+- connection. The second index stands for port number. */
+ #define XCM_REG_WU_DA_CNT_UPD_VAL10 0x201ec
+-/* [RW 8] DA counter update value; used in case of window update
+- doorbell.The first index stands for the value DaEnable of that
+- connection. The second index stands for port number. */
+ #define XCM_REG_WU_DA_CNT_UPD_VAL11 0x201f0
+-/* [RW 1] DA timer command; used in case of window update doorbell.The first
+- index stands for the value DaEnable of that connection. The second index
+- stands for port number. */
+ #define XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00 0x201c4
+-/* [RW 1] DA timer command; used in case of window update doorbell.The first
+- index stands for the value DaEnable of that connection. The second index
+- stands for port number. */
+ #define XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01 0x201c8
+-/* [RW 1] DA timer command; used in case of window update doorbell.The first
+- index stands for the value DaEnable of that connection. The second index
+- stands for port number. */
+ #define XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD10 0x201cc
+-/* [RW 1] DA timer command; used in case of window update doorbell.The first
+- index stands for the value DaEnable of that connection. The second index
+- stands for port number. */
+ #define XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD11 0x201d0
+ /* [RW 1] CM - CFC Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+@@ -3659,6 +4710,8 @@
+ #define XCM_REG_XCM_INT_MASK 0x202b4
+ /* [R 14] Interrupt register #0 read */
+ #define XCM_REG_XCM_INT_STS 0x202a8
++/* [R 30] Parity register #0 read */
++#define XCM_REG_XCM_PRTY_STS 0x202b8
+ /* [RW 4] The size of AG context region 0 in REG-pairs. Designates the MS
+ REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
+ Is used to determine the number of the AG context REG-pairs written back;
+@@ -3692,6 +4745,10 @@
+ stands for weight 8 (the most prioritised); 1 stands for weight 1(least
+ prioritised); 2 stands for weight 2; tc. */
+ #define XCM_REG_XQM_P_WEIGHT 0x200e4
++/* [RW 3] The weight of the QM (secondary) input in the WRR mechanism. 0
++ stands for weight 8 (the most prioritised); 1 stands for weight 1(least
++ prioritised); 2 stands for weight 2; tc. */
++#define XCM_REG_XQM_S_WEIGHT 0x200e8
+ /* [RW 28] The CM header value for QM request (primary). */
+ #define XCM_REG_XQM_XCM_HDR_P 0x200a0
+ /* [RW 28] The CM header value for QM request (secondary). */
+@@ -3715,6 +4772,7 @@
+ mechanism. The fields are: [5:0] - message length; 11:6] - message
+ pointer; 16:12] - next pointer. */
+ #define XCM_REG_XX_DESCR_TABLE 0x20480
++#define XCM_REG_XX_DESCR_TABLE_SIZE 32
+ /* [R 6] Used to read the XX protection Free counter. */
+ #define XCM_REG_XX_FREE 0x20240
+ /* [RW 6] Initial value for the credit counter; responsible for fulfilling
+@@ -3728,7 +4786,7 @@
+ #define XCM_REG_XX_MSG_NUM 0x20428
+ /* [RW 8] The Event ID; sent to the STORM in case of XX overflow. */
+ #define XCM_REG_XX_OVFL_EVNT_ID 0x20058
+-/* [RW 15] Indirect access to the XX table of the XX protection mechanism.
++/* [RW 16] Indirect access to the XX table of the XX protection mechanism.
+ The fields are:[4:0] - tail pointer; 9:5] - Link List size; 14:10] -
+ header pointer. */
+ #define XCM_REG_XX_TABLE 0x20500
+@@ -3745,6 +4803,11 @@
+ #define XSDM_REG_AGG_INT_EVENT_17 0x16607c
+ #define XSDM_REG_AGG_INT_EVENT_18 0x166080
+ #define XSDM_REG_AGG_INT_EVENT_19 0x166084
++#define XSDM_REG_AGG_INT_EVENT_10 0x166060
++#define XSDM_REG_AGG_INT_EVENT_11 0x166064
++#define XSDM_REG_AGG_INT_EVENT_12 0x166068
++#define XSDM_REG_AGG_INT_EVENT_13 0x16606c
++#define XSDM_REG_AGG_INT_EVENT_14 0x166070
+ #define XSDM_REG_AGG_INT_EVENT_2 0x166040
+ #define XSDM_REG_AGG_INT_EVENT_20 0x166088
+ #define XSDM_REG_AGG_INT_EVENT_21 0x16608c
+@@ -3756,6 +4819,15 @@
+ #define XSDM_REG_AGG_INT_EVENT_27 0x1660a4
+ #define XSDM_REG_AGG_INT_EVENT_28 0x1660a8
+ #define XSDM_REG_AGG_INT_EVENT_29 0x1660ac
++#define XSDM_REG_AGG_INT_EVENT_3 0x166044
++#define XSDM_REG_AGG_INT_EVENT_30 0x1660b0
++#define XSDM_REG_AGG_INT_EVENT_31 0x1660b4
++#define XSDM_REG_AGG_INT_EVENT_4 0x166048
++#define XSDM_REG_AGG_INT_EVENT_5 0x16604c
++#define XSDM_REG_AGG_INT_EVENT_6 0x166050
++#define XSDM_REG_AGG_INT_EVENT_7 0x166054
++#define XSDM_REG_AGG_INT_EVENT_8 0x166058
++#define XSDM_REG_AGG_INT_EVENT_9 0x16605c
+ /* [RW 1] For each aggregated interrupt index whether the mode is normal (0)
+ or auto-mask-mode (1) */
+ #define XSDM_REG_AGG_INT_MODE_0 0x1661b8
+@@ -3832,6 +4904,9 @@
+ /* [RW 32] Interrupt mask register #0 read/write */
+ #define XSDM_REG_XSDM_INT_MASK_0 0x16629c
+ #define XSDM_REG_XSDM_INT_MASK_1 0x1662ac
++/* [R 32] Interrupt register #0 read */
++#define XSDM_REG_XSDM_INT_STS_0 0x166290
++#define XSDM_REG_XSDM_INT_STS_1 0x1662a0
+ /* [RW 11] Parity mask register #0 read/write */
+ #define XSDM_REG_XSDM_PRTY_MASK 0x1662bc
+ /* [R 11] Parity register #0 read */
+@@ -3872,9 +4947,8 @@
+ #define XSEM_REG_ENABLE_OUT 0x2800a8
+ /* [RW 32] This address space contains all registers and memories that are
+ placed in SEM_FAST block. The SEM_FAST registers are described in
+- appendix B. In order to access the SEM_FAST registers the base address
+- XSEM_REGISTERS_FAST_MEMORY (Offset: 0x2a0000) should be added to each
+- SEM_FAST register offset. */
++ appendix B. In order to access the sem_fast registers the base address
++ ~fast_memory.fast_memory should be added to eachsem_fast register offset. */
+ #define XSEM_REG_FAST_MEMORY 0x2a0000
+ /* [RW 1] Disables input messages from FIC0 May be updated during run_time
+ by the microcode */
+@@ -3957,6 +5031,9 @@
+ /* [RW 32] Interrupt mask register #0 read/write */
+ #define XSEM_REG_XSEM_INT_MASK_0 0x280110
+ #define XSEM_REG_XSEM_INT_MASK_1 0x280120
++/* [R 32] Interrupt register #0 read */
++#define XSEM_REG_XSEM_INT_STS_0 0x280104
++#define XSEM_REG_XSEM_INT_STS_1 0x280114
+ /* [RW 32] Parity mask register #0 read/write */
+ #define XSEM_REG_XSEM_PRTY_MASK_0 0x280130
+ #define XSEM_REG_XSEM_PRTY_MASK_1 0x280140
+@@ -3993,10 +5070,14 @@
+ #define BIGMAC_REGISTER_TX_SOURCE_ADDR (0x08<<3)
+ #define BIGMAC_REGISTER_TX_STAT_GTBYT (0x20<<3)
+ #define BIGMAC_REGISTER_TX_STAT_GTPKT (0x0C<<3)
++#define EMAC_LED_1000MB_OVERRIDE (1L<<1)
++#define EMAC_LED_100MB_OVERRIDE (1L<<2)
++#define EMAC_LED_10MB_OVERRIDE (1L<<3)
++#define EMAC_LED_2500MB_OVERRIDE (1L<<12)
++#define EMAC_LED_OVERRIDE (1L<<0)
++#define EMAC_LED_TRAFFIC (1L<<6)
+ #define EMAC_MDIO_COMM_COMMAND_ADDRESS (0L<<26)
+-#define EMAC_MDIO_COMM_COMMAND_READ_22 (2L<<26)
+ #define EMAC_MDIO_COMM_COMMAND_READ_45 (3L<<26)
+-#define EMAC_MDIO_COMM_COMMAND_WRITE_22 (1L<<26)
+ #define EMAC_MDIO_COMM_COMMAND_WRITE_45 (1L<<26)
+ #define EMAC_MDIO_COMM_DATA (0xffffL<<0)
+ #define EMAC_MDIO_COMM_START_BUSY (1L<<29)
+@@ -4005,14 +5086,12 @@
+ #define EMAC_MDIO_MODE_CLOCK_CNT (0x3fL<<16)
+ #define EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT 16
+ #define EMAC_MODE_25G_MODE (1L<<5)
+-#define EMAC_MODE_ACPI_RCVD (1L<<20)
+ #define EMAC_MODE_HALF_DUPLEX (1L<<1)
+-#define EMAC_MODE_MPKT (1L<<18)
+-#define EMAC_MODE_MPKT_RCVD (1L<<19)
+ #define EMAC_MODE_PORT_GMII (2L<<2)
+ #define EMAC_MODE_PORT_MII (1L<<2)
+ #define EMAC_MODE_PORT_MII_10M (3L<<2)
+ #define EMAC_MODE_RESET (1L<<0)
++#define EMAC_REG_EMAC_LED 0xc
+ #define EMAC_REG_EMAC_MAC_MATCH 0x10
+ #define EMAC_REG_EMAC_MDIO_COMM 0xac
+ #define EMAC_REG_EMAC_MDIO_MODE 0xb4
+@@ -4030,19 +5109,23 @@
+ #define EMAC_RX_MODE_PROMISCUOUS (1L<<8)
+ #define EMAC_RX_MTU_SIZE_JUMBO_ENA (1L<<31)
+ #define EMAC_TX_MODE_EXT_PAUSE_EN (1L<<3)
+-#define EMAC_TX_MODE_RESET (1L<<0)
++#define EMAC_TX_MODE_FLOW_EN (1L<<4)
++#define MISC_REGISTERS_GPIO_0 0
+ #define MISC_REGISTERS_GPIO_1 1
+ #define MISC_REGISTERS_GPIO_2 2
+ #define MISC_REGISTERS_GPIO_3 3
+ #define MISC_REGISTERS_GPIO_CLR_POS 16
+ #define MISC_REGISTERS_GPIO_FLOAT (0xffL<<24)
+ #define MISC_REGISTERS_GPIO_FLOAT_POS 24
++#define MISC_REGISTERS_GPIO_HIGH 1
+ #define MISC_REGISTERS_GPIO_INPUT_HI_Z 2
++#define MISC_REGISTERS_GPIO_LOW 0
+ #define MISC_REGISTERS_GPIO_OUTPUT_HIGH 1
+ #define MISC_REGISTERS_GPIO_OUTPUT_LOW 0
+ #define MISC_REGISTERS_GPIO_PORT_SHIFT 4
+ #define MISC_REGISTERS_GPIO_SET_POS 8
+ #define MISC_REGISTERS_RESET_REG_1_CLEAR 0x588
++#define MISC_REGISTERS_RESET_REG_1_RST_NIG (0x1<<7)
+ #define MISC_REGISTERS_RESET_REG_1_SET 0x584
+ #define MISC_REGISTERS_RESET_REG_2_CLEAR 0x598
+ #define MISC_REGISTERS_RESET_REG_2_RST_BMAC0 (0x1<<0)
+@@ -4077,7 +5160,9 @@
+ #define HW_LOCK_MAX_RESOURCE_VALUE 31
+ #define HW_LOCK_RESOURCE_8072_MDIO 0
+ #define HW_LOCK_RESOURCE_GPIO 1
++#define HW_LOCK_RESOURCE_PORT0_ATT_MASK 3
+ #define HW_LOCK_RESOURCE_SPIO 2
++#define HW_LOCK_RESOURCE_UNDI 5
+ #define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (1<<18)
+ #define AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT (1<<31)
+ #define AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT (1<<9)
+@@ -4127,7 +5212,7 @@
+ #define AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR (1<<10)
+ #define RESERVED_GENERAL_ATTENTION_BIT_0 0
+
+-#define EVEREST_GEN_ATTN_IN_USE_MASK 0x3e0
++#define EVEREST_GEN_ATTN_IN_USE_MASK 0x3ffe0
+ #define EVEREST_LATCHED_ATTN_IN_USE_MASK 0xffe00000
+
+ #define RESERVED_GENERAL_ATTENTION_BIT_6 6
+@@ -4156,6 +5241,17 @@
+ /* mcp error attention bit */
+ #define MCP_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_11
+
++/*E1H NIG status sync attention mapped to group 4-7*/
++#define LINK_SYNC_ATTENTION_BIT_FUNC_0 RESERVED_GENERAL_ATTENTION_BIT_12
++#define LINK_SYNC_ATTENTION_BIT_FUNC_1 RESERVED_GENERAL_ATTENTION_BIT_13
++#define LINK_SYNC_ATTENTION_BIT_FUNC_2 RESERVED_GENERAL_ATTENTION_BIT_14
++#define LINK_SYNC_ATTENTION_BIT_FUNC_3 RESERVED_GENERAL_ATTENTION_BIT_15
++#define LINK_SYNC_ATTENTION_BIT_FUNC_4 RESERVED_GENERAL_ATTENTION_BIT_16
++#define LINK_SYNC_ATTENTION_BIT_FUNC_5 RESERVED_GENERAL_ATTENTION_BIT_17
++#define LINK_SYNC_ATTENTION_BIT_FUNC_6 RESERVED_GENERAL_ATTENTION_BIT_18
++#define LINK_SYNC_ATTENTION_BIT_FUNC_7 RESERVED_GENERAL_ATTENTION_BIT_19
++
++
+ #define LATCHED_ATTN_RBCR 23
+ #define LATCHED_ATTN_RBCT 24
+ #define LATCHED_ATTN_RBCN 25
+@@ -4217,40 +5313,73 @@
+ #define GRCBASE_MISC_AEU GRCBASE_MISC
+
+
+-/*the offset of the configuration space in the pci core register*/
++/* offset of configuration space in the pci core register */
+ #define PCICFG_OFFSET 0x2000
+ #define PCICFG_VENDOR_ID_OFFSET 0x00
+ #define PCICFG_DEVICE_ID_OFFSET 0x02
+-#define PCICFG_SUBSYSTEM_VENDOR_ID_OFFSET 0x2c
+-#define PCICFG_SUBSYSTEM_ID_OFFSET 0x2e
+-#define PCICFG_INT_LINE 0x3c
+-#define PCICFG_INT_PIN 0x3d
++#define PCICFG_COMMAND_OFFSET 0x04
++#define PCICFG_COMMAND_IO_SPACE (1<<0)
++#define PCICFG_COMMAND_MEM_SPACE (1<<1)
++#define PCICFG_COMMAND_BUS_MASTER (1<<2)
++#define PCICFG_COMMAND_SPECIAL_CYCLES (1<<3)
++#define PCICFG_COMMAND_MWI_CYCLES (1<<4)
++#define PCICFG_COMMAND_VGA_SNOOP (1<<5)
++#define PCICFG_COMMAND_PERR_ENA (1<<6)
++#define PCICFG_COMMAND_STEPPING (1<<7)
++#define PCICFG_COMMAND_SERR_ENA (1<<8)
++#define PCICFG_COMMAND_FAST_B2B (1<<9)
++#define PCICFG_COMMAND_INT_DISABLE (1<<10)
++#define PCICFG_COMMAND_RESERVED (0x1f<<11)
++#define PCICFG_STATUS_OFFSET 0x06
++#define PCICFG_REVESION_ID 0x08
+ #define PCICFG_CACHE_LINE_SIZE 0x0c
+ #define PCICFG_LATENCY_TIMER 0x0d
+-#define PCICFG_REVESION_ID 0x08
+ #define PCICFG_BAR_1_LOW 0x10
+ #define PCICFG_BAR_1_HIGH 0x14
+ #define PCICFG_BAR_2_LOW 0x18
+ #define PCICFG_BAR_2_HIGH 0x1c
++#define PCICFG_SUBSYSTEM_VENDOR_ID_OFFSET 0x2c
++#define PCICFG_SUBSYSTEM_ID_OFFSET 0x2e
++#define PCICFG_INT_LINE 0x3c
++#define PCICFG_INT_PIN 0x3d
++#define PCICFG_PM_CAPABILITY 0x48
++#define PCICFG_PM_CAPABILITY_VERSION (0x3<<16)
++#define PCICFG_PM_CAPABILITY_CLOCK (1<<19)
++#define PCICFG_PM_CAPABILITY_RESERVED (1<<20)
++#define PCICFG_PM_CAPABILITY_DSI (1<<21)
++#define PCICFG_PM_CAPABILITY_AUX_CURRENT (0x7<<22)
++#define PCICFG_PM_CAPABILITY_D1_SUPPORT (1<<25)
++#define PCICFG_PM_CAPABILITY_D2_SUPPORT (1<<26)
++#define PCICFG_PM_CAPABILITY_PME_IN_D0 (1<<27)
++#define PCICFG_PM_CAPABILITY_PME_IN_D1 (1<<28)
++#define PCICFG_PM_CAPABILITY_PME_IN_D2 (1<<29)
++#define PCICFG_PM_CAPABILITY_PME_IN_D3_HOT (1<<30)
++#define PCICFG_PM_CAPABILITY_PME_IN_D3_COLD (1<<31)
++#define PCICFG_PM_CSR_OFFSET 0x4c
++#define PCICFG_PM_CSR_STATE (0x3<<0)
++#define PCICFG_PM_CSR_PME_ENABLE (1<<8)
++#define PCICFG_PM_CSR_PME_STATUS (1<<15)
+ #define PCICFG_GRC_ADDRESS 0x78
+ #define PCICFG_GRC_DATA 0x80
+ #define PCICFG_DEVICE_CONTROL 0xb4
+ #define PCICFG_LINK_CONTROL 0xbc
+
++
+ #define BAR_USTRORM_INTMEM 0x400000
+ #define BAR_CSTRORM_INTMEM 0x410000
+ #define BAR_XSTRORM_INTMEM 0x420000
+ #define BAR_TSTRORM_INTMEM 0x430000
+
++/* for accessing the IGU in case of status block ACK */
+ #define BAR_IGU_INTMEM 0x440000
+
+ #define BAR_DOORBELL_OFFSET 0x800000
+
+ #define BAR_ME_REGISTER 0x450000
+
+-
+-#define GRC_CONFIG_2_SIZE_REG 0x408 /* config_2 offset */
+-#define PCI_CONFIG_2_BAR1_SIZE (0xfL<<0)
++/* config_2 offset */
++#define GRC_CONFIG_2_SIZE_REG 0x408
++#define PCI_CONFIG_2_BAR1_SIZE (0xfL<<0)
+ #define PCI_CONFIG_2_BAR1_SIZE_DISABLED (0L<<0)
+ #define PCI_CONFIG_2_BAR1_SIZE_64K (1L<<0)
+ #define PCI_CONFIG_2_BAR1_SIZE_128K (2L<<0)
+@@ -4267,11 +5396,11 @@
+ #define PCI_CONFIG_2_BAR1_SIZE_256M (13L<<0)
+ #define PCI_CONFIG_2_BAR1_SIZE_512M (14L<<0)
+ #define PCI_CONFIG_2_BAR1_SIZE_1G (15L<<0)
+-#define PCI_CONFIG_2_BAR1_64ENA (1L<<4)
+-#define PCI_CONFIG_2_EXP_ROM_RETRY (1L<<5)
+-#define PCI_CONFIG_2_CFG_CYCLE_RETRY (1L<<6)
+-#define PCI_CONFIG_2_FIRST_CFG_DONE (1L<<7)
+-#define PCI_CONFIG_2_EXP_ROM_SIZE (0xffL<<8)
++#define PCI_CONFIG_2_BAR1_64ENA (1L<<4)
++#define PCI_CONFIG_2_EXP_ROM_RETRY (1L<<5)
++#define PCI_CONFIG_2_CFG_CYCLE_RETRY (1L<<6)
++#define PCI_CONFIG_2_FIRST_CFG_DONE (1L<<7)
++#define PCI_CONFIG_2_EXP_ROM_SIZE (0xffL<<8)
+ #define PCI_CONFIG_2_EXP_ROM_SIZE_DISABLED (0L<<8)
+ #define PCI_CONFIG_2_EXP_ROM_SIZE_2K (1L<<8)
+ #define PCI_CONFIG_2_EXP_ROM_SIZE_4K (2L<<8)
+@@ -4288,46 +5417,44 @@
+ #define PCI_CONFIG_2_EXP_ROM_SIZE_8M (13L<<8)
+ #define PCI_CONFIG_2_EXP_ROM_SIZE_16M (14L<<8)
+ #define PCI_CONFIG_2_EXP_ROM_SIZE_32M (15L<<8)
+-#define PCI_CONFIG_2_BAR_PREFETCH (1L<<16)
+-#define PCI_CONFIG_2_RESERVED0 (0x7fffL<<17)
++#define PCI_CONFIG_2_BAR_PREFETCH (1L<<16)
++#define PCI_CONFIG_2_RESERVED0 (0x7fffL<<17)
+
+ /* config_3 offset */
+-#define GRC_CONFIG_3_SIZE_REG (0x40c)
+-#define PCI_CONFIG_3_STICKY_BYTE (0xffL<<0)
+-#define PCI_CONFIG_3_FORCE_PME (1L<<24)
+-#define PCI_CONFIG_3_PME_STATUS (1L<<25)
+-#define PCI_CONFIG_3_PME_ENABLE (1L<<26)
+-#define PCI_CONFIG_3_PM_STATE (0x3L<<27)
+-#define PCI_CONFIG_3_VAUX_PRESET (1L<<30)
+-#define PCI_CONFIG_3_PCI_POWER (1L<<31)
+-
+-/* config_2 offset */
+-#define GRC_CONFIG_2_SIZE_REG 0x408
++#define GRC_CONFIG_3_SIZE_REG 0x40c
++#define PCI_CONFIG_3_STICKY_BYTE (0xffL<<0)
++#define PCI_CONFIG_3_FORCE_PME (1L<<24)
++#define PCI_CONFIG_3_PME_STATUS (1L<<25)
++#define PCI_CONFIG_3_PME_ENABLE (1L<<26)
++#define PCI_CONFIG_3_PM_STATE (0x3L<<27)
++#define PCI_CONFIG_3_VAUX_PRESET (1L<<30)
++#define PCI_CONFIG_3_PCI_POWER (1L<<31)
+
+ #define GRC_BAR2_CONFIG 0x4e0
+-#define PCI_CONFIG_2_BAR2_SIZE (0xfL<<0)
+-#define PCI_CONFIG_2_BAR2_SIZE_DISABLED (0L<<0)
+-#define PCI_CONFIG_2_BAR2_SIZE_64K (1L<<0)
+-#define PCI_CONFIG_2_BAR2_SIZE_128K (2L<<0)
+-#define PCI_CONFIG_2_BAR2_SIZE_256K (3L<<0)
+-#define PCI_CONFIG_2_BAR2_SIZE_512K (4L<<0)
+-#define PCI_CONFIG_2_BAR2_SIZE_1M (5L<<0)
+-#define PCI_CONFIG_2_BAR2_SIZE_2M (6L<<0)
+-#define PCI_CONFIG_2_BAR2_SIZE_4M (7L<<0)
+-#define PCI_CONFIG_2_BAR2_SIZE_8M (8L<<0)
+-#define PCI_CONFIG_2_BAR2_SIZE_16M (9L<<0)
+-#define PCI_CONFIG_2_BAR2_SIZE_32M (10L<<0)
+-#define PCI_CONFIG_2_BAR2_SIZE_64M (11L<<0)
+-#define PCI_CONFIG_2_BAR2_SIZE_128M (12L<<0)
+-#define PCI_CONFIG_2_BAR2_SIZE_256M (13L<<0)
+-#define PCI_CONFIG_2_BAR2_SIZE_512M (14L<<0)
+-#define PCI_CONFIG_2_BAR2_SIZE_1G (15L<<0)
+-#define PCI_CONFIG_2_BAR2_64ENA (1L<<4)
++#define PCI_CONFIG_2_BAR2_SIZE (0xfL<<0)
++#define PCI_CONFIG_2_BAR2_SIZE_DISABLED (0L<<0)
++#define PCI_CONFIG_2_BAR2_SIZE_64K (1L<<0)
++#define PCI_CONFIG_2_BAR2_SIZE_128K (2L<<0)
++#define PCI_CONFIG_2_BAR2_SIZE_256K (3L<<0)
++#define PCI_CONFIG_2_BAR2_SIZE_512K (4L<<0)
++#define PCI_CONFIG_2_BAR2_SIZE_1M (5L<<0)
++#define PCI_CONFIG_2_BAR2_SIZE_2M (6L<<0)
++#define PCI_CONFIG_2_BAR2_SIZE_4M (7L<<0)
++#define PCI_CONFIG_2_BAR2_SIZE_8M (8L<<0)
++#define PCI_CONFIG_2_BAR2_SIZE_16M (9L<<0)
++#define PCI_CONFIG_2_BAR2_SIZE_32M (10L<<0)
++#define PCI_CONFIG_2_BAR2_SIZE_64M (11L<<0)
++#define PCI_CONFIG_2_BAR2_SIZE_128M (12L<<0)
++#define PCI_CONFIG_2_BAR2_SIZE_256M (13L<<0)
++#define PCI_CONFIG_2_BAR2_SIZE_512M (14L<<0)
++#define PCI_CONFIG_2_BAR2_SIZE_1G (15L<<0)
++#define PCI_CONFIG_2_BAR2_64ENA (1L<<4)
++
++#define PCI_PM_DATA_A 0x410
++#define PCI_PM_DATA_B 0x414
++#define PCI_ID_VAL1 0x434
++#define PCI_ID_VAL2 0x438
+
+-#define PCI_PM_DATA_A (0x410)
+-#define PCI_PM_DATA_B (0x414)
+-#define PCI_ID_VAL1 (0x434)
+-#define PCI_ID_VAL2 (0x438)
+
+ #define MDIO_REG_BANK_CL73_IEEEB0 0x0
+ #define MDIO_CL73_IEEEB0_CL73_AN_CONTROL 0x0
+@@ -4336,7 +5463,7 @@
+ #define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_MAIN_RST 0x8000
+
+ #define MDIO_REG_BANK_CL73_IEEEB1 0x10
+-#define MDIO_CL73_IEEEB1_AN_ADV2 0x01
++#define MDIO_CL73_IEEEB1_AN_ADV2 0x01
+ #define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M 0x0000
+ #define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX 0x0020
+ #define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4 0x0040
+@@ -4365,7 +5492,7 @@
+ #define MDIO_REG_BANK_RX_ALL 0x80f0
+ #define MDIO_RX_ALL_RX_EQ_BOOST 0x1c
+ #define MDIO_RX_ALL_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7
+-#define MDIO_RX_ALL_RX_EQ_BOOST_OFFSET_CTRL 0x10
++#define MDIO_RX_ALL_RX_EQ_BOOST_OFFSET_CTRL 0x10
+
+ #define MDIO_REG_BANK_TX0 0x8060
+ #define MDIO_TX0_TX_DRIVER 0x17
+@@ -4392,213 +5519,278 @@
+ #define MDIO_XGXS_BLOCK2_RX_LN_SWAP 0x10
+ #define MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE 0x8000
+ #define MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE 0x4000
+-#define MDIO_XGXS_BLOCK2_TX_LN_SWAP 0x11
++#define MDIO_XGXS_BLOCK2_TX_LN_SWAP 0x11
+ #define MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE 0x8000
+-#define MDIO_XGXS_BLOCK2_UNICORE_MODE_10G 0x14
++#define MDIO_XGXS_BLOCK2_UNICORE_MODE_10G 0x14
+ #define MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS 0x0001
+ #define MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS 0x0010
+-#define MDIO_XGXS_BLOCK2_TEST_MODE_LANE 0x15
++#define MDIO_XGXS_BLOCK2_TEST_MODE_LANE 0x15
+
+ #define MDIO_REG_BANK_GP_STATUS 0x8120
+-#define MDIO_GP_STATUS_TOP_AN_STATUS1 0x1B
+-#define MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE 0x0001
+-#define MDIO_GP_STATUS_TOP_AN_STATUS1_CL37_AUTONEG_COMPLETE 0x0002
+-#define MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS 0x0004
+-#define MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS 0x0008
+-#define MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE 0x0010
+-#define MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_LP_NP_BAM_ABLE 0x0020
+-
+-#define MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_TXSIDE 0x0040
+-#define MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_RXSIDE 0x0080
+-#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_MASK 0x3f00
+-#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10M 0x0000
+-#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_100M 0x0100
+-#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G 0x0200
+-#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_2_5G 0x0300
+-#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_5G 0x0400
+-#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_6G 0x0500
+-#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_HIG 0x0600
+-#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_CX4 0x0700
+-#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_12G_HIG 0x0800
+-#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_12_5G 0x0900
+-#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_13G 0x0A00
+-#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_15G 0x0B00
+-#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_16G 0x0C00
+-#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G_KX 0x0D00
+-#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KX4 0x0E00
++#define MDIO_GP_STATUS_TOP_AN_STATUS1 0x1B
++#define MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE 0x0001
++#define MDIO_GP_STATUS_TOP_AN_STATUS1_CL37_AUTONEG_COMPLETE 0x0002
++#define MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS 0x0004
++#define MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS 0x0008
++#define MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE 0x0010
++#define MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_LP_NP_BAM_ABLE 0x0020
++#define MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_TXSIDE 0x0040
++#define MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_RXSIDE 0x0080
++#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_MASK 0x3f00
++#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10M 0x0000
++#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_100M 0x0100
++#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G 0x0200
++#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_2_5G 0x0300
++#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_5G 0x0400
++#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_6G 0x0500
++#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_HIG 0x0600
++#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_CX4 0x0700
++#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_12G_HIG 0x0800
++#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_12_5G 0x0900
++#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_13G 0x0A00
++#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_15G 0x0B00
++#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_16G 0x0C00
++#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G_KX 0x0D00
++#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KX4 0x0E00
+
+
+ #define MDIO_REG_BANK_10G_PARALLEL_DETECT 0x8130
+-#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL 0x11
+-#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN 0x1
+-#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK 0x13
+-#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT (0xb71<<1)
++#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL 0x11
++#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN 0x1
++#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK 0x13
++#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT (0xb71<<1)
+
+ #define MDIO_REG_BANK_SERDES_DIGITAL 0x8300
+-#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1 0x10
+-#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE 0x0001
+-#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_TBI_IF 0x0002
+-#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN 0x0004
+-#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT 0x0008
+-#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET 0x0010
+-#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE 0x0020
+-#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL2 0x11
+-#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN 0x0001
+-#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_AN_FST_TMR 0x0040
+-#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1 0x14
+-#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_DUPLEX 0x0004
+-#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_MASK 0x0018
+-#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_SHIFT 3
+-#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_2_5G 0x0018
+-#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_1G 0x0010
+-#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_100M 0x0008
+-#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_10M 0x0000
+-#define MDIO_SERDES_DIGITAL_MISC1 0x18
+-#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_MASK 0xE000
+-#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_25M 0x0000
+-#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_100M 0x2000
+-#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_125M 0x4000
+-#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_156_25M 0x6000
+-#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_187_5M 0x8000
+-#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL 0x0010
+-#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK 0x000f
+-#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_2_5G 0x0000
+-#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_5G 0x0001
+-#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_6G 0x0002
+-#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_HIG 0x0003
+-#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4 0x0004
+-#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_12G 0x0005
+-#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_12_5G 0x0006
+-#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_13G 0x0007
+-#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_15G 0x0008
+-#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_16G 0x0009
++#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1 0x10
++#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE 0x0001
++#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_TBI_IF 0x0002
++#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN 0x0004
++#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT 0x0008
++#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET 0x0010
++#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE 0x0020
++#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL2 0x11
++#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN 0x0001
++#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_AN_FST_TMR 0x0040
++#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1 0x14
++#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_DUPLEX 0x0004
++#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_MASK 0x0018
++#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_SHIFT 3
++#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_2_5G 0x0018
++#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_1G 0x0010
++#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_100M 0x0008
++#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_10M 0x0000
++#define MDIO_SERDES_DIGITAL_MISC1 0x18
++#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_MASK 0xE000
++#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_25M 0x0000
++#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_100M 0x2000
++#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_125M 0x4000
++#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_156_25M 0x6000
++#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_187_5M 0x8000
++#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL 0x0010
++#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK 0x000f
++#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_2_5G 0x0000
++#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_5G 0x0001
++#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_6G 0x0002
++#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_HIG 0x0003
++#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4 0x0004
++#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_12G 0x0005
++#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_12_5G 0x0006
++#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_13G 0x0007
++#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_15G 0x0008
++#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_16G 0x0009
+
+ #define MDIO_REG_BANK_OVER_1G 0x8320
+-#define MDIO_OVER_1G_DIGCTL_3_4 0x14
+-#define MDIO_OVER_1G_DIGCTL_3_4_MP_ID_MASK 0xffe0
+-#define MDIO_OVER_1G_DIGCTL_3_4_MP_ID_SHIFT 5
+-#define MDIO_OVER_1G_UP1 0x19
+-#define MDIO_OVER_1G_UP1_2_5G 0x0001
+-#define MDIO_OVER_1G_UP1_5G 0x0002
+-#define MDIO_OVER_1G_UP1_6G 0x0004
+-#define MDIO_OVER_1G_UP1_10G 0x0010
+-#define MDIO_OVER_1G_UP1_10GH 0x0008
+-#define MDIO_OVER_1G_UP1_12G 0x0020
+-#define MDIO_OVER_1G_UP1_12_5G 0x0040
+-#define MDIO_OVER_1G_UP1_13G 0x0080
+-#define MDIO_OVER_1G_UP1_15G 0x0100
+-#define MDIO_OVER_1G_UP1_16G 0x0200
+-#define MDIO_OVER_1G_UP2 0x1A
+-#define MDIO_OVER_1G_UP2_IPREDRIVER_MASK 0x0007
+-#define MDIO_OVER_1G_UP2_IDRIVER_MASK 0x0038
+-#define MDIO_OVER_1G_UP2_PREEMPHASIS_MASK 0x03C0
+-#define MDIO_OVER_1G_UP3 0x1B
+-#define MDIO_OVER_1G_UP3_HIGIG2 0x0001
+-#define MDIO_OVER_1G_LP_UP1 0x1C
+-#define MDIO_OVER_1G_LP_UP2 0x1D
+-#define MDIO_OVER_1G_LP_UP2_MR_ADV_OVER_1G_MASK 0x03ff
+-#define MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK 0x0780
+-#define MDIO_OVER_1G_LP_UP2_PREEMPHASIS_SHIFT 7
+-#define MDIO_OVER_1G_LP_UP3 0x1E
++#define MDIO_OVER_1G_DIGCTL_3_4 0x14
++#define MDIO_OVER_1G_DIGCTL_3_4_MP_ID_MASK 0xffe0
++#define MDIO_OVER_1G_DIGCTL_3_4_MP_ID_SHIFT 5
++#define MDIO_OVER_1G_UP1 0x19
++#define MDIO_OVER_1G_UP1_2_5G 0x0001
++#define MDIO_OVER_1G_UP1_5G 0x0002
++#define MDIO_OVER_1G_UP1_6G 0x0004
++#define MDIO_OVER_1G_UP1_10G 0x0010
++#define MDIO_OVER_1G_UP1_10GH 0x0008
++#define MDIO_OVER_1G_UP1_12G 0x0020
++#define MDIO_OVER_1G_UP1_12_5G 0x0040
++#define MDIO_OVER_1G_UP1_13G 0x0080
++#define MDIO_OVER_1G_UP1_15G 0x0100
++#define MDIO_OVER_1G_UP1_16G 0x0200
++#define MDIO_OVER_1G_UP2 0x1A
++#define MDIO_OVER_1G_UP2_IPREDRIVER_MASK 0x0007
++#define MDIO_OVER_1G_UP2_IDRIVER_MASK 0x0038
++#define MDIO_OVER_1G_UP2_PREEMPHASIS_MASK 0x03C0
++#define MDIO_OVER_1G_UP3 0x1B
++#define MDIO_OVER_1G_UP3_HIGIG2 0x0001
++#define MDIO_OVER_1G_LP_UP1 0x1C
++#define MDIO_OVER_1G_LP_UP2 0x1D
++#define MDIO_OVER_1G_LP_UP2_MR_ADV_OVER_1G_MASK 0x03ff
++#define MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK 0x0780
++#define MDIO_OVER_1G_LP_UP2_PREEMPHASIS_SHIFT 7
++#define MDIO_OVER_1G_LP_UP3 0x1E
+
+ #define MDIO_REG_BANK_BAM_NEXT_PAGE 0x8350
+-#define MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL 0x10
+-#define MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE 0x0001
+-#define MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN 0x0002
++#define MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL 0x10
++#define MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE 0x0001
++#define MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN 0x0002
++
++#define MDIO_REG_BANK_CL73_USERB0 0x8370
++#define MDIO_CL73_USERB0_CL73_BAM_CTRL1 0x12
++#define MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN 0x8000
++#define MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_STATION_MNGR_EN 0x4000
++#define MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN 0x2000
++#define MDIO_CL73_USERB0_CL73_BAM_CTRL3 0x14
++#define MDIO_CL73_USERB0_CL73_BAM_CTRL3_USE_CL73_HCD_MR 0x0001
++
++#define MDIO_REG_BANK_AER_BLOCK 0xFFD0
++#define MDIO_AER_BLOCK_AER_REG 0x1E
++
++#define MDIO_REG_BANK_COMBO_IEEE0 0xFFE0
++#define MDIO_COMBO_IEEE0_MII_CONTROL 0x10
++#define MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK 0x2040
++#define MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_10 0x0000
++#define MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_100 0x2000
++#define MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_1000 0x0040
++#define MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX 0x0100
++#define MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN 0x0200
++#define MDIO_COMBO_IEEO_MII_CONTROL_AN_EN 0x1000
++#define MDIO_COMBO_IEEO_MII_CONTROL_LOOPBACK 0x4000
++#define MDIO_COMBO_IEEO_MII_CONTROL_RESET 0x8000
++#define MDIO_COMBO_IEEE0_MII_STATUS 0x11
++#define MDIO_COMBO_IEEE0_MII_STATUS_LINK_PASS 0x0004
++#define MDIO_COMBO_IEEE0_MII_STATUS_AUTONEG_COMPLETE 0x0020
++#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV 0x14
++#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX 0x0020
++#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_HALF_DUPLEX 0x0040
++#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK 0x0180
++#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE 0x0000
++#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC 0x0080
++#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC 0x0100
++#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH 0x0180
++#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_NEXT_PAGE 0x8000
++#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1 0x15
++#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_NEXT_PAGE 0x8000
++#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_ACK 0x4000
++#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_PAUSE_MASK 0x0180
++#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_PAUSE_NONE 0x0000
++#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_PAUSE_BOTH 0x0180
++#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_HALF_DUP_CAP 0x0040
++#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_FULL_DUP_CAP 0x0020
++/*WhenthelinkpartnerisinSGMIImode(bit0=1),then
++bit15=link,bit12=duplex,bits11:10=speed,bit14=acknowledge.
++Theotherbitsarereservedandshouldbezero*/
++#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_SGMII_MODE 0x0001
++
++
++#define MDIO_PMA_DEVAD 0x1
++/*ieee*/
++#define MDIO_PMA_REG_CTRL 0x0
++#define MDIO_PMA_REG_STATUS 0x1
++#define MDIO_PMA_REG_10G_CTRL2 0x7
++#define MDIO_PMA_REG_RX_SD 0xa
++/*bcm*/
++#define MDIO_PMA_REG_BCM_CTRL 0x0096
++#define MDIO_PMA_REG_FEC_CTRL 0x00ab
++#define MDIO_PMA_REG_RX_ALARM_CTRL 0x9000
++#define MDIO_PMA_REG_LASI_CTRL 0x9002
++#define MDIO_PMA_REG_RX_ALARM 0x9003
++#define MDIO_PMA_REG_TX_ALARM 0x9004
++#define MDIO_PMA_REG_LASI_STATUS 0x9005
++#define MDIO_PMA_REG_PHY_IDENTIFIER 0xc800
++#define MDIO_PMA_REG_DIGITAL_CTRL 0xc808
++#define MDIO_PMA_REG_DIGITAL_STATUS 0xc809
++#define MDIO_PMA_REG_TX_POWER_DOWN 0xca02
++#define MDIO_PMA_REG_CMU_PLL_BYPASS 0xca09
++#define MDIO_PMA_REG_MISC_CTRL 0xca0a
++#define MDIO_PMA_REG_GEN_CTRL 0xca10
++#define MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP 0x0188
++#define MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET 0x018a
++#define MDIO_PMA_REG_M8051_MSGIN_REG 0xca12
++#define MDIO_PMA_REG_M8051_MSGOUT_REG 0xca13
++#define MDIO_PMA_REG_ROM_VER1 0xca19
++#define MDIO_PMA_REG_ROM_VER2 0xca1a
++#define MDIO_PMA_REG_EDC_FFE_MAIN 0xca1b
++#define MDIO_PMA_REG_PLL_BANDWIDTH 0xca1d
++#define MDIO_PMA_REG_CDR_BANDWIDTH 0xca46
++#define MDIO_PMA_REG_MISC_CTRL1 0xca85
++
++#define MDIO_PMA_REG_7101_RESET 0xc000
++#define MDIO_PMA_REG_7107_LED_CNTL 0xc007
++#define MDIO_PMA_REG_7101_VER1 0xc026
++#define MDIO_PMA_REG_7101_VER2 0xc027
++
++
++#define MDIO_WIS_DEVAD 0x2
++/*bcm*/
++#define MDIO_WIS_REG_LASI_CNTL 0x9002
++#define MDIO_WIS_REG_LASI_STATUS 0x9005
++
++#define MDIO_PCS_DEVAD 0x3
++#define MDIO_PCS_REG_STATUS 0x0020
++#define MDIO_PCS_REG_LASI_STATUS 0x9005
++#define MDIO_PCS_REG_7101_DSP_ACCESS 0xD000
++#define MDIO_PCS_REG_7101_SPI_MUX 0xD008
++#define MDIO_PCS_REG_7101_SPI_CTRL_ADDR 0xE12A
++#define MDIO_PCS_REG_7101_SPI_RESET_BIT (5)
++#define MDIO_PCS_REG_7101_SPI_FIFO_ADDR 0xE02A
++#define MDIO_PCS_REG_7101_SPI_FIFO_ADDR_WRITE_ENABLE_CMD (6)
++#define MDIO_PCS_REG_7101_SPI_FIFO_ADDR_BULK_ERASE_CMD (0xC7)
++#define MDIO_PCS_REG_7101_SPI_FIFO_ADDR_PAGE_PROGRAM_CMD (2)
++#define MDIO_PCS_REG_7101_SPI_BYTES_TO_TRANSFER_ADDR 0xE028
++
++
++#define MDIO_XS_DEVAD 0x4
++#define MDIO_XS_PLL_SEQUENCER 0x8000
++#define MDIO_XS_SFX7101_XGXS_TEST1 0xc00a
+
+-#define MDIO_REG_BANK_CL73_USERB0 0x8370
+-#define MDIO_CL73_USERB0_CL73_BAM_CTRL1 0x12
+-#define MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN 0x8000
+-#define MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_STATION_MNGR_EN 0x4000
+-#define MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN 0x2000
+-#define MDIO_CL73_USERB0_CL73_BAM_CTRL3 0x14
+-#define MDIO_CL73_USERB0_CL73_BAM_CTRL3_USE_CL73_HCD_MR 0x0001
++#define MDIO_AN_DEVAD 0x7
++/*ieee*/
++#define MDIO_AN_REG_CTRL 0x0000
++#define MDIO_AN_REG_STATUS 0x0001
++#define MDIO_AN_REG_STATUS_AN_COMPLETE 0x0020
++#define MDIO_AN_REG_ADV_PAUSE 0x0010
++#define MDIO_AN_REG_ADV_PAUSE_PAUSE 0x0400
++#define MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC 0x0800
++#define MDIO_AN_REG_ADV_PAUSE_BOTH 0x0C00
++#define MDIO_AN_REG_ADV_PAUSE_MASK 0x0C00
++#define MDIO_AN_REG_ADV 0x0011
++#define MDIO_AN_REG_ADV2 0x0012
++#define MDIO_AN_REG_LP_AUTO_NEG 0x0013
++#define MDIO_AN_REG_MASTER_STATUS 0x0021
++/*bcm*/
++#define MDIO_AN_REG_LINK_STATUS 0x8304
++#define MDIO_AN_REG_CL37_CL73 0x8370
++#define MDIO_AN_REG_CL37_AN 0xffe0
++#define MDIO_AN_REG_CL37_FC_LD 0xffe4
++#define MDIO_AN_REG_CL37_FC_LP 0xffe5
+
+-#define MDIO_REG_BANK_AER_BLOCK 0xFFD0
+-#define MDIO_AER_BLOCK_AER_REG 0x1E
+
+-#define MDIO_REG_BANK_COMBO_IEEE0 0xFFE0
+-#define MDIO_COMBO_IEEE0_MII_CONTROL 0x10
+-#define MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK 0x2040
+-#define MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_10 0x0000
+-#define MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_100 0x2000
+-#define MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_1000 0x0040
+-#define MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX 0x0100
+-#define MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN 0x0200
+-#define MDIO_COMBO_IEEO_MII_CONTROL_AN_EN 0x1000
+-#define MDIO_COMBO_IEEO_MII_CONTROL_LOOPBACK 0x4000
+-#define MDIO_COMBO_IEEO_MII_CONTROL_RESET 0x8000
+-#define MDIO_COMBO_IEEE0_MII_STATUS 0x11
+-#define MDIO_COMBO_IEEE0_MII_STATUS_LINK_PASS 0x0004
+-#define MDIO_COMBO_IEEE0_MII_STATUS_AUTONEG_COMPLETE 0x0020
+-#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV 0x14
+-#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX 0x0020
+-#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_HALF_DUPLEX 0x0040
+-#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK 0x0180
+-#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE 0x0000
+-#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC 0x0080
+-#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC 0x0100
+-#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH 0x0180
+-#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_NEXT_PAGE 0x8000
+-#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1 0x15
+-#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_NEXT_PAGE 0x8000
+-#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_ACK 0x4000
+-#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_PAUSE_MASK 0x0180
+-#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_PAUSE_NONE\
+- 0x0000
+-#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_PAUSE_BOTH\
+- 0x0180
+-#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_HALF_DUP_CAP 0x0040
+-#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_FULL_DUP_CAP 0x0020
+-#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_SGMII_MODE 0x0001
++#define IGU_FUNC_BASE 0x0400
+
++#define IGU_ADDR_MSIX 0x0000
++#define IGU_ADDR_INT_ACK 0x0200
++#define IGU_ADDR_PROD_UPD 0x0201
++#define IGU_ADDR_ATTN_BITS_UPD 0x0202
++#define IGU_ADDR_ATTN_BITS_SET 0x0203
++#define IGU_ADDR_ATTN_BITS_CLR 0x0204
++#define IGU_ADDR_COALESCE_NOW 0x0205
++#define IGU_ADDR_SIMD_MASK 0x0206
++#define IGU_ADDR_SIMD_NOMASK 0x0207
++#define IGU_ADDR_MSI_CTL 0x0210
++#define IGU_ADDR_MSI_ADDR_LO 0x0211
++#define IGU_ADDR_MSI_ADDR_HI 0x0212
++#define IGU_ADDR_MSI_DATA 0x0213
+
+-#define EXT_PHY_AUTO_NEG_DEVAD 0x7
+-#define EXT_PHY_OPT_PMA_PMD_DEVAD 0x1
+-#define EXT_PHY_OPT_WIS_DEVAD 0x2
+-#define EXT_PHY_OPT_PCS_DEVAD 0x3
+-#define EXT_PHY_OPT_PHY_XS_DEVAD 0x4
+-#define EXT_PHY_OPT_CNTL 0x0
+-#define EXT_PHY_OPT_CNTL2 0x7
+-#define EXT_PHY_OPT_PMD_RX_SD 0xa
+-#define EXT_PHY_OPT_PMD_MISC_CNTL 0xca0a
+-#define EXT_PHY_OPT_PHY_IDENTIFIER 0xc800
+-#define EXT_PHY_OPT_PMD_DIGITAL_CNT 0xc808
+-#define EXT_PHY_OPT_PMD_DIGITAL_SATUS 0xc809
+-#define EXT_PHY_OPT_CMU_PLL_BYPASS 0xca09
+-#define EXT_PHY_OPT_LASI_CNTL 0x9002
+-#define EXT_PHY_OPT_RX_ALARM 0x9003
+-#define EXT_PHY_OPT_LASI_STATUS 0x9005
+-#define EXT_PHY_OPT_PCS_STATUS 0x0020
+-#define EXT_PHY_OPT_XGXS_LANE_STATUS 0x0018
+-#define EXT_PHY_OPT_AN_LINK_STATUS 0x8304
+-#define EXT_PHY_OPT_AN_CL37_CL73 0x8370
+-#define EXT_PHY_OPT_AN_CL37_FD 0xffe4
+-#define EXT_PHY_OPT_AN_CL37_AN 0xffe0
+-#define EXT_PHY_OPT_AN_ADV 0x11
++#define IGU_INT_ENABLE 0
++#define IGU_INT_DISABLE 1
++#define IGU_INT_NOP 2
++#define IGU_INT_NOP2 3
+
+-#define EXT_PHY_KR_PMA_PMD_DEVAD 0x1
+-#define EXT_PHY_KR_PCS_DEVAD 0x3
+-#define EXT_PHY_KR_AUTO_NEG_DEVAD 0x7
+-#define EXT_PHY_KR_CTRL 0x0000
+-#define EXT_PHY_KR_STATUS 0x0001
+-#define EXT_PHY_KR_AUTO_NEG_COMPLETE 0x0020
+-#define EXT_PHY_KR_AUTO_NEG_ADVERT 0x0010
+-#define EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE 0x0400
+-#define EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_ASYMMETRIC 0x0800
+-#define EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_BOTH 0x0C00
+-#define EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_MASK 0x0C00
+-#define EXT_PHY_KR_LP_AUTO_NEG 0x0013
+-#define EXT_PHY_KR_CTRL2 0x0007
+-#define EXT_PHY_KR_PCS_STATUS 0x0020
+-#define EXT_PHY_KR_PMD_CTRL 0x0096
+-#define EXT_PHY_KR_LASI_CNTL 0x9002
+-#define EXT_PHY_KR_LASI_STATUS 0x9005
+-#define EXT_PHY_KR_MISC_CTRL1 0xca85
+-#define EXT_PHY_KR_GEN_CTRL 0xca10
+-#define EXT_PHY_KR_ROM_CODE 0xca19
+-#define EXT_PHY_KR_ROM_RESET_INTERNAL_MP 0x0188
+-#define EXT_PHY_KR_ROM_MICRO_RESET 0x018a
++#define COMMAND_REG_INT_ACK 0x0
++#define COMMAND_REG_PROD_UPD 0x4
++#define COMMAND_REG_ATTN_BITS_UPD 0x8
++#define COMMAND_REG_ATTN_BITS_SET 0xc
++#define COMMAND_REG_ATTN_BITS_CLR 0x10
++#define COMMAND_REG_COALESCE_NOW 0x14
++#define COMMAND_REG_SIMD_MASK 0x18
++#define COMMAND_REG_SIMD_NOMASK 0x1c
+
+-#define EXT_PHY_SFX7101_XGXS_TEST1 0xc00a
+
+diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
+index 6595382..14475f5 100644
+--- a/include/linux/pci_ids.h
++++ b/include/linux/pci_ids.h
+@@ -1950,6 +1950,8 @@
+ #define PCI_DEVICE_ID_NX2_5708 0x164c
+ #define PCI_DEVICE_ID_TIGON3_5702FE 0x164d
+ #define PCI_DEVICE_ID_NX2_57710 0x164e
++#define PCI_DEVICE_ID_NX2_57711 0x164f
++#define PCI_DEVICE_ID_NX2_57711E 0x1650
+ #define PCI_DEVICE_ID_TIGON3_5705 0x1653
+ #define PCI_DEVICE_ID_TIGON3_5705_2 0x1654
+ #define PCI_DEVICE_ID_TIGON3_5720 0x1658
Modified: dists/lenny/linux-2.6/debian/patches/series/18
==============================================================================
--- dists/lenny/linux-2.6/debian/patches/series/18 Fri Jul 31 05:46:40 2009 (r14057)
+++ dists/lenny/linux-2.6/debian/patches/series/18 Fri Jul 31 06:10:25 2009 (r14058)
@@ -15,4 +15,6 @@
+ bugfix/all/fbmon-work-around-compiler-bug.patch
+ bugfix/all/alsa-via-fix-inversion-of-sound-and-side-channels.patch
+ bugfix/all/ntp-fix-convergence-regression.patch
-
+- debian/dfsg/drivers-net-bnx2x-disable.patch
++ features/all/bnx2x-update.patch
++ features/all/bnx2x-Separated-FW-from-the-source.patch
More information about the Kernel-svn-changes
mailing list